hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
1_a_crack.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o 1_a_crack 1_a_crack.cu ./1_a_crack Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. 
*****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "AA1111"; char plain_password2[] = "BB2222"; char plain_password3[] = "SS3333"; char plain_password4[] = "ZZ4444"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char a1,b2,c3,d4;//variables char password[7]; password[6] = '\0'; //block id threrad id initilized int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(a1='0'; a1<='9'; a1++){ for(b2='0'; b2<='9'; b2++){ for(c3='0'; c3<='9'; c3++){ for(d4='0'; d4<='9'; d4++){ password[2] = a1; password[3] = b2; password[4] = c3; password[5] = d4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } //time difference int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); //kernal function that determine block and threads to use hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); 
clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
1_a_crack.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o 1_a_crack 1_a_crack.cu ./1_a_crack Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. 
*****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "AA1111"; char plain_password2[] = "BB2222"; char plain_password3[] = "SS3333"; char plain_password4[] = "ZZ4444"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char a1,b2,c3,d4;//variables char password[7]; password[6] = '\0'; //block id threrad id initilized int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(a1='0'; a1<='9'; a1++){ for(b2='0'; b2<='9'; b2++){ for(c3='0'; c3<='9'; c3++){ for(d4='0'; d4<='9'; d4++){ password[2] = a1; password[3] = b2; password[4] = c3; password[5] = d4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } //time difference int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); //kernal function that determine block and threads to use kernel <<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); 
time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
ef4f55445fe9dc22d2a6ee41b1deef9d6339afa7.hip
// !!! This is a file automatically generated by hipify!!! /* * @Author: Haozhe Xie * @Date: 2019-08-07 20:54:24 * @Last Modified by: Haozhe Xie * @Last Modified time: 2020-06-17 14:58:55 * @Email: cshzxie@gmail.com */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <torch/extension.h> #include <vector> __global__ void chamfer_dist_kernel(int batch_size, int n, const float* xyz1, int m, const float* xyz2, float* dist, int* indexes) { const int batch = 512; __shared__ float buf[batch * 3]; for (int i = blockIdx.x; i < batch_size; i += gridDim.x) { for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j] = xyz2[(i * m + k2) * 3 + j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; float best_dist = 0; int best_dist_index = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch) { for (int k = 0; k < batch; k += 4) { { float x2 = buf[k * 3 + 0] - x1; float y2 = buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { float x2 = buf[k * 3 + 3] - x1; float y2 = buf[k * 3 + 4] - y1; float z2 = buf[k * 3 + 5] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { float x2 = buf[k * 3 + 6] - x1; float y2 = buf[k * 3 + 7] - y1; float z2 = buf[k * 3 + 8] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { float x2 = buf[k * 3 + 9] - x1; float y2 = buf[k * 3 + 10] - y1; float z2 = buf[k * 3 + 11] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } else { for 
(int k = 0; k < end_ka; k += 4) { { float x2 = buf[k * 3 + 0] - x1; float y2 = buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { float x2 = buf[k * 3 + 3] - x1; float y2 = buf[k * 3 + 4] - y1; float z2 = buf[k * 3 + 5] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { float x2 = buf[k * 3 + 6] - x1; float y2 = buf[k * 3 + 7] - y1; float z2 = buf[k * 3 + 8] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { float x2 = buf[k * 3 + 9] - x1; float y2 = buf[k * 3 + 10] - y1; float z2 = buf[k * 3 + 11] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { float x2 = buf[k * 3 + 0] - x1; float y2 = buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } if (k2 == 0 || dist[(i * n + j)] > best_dist) { dist[(i * n + j)] = best_dist; indexes[(i * n + j)] = best_dist_index; } } __syncthreads(); } } } std::vector<torch::Tensor> chamfer_cuda_forward(torch::Tensor xyz1, torch::Tensor xyz2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor dist1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kFloat)); torch::Tensor dist2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kFloat)); torch::Tensor idx1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kInt)); torch::Tensor idx2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kInt)); hipLaunchKernelGGL(( chamfer_dist_kernel), dim3(dim3(32, 16, 1)), dim3(512), 0, 0, batch_size, n, xyz1.data_ptr<float>(), m, 
xyz2.data_ptr<float>(), dist1.data_ptr<float>(), idx1.data_ptr<int>()); hipLaunchKernelGGL(( chamfer_dist_kernel), dim3(dim3(32, 16, 1)), dim3(512), 0, 0, batch_size, m, xyz2.data_ptr<float>(), n, xyz1.data_ptr<float>(), dist2.data_ptr<float>(), idx2.data_ptr<int>()); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in chamfer_cuda_forward: %s\n", hipGetErrorString(err)); } return {dist1, dist2, idx1, idx2}; } __global__ void chamfer_dist_grad_kernel(int b, int n, const float* xyz1, int m, const float* xyz2, const float* grad_dist1, const int* idx1, float* grad_xyz1, float* grad_xyz2) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx1[i * n + j]; float x2 = xyz2[(i * m + j2) * 3 + 0]; float y2 = xyz2[(i * m + j2) * 3 + 1]; float z2 = xyz2[(i * m + j2) * 3 + 2]; float g = grad_dist1[i * n + j] * 2; atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2)); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2))); } } } std::vector<torch::Tensor> chamfer_cuda_backward(torch::Tensor xyz1, torch::Tensor xyz2, torch::Tensor idx1, torch::Tensor idx2, torch::Tensor grad_dist1, torch::Tensor grad_dist2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor grad_xyz1 = torch::zeros_like(xyz1, torch::CUDA(torch::kFloat)); torch::Tensor grad_xyz2 = torch::zeros_like(xyz2, torch::CUDA(torch::kFloat)); hipLaunchKernelGGL(( chamfer_dist_grad_kernel), dim3(dim3(1, 16, 1)), 
dim3(256), 0, 0, batch_size, n, xyz1.data_ptr<float>(), m, xyz2.data_ptr<float>(), grad_dist1.data_ptr<float>(), idx1.data_ptr<int>(), grad_xyz1.data_ptr<float>(), grad_xyz2.data_ptr<float>()); hipLaunchKernelGGL(( chamfer_dist_grad_kernel), dim3(dim3(1, 16, 1)), dim3(256), 0, 0, batch_size, m, xyz2.data_ptr<float>(), n, xyz1.data_ptr<float>(), grad_dist2.data_ptr<float>(), idx2.data_ptr<int>(), grad_xyz2.data_ptr<float>(), grad_xyz1.data_ptr<float>()); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in chamfer_cuda_backward: %s\n", hipGetErrorString(err)); } return {grad_xyz1, grad_xyz2}; }
ef4f55445fe9dc22d2a6ee41b1deef9d6339afa7.cu
/* * @Author: Haozhe Xie * @Date: 2019-08-07 20:54:24 * @Last Modified by: Haozhe Xie * @Last Modified time: 2020-06-17 14:58:55 * @Email: cshzxie@gmail.com */ #include <cuda.h> #include <cuda_runtime.h> #include <torch/extension.h> #include <vector> __global__ void chamfer_dist_kernel(int batch_size, int n, const float* xyz1, int m, const float* xyz2, float* dist, int* indexes) { const int batch = 512; __shared__ float buf[batch * 3]; for (int i = blockIdx.x; i < batch_size; i += gridDim.x) { for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j] = xyz2[(i * m + k2) * 3 + j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; float best_dist = 0; int best_dist_index = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch) { for (int k = 0; k < batch; k += 4) { { float x2 = buf[k * 3 + 0] - x1; float y2 = buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { float x2 = buf[k * 3 + 3] - x1; float y2 = buf[k * 3 + 4] - y1; float z2 = buf[k * 3 + 5] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { float x2 = buf[k * 3 + 6] - x1; float y2 = buf[k * 3 + 7] - y1; float z2 = buf[k * 3 + 8] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { float x2 = buf[k * 3 + 9] - x1; float y2 = buf[k * 3 + 10] - y1; float z2 = buf[k * 3 + 11] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } else { for (int k = 0; k < end_ka; k += 4) { { float x2 = buf[k * 3 + 0] - x1; float y2 
= buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } { float x2 = buf[k * 3 + 3] - x1; float y2 = buf[k * 3 + 4] - y1; float z2 = buf[k * 3 + 5] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 1; } } { float x2 = buf[k * 3 + 6] - x1; float y2 = buf[k * 3 + 7] - y1; float z2 = buf[k * 3 + 8] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 2; } } { float x2 = buf[k * 3 + 9] - x1; float y2 = buf[k * 3 + 10] - y1; float z2 = buf[k * 3 + 11] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (dist < best_dist) { best_dist = dist; best_dist_index = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { float x2 = buf[k * 3 + 0] - x1; float y2 = buf[k * 3 + 1] - y1; float z2 = buf[k * 3 + 2] - z1; float dist = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || dist < best_dist) { best_dist = dist; best_dist_index = k + k2; } } if (k2 == 0 || dist[(i * n + j)] > best_dist) { dist[(i * n + j)] = best_dist; indexes[(i * n + j)] = best_dist_index; } } __syncthreads(); } } } std::vector<torch::Tensor> chamfer_cuda_forward(torch::Tensor xyz1, torch::Tensor xyz2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor dist1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kFloat)); torch::Tensor dist2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kFloat)); torch::Tensor idx1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kInt)); torch::Tensor idx2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kInt)); chamfer_dist_kernel<<<dim3(32, 16, 1), 512>>>( batch_size, n, xyz1.data_ptr<float>(), m, xyz2.data_ptr<float>(), dist1.data_ptr<float>(), idx1.data_ptr<int>()); chamfer_dist_kernel<<<dim3(32, 16, 1), 
512>>>( batch_size, m, xyz2.data_ptr<float>(), n, xyz1.data_ptr<float>(), dist2.data_ptr<float>(), idx2.data_ptr<int>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in chamfer_cuda_forward: %s\n", cudaGetErrorString(err)); } return {dist1, dist2, idx1, idx2}; } __global__ void chamfer_dist_grad_kernel(int b, int n, const float* xyz1, int m, const float* xyz2, const float* grad_dist1, const int* idx1, float* grad_xyz1, float* grad_xyz2) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx1[i * n + j]; float x2 = xyz2[(i * m + j2) * 3 + 0]; float y2 = xyz2[(i * m + j2) * 3 + 1]; float z2 = xyz2[(i * m + j2) * 3 + 2]; float g = grad_dist1[i * n + j] * 2; atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2)); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2))); atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2))); } } } std::vector<torch::Tensor> chamfer_cuda_backward(torch::Tensor xyz1, torch::Tensor xyz2, torch::Tensor idx1, torch::Tensor idx2, torch::Tensor grad_dist1, torch::Tensor grad_dist2) { const int batch_size = xyz1.size(0); const int n = xyz1.size(1); // num_points point cloud A const int m = xyz2.size(1); // num_points point cloud B torch::Tensor grad_xyz1 = torch::zeros_like(xyz1, torch::CUDA(torch::kFloat)); torch::Tensor grad_xyz2 = torch::zeros_like(xyz2, torch::CUDA(torch::kFloat)); chamfer_dist_grad_kernel<<<dim3(1, 16, 1), 256>>>( batch_size, n, xyz1.data_ptr<float>(), m, xyz2.data_ptr<float>(), grad_dist1.data_ptr<float>(), idx1.data_ptr<int>(), grad_xyz1.data_ptr<float>(), 
grad_xyz2.data_ptr<float>()); chamfer_dist_grad_kernel<<<dim3(1, 16, 1), 256>>>( batch_size, m, xyz2.data_ptr<float>(), n, xyz1.data_ptr<float>(), grad_dist2.data_ptr<float>(), idx2.data_ptr<int>(), grad_xyz2.data_ptr<float>(), grad_xyz1.data_ptr<float>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in chamfer_cuda_backward: %s\n", cudaGetErrorString(err)); } return {grad_xyz1, grad_xyz2}; }
2020bd8f703aa705e2b94e700ca82807ff9b66d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void slice(float* ptrTabDev, int nbSlice); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ float fpi(float x); static __device__ void reductionIntraThread(float* tabSM, int nbSlice); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! 
*/ __global__ void slice(float* ptrTabDev, int nbSlice) { extern __shared__ float tabSM[]; //Reduction intra-thread reductionIntraThread(tabSM, nbSlice); __syncthreads(); //Reduction interblock reductionADD<float>(tabSM, ptrTabDev); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(float* tabSM, int nbSlice) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); //TODO pattern entrelacement const int TID_LOCAL = Indice1D::tidLocal(); const float DX = 1.0f / (float) nbSlice; float sumThread = 0; int s = TID; while (s < nbSlice) { float x = s * DX; sumThread += fpi(x); s += NB_THREAD; } tabSM[TID_LOCAL] = 4 * sumThread * DX; } __device__ float fpi(float x) { return 1 / (1.0 + x * x); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
2020bd8f703aa705e2b94e700ca82807ff9b66d9.cu
#include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void slice(float* ptrTabDev, int nbSlice); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ float fpi(float x); static __device__ void reductionIntraThread(float* tabSM, int nbSlice); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! 
*/ __global__ void slice(float* ptrTabDev, int nbSlice) { extern __shared__ float tabSM[]; //Reduction intra-thread reductionIntraThread(tabSM, nbSlice); __syncthreads(); //Reduction interblock reductionADD<float>(tabSM, ptrTabDev); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(float* tabSM, int nbSlice) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); //TODO pattern entrelacement const int TID_LOCAL = Indice1D::tidLocal(); const float DX = 1.0f / (float) nbSlice; float sumThread = 0; int s = TID; while (s < nbSlice) { float x = s * DX; sumThread += fpi(x); s += NB_THREAD; } tabSM[TID_LOCAL] = 4 * sumThread * DX; } __device__ float fpi(float x) { return 1 / (1.0 + x * x); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
c65cdfd1caabe62c6b7cf9c3dd4bc0e214a01c2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, 
cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( 
"FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x128_rf_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if 
__CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kNumThreads, 
AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
c65cdfd1caabe62c6b7cf9c3dd4bc0e214a01c2a.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, 
cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x64_rf_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm50` is for sm50-sm70, but was built for 
sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_rf_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_64x128_rf_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } 
AttentionKernel<float, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm50(typename AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm70(typename AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) 
fmha_cutlassF_f32_aligned_32x128_gmem_sm75(typename AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f32_aligned_32x128_gmem_sm80(typename AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<float, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f32_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
10039943818f9e7185ae50fecd23cfa1c68b4c4b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* 256x256 dense matrix product. One block per output row, one thread per
 * output column; launch with <<<256, 256>>>:
 *   output0[b][t] = sum_k input0[b][k] * input1[k][t]
 *
 * Bug fix: the row index of input0 used threadIdx.x, so every block
 * computed the "diagonal" combination A[t][k]*B[k][t] and the result
 * disagreed with the CPU reference below; the row is selected by
 * blockIdx.x. The kernel uses no shared memory, so the previous
 * __syncthreads() and the 2048-float dynamic allocation were dead weight.
 */
/* number of threads needed 256*/
__global__ void kernel(float* input0, float* input1, float* output0){
  float acc = 0.0f;
  for (int k = 0; k < 256; k++){
    acc += input0[(blockIdx.x * 256) + k] * input1[(k * 256) + threadIdx.x];
  }
  output0[(blockIdx.x * 256) + threadIdx.x] = acc;
}

#define N (256*256)

int main(int argc, char **argv){
  float *v1;
  float *v2;
  float *r;
  float *rc;
  float *dv1;
  float *dv2;
  float *dr;

  v1 = (float*)malloc(sizeof(float) * N);
  v2 = (float*)malloc(sizeof(float) * N);
  r  = (float*)malloc(sizeof(float) * N);
  /* Bug fix: rc is accumulated with += in the CPU reference, so it must
   * start at zero; malloc() left it uninitialized (undefined behavior). */
  rc = (float*)calloc(N, sizeof(float));

  // generate input data
  for (int i = 0; i < N; ++i) {
    v1[i] = i % 4;
    v2[i] = 1.34;
  }

  hipMalloc((void**)&dv1, sizeof(float) * N);
  hipMalloc((void**)&dv2, sizeof(float) * N);
  hipMalloc((void**)&dr,  sizeof(float) * N);
  hipMemcpy(dv1, v1, sizeof(float) * N, hipMemcpyHostToDevice);
  hipMemcpy(dv2, v2, sizeof(float) * N, hipMemcpyHostToDevice);

  /* No dynamic shared memory needed (kernel keeps its accumulator in a
   * register). */
  hipLaunchKernelGGL(kernel, dim3(256), dim3(256), 0, 0, dv1, dv2, dr);

  /* Blocking copy also synchronizes with the kernel before r is read. */
  hipMemcpy(r, dr, sizeof(float) * N, hipMemcpyDeviceToHost);
  hipFree(dv1);
  hipFree(dv2);
  hipFree(dr);

  // show results
  for (int i = 0; i < N; ++i) {
    printf("%f ", r[i]);
  }

  /* CPU Matrix mult — same inner-loop summation order as the kernel, so
   * the float results are expected to match exactly. */
  for (int i = 0; i < 256; i++){
    for (int j = 0; j < 256; j++){
      for (int k = 0; k < 256; k++){
        rc[i*256+j] += v1[i*256+k] * v2[k*256+j];
      }
    }
  }

  for (int i = 0; i < N; ++i) {
    if (rc[i] != r[i]) {
      printf("differs! %f %f \n ", rc[i], r[i]);
    }
  }

  /* compare few first */
  printf("\n Compare a few \n");
  for (int i = 0; i < 10; ++i) {
    printf("%f %f \n", rc[i], r[i]);
  }

  free(v1);
  free(v2);
  free(r);
  free(rc);
  return 0;
}
10039943818f9e7185ae50fecd23cfa1c68b4c4b.cu
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* 256x256 dense matrix product. One block per output row, one thread per
 * output column; launch with <<<256, 256>>>:
 *   output0[b][t] = sum_k input0[b][k] * input1[k][t]
 *
 * Bug fix: the row index of input0 used threadIdx.x, so every block
 * computed the "diagonal" combination A[t][k]*B[k][t] and the result
 * disagreed with the CPU reference below; the row is selected by
 * blockIdx.x. The kernel uses no shared memory, so the previous
 * __syncthreads() and the 2048-float dynamic allocation were dead weight.
 */
/* number of threads needed 256*/
__global__ void kernel(float* input0, float* input1, float* output0){
  float acc = 0.0f;
  for (int k = 0; k < 256; k++){
    acc += input0[(blockIdx.x * 256) + k] * input1[(k * 256) + threadIdx.x];
  }
  output0[(blockIdx.x * 256) + threadIdx.x] = acc;
}

#define N (256*256)

int main(int argc, char **argv){
  float *v1;
  float *v2;
  float *r;
  float *rc;
  float *dv1;
  float *dv2;
  float *dr;

  v1 = (float*)malloc(sizeof(float) * N);
  v2 = (float*)malloc(sizeof(float) * N);
  r  = (float*)malloc(sizeof(float) * N);
  /* Bug fix: rc is accumulated with += in the CPU reference, so it must
   * start at zero; malloc() left it uninitialized (undefined behavior). */
  rc = (float*)calloc(N, sizeof(float));

  // generate input data
  for (int i = 0; i < N; ++i) {
    v1[i] = i % 4;
    v2[i] = 1.34;
  }

  cudaMalloc((void**)&dv1, sizeof(float) * N);
  cudaMalloc((void**)&dv2, sizeof(float) * N);
  cudaMalloc((void**)&dr,  sizeof(float) * N);
  cudaMemcpy(dv1, v1, sizeof(float) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dv2, v2, sizeof(float) * N, cudaMemcpyHostToDevice);

  /* No dynamic shared memory needed (kernel keeps its accumulator in a
   * register). */
  kernel<<<256, 256>>>(dv1, dv2, dr);

  /* Blocking copy also synchronizes with the kernel before r is read. */
  cudaMemcpy(r, dr, sizeof(float) * N, cudaMemcpyDeviceToHost);
  cudaFree(dv1);
  cudaFree(dv2);
  cudaFree(dr);

  // show results
  for (int i = 0; i < N; ++i) {
    printf("%f ", r[i]);
  }

  /* CPU Matrix mult — same inner-loop summation order as the kernel, so
   * the float results are expected to match exactly. */
  for (int i = 0; i < 256; i++){
    for (int j = 0; j < 256; j++){
      for (int k = 0; k < 256; k++){
        rc[i*256+j] += v1[i*256+k] * v2[k*256+j];
      }
    }
  }

  for (int i = 0; i < N; ++i) {
    if (rc[i] != r[i]) {
      printf("differs! %f %f \n ", rc[i], r[i]);
    }
  }

  /* compare few first */
  printf("\n Compare a few \n");
  for (int i = 0; i < 10; ++i) {
    printf("%f %f \n", rc[i], r[i]);
  }

  free(v1);
  free(v2);
  free(r);
  free(rc);
  return 0;
}
97e11fb5915bccab59b525f4a960c7bd60fe856c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Legacy THCUNN generic file: included once per scalar type with THNN_,
// THCTensor_, scalar_t and accreal expanded by the generic-include machinery.
// Implements 1-D (temporal) linear upsampling forward/backward on GPU.
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalUpSamplingLinear.cu"
#else

#include <THHUNN/linear_upsampling.h>

// Validates sizes before launching: widths must be positive, input (when
// given) must be a non-empty 3-D tensor, and gradOutput (when given) must
// match (nBatch, nChannels, outputWidth).
static inline void THNN_(TemporalUpSamplingLinear_shapeCheck)
     (THCState *state,
      THCTensor *input, THCTensor *gradOutput,
      int nBatch, int nChannels,
      int inputWidth, int outputWidth) {
  THArgCheck(inputWidth > 0 && outputWidth > 0, 2,
             "input and output sizes should be greater than 0,"
             " but got input (W: %d) output (W: %d)",
             inputWidth, outputWidth);
  if (input != NULL) {
     THCUNN_argCheck(state, !input->is_empty() && input->dim() == 3, 2, input,
                     "non-empty 3D input tensor expected but got: %s");
  }
  if (gradOutput != NULL) {
    THCUNN_check_dim_size(state, gradOutput, 3, 0, nBatch);
    THCUNN_check_dim_size(state, gradOutput, 3, 1, nChannels);
    THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth);
  }
}

// Forward pass: resizes output to (nbatch, channels, outputWidth), zeroes it,
// and launches one interpolation kernel with one thread per output position.
void THNN_(TemporalUpSamplingLinear_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           int outputWidth,
           bool align_corners)
{
  int nbatch = THCTensor_(size)(state, input, 0);
  int channels = THCTensor_(size)(state, input, 1);
  int inputWidth = THCTensor_(size)(state, input, 2);
  THNN_(TemporalUpSamplingLinear_shapeCheck)
       (state, input, NULL,
        nbatch, channels,
        inputWidth, outputWidth);
  THCUNN_assertSameGPU(state, 2, input, output);
  THCTensor_(resize3d)(state, output,
                       THCTensor_(size)(state, input, 0),
                       THCTensor_(size)(state, input, 1),
                       outputWidth);
  THCTensor_(zero)(state, output);
  THCDeviceTensor<scalar_t, 3> idata = toDeviceTensor<scalar_t, 3>(state, input);
  THCDeviceTensor<scalar_t, 3> odata = toDeviceTensor<scalar_t, 3>(state, output);
  THAssert(inputWidth > 0 && outputWidth > 0);
  // Scale factor between input and output grids; accreal keeps the division
  // in higher precision than scalar_t.
  const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
  // One logical work item per output element along the width.
  const int num_kernels = outputWidth;
  const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
  hipStream_t stream = THCState_getCurrentStream(state);
  hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads) , 0 , stream, num_kernels, rwidth, align_corners, idata, odata);
  // Surface asynchronous launch errors immediately.
  THCudaCheck(hipGetLastError());
}

// Backward pass: scatters gradOutput back into a freshly zeroed gradInput
// of shape (nbatch, nchannels, inputWidth).
void THNN_(TemporalUpSamplingLinear_updateGradInput)(
           THCState *state,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int nbatch,
           int nchannels,
           int inputWidth,
           int outputWidth,
           bool align_corners)
{
  THNN_(TemporalUpSamplingLinear_shapeCheck)
       (state, NULL, gradOutput,
        nbatch, nchannels,
        inputWidth, outputWidth);
  // Work on a contiguous copy; released at the end of the function.
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
  THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth);
  // gradInput accumulates contributions, so it must start zeroed.
  THCTensor_(zero)(state, gradInput);
  THCDeviceTensor<scalar_t, 3> data1 = toDeviceTensor<scalar_t, 3>(state, gradInput);
  THCDeviceTensor<scalar_t, 3> data2 = toDeviceTensor<scalar_t, 3>(state, gradOutput);
  const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
  const int num_kernels = outputWidth;
  const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
  hipStream_t stream = THCState_getCurrentStream(state);
  hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<scalar_t ,accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, data1, data2);
  THCudaCheck(hipGetLastError());
  // Balance the newContiguous reference taken above.
  THCTensor_(free)(state, gradOutput);
}

#endif
97e11fb5915bccab59b525f4a960c7bd60fe856c.cu
// Legacy THCUNN generic file: included once per scalar type with THNN_,
// THCTensor_, scalar_t and accreal expanded by the generic-include machinery.
// Implements 1-D (temporal) linear upsampling forward/backward on GPU.
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalUpSamplingLinear.cu"
#else

#include <THCUNN/linear_upsampling.h>

// Validates sizes before launching: widths must be positive, input (when
// given) must be a non-empty 3-D tensor, and gradOutput (when given) must
// match (nBatch, nChannels, outputWidth).
static inline void THNN_(TemporalUpSamplingLinear_shapeCheck)
     (THCState *state,
      THCTensor *input, THCTensor *gradOutput,
      int nBatch, int nChannels,
      int inputWidth, int outputWidth) {
  THArgCheck(inputWidth > 0 && outputWidth > 0, 2,
             "input and output sizes should be greater than 0,"
             " but got input (W: %d) output (W: %d)",
             inputWidth, outputWidth);
  if (input != NULL) {
     THCUNN_argCheck(state, !input->is_empty() && input->dim() == 3, 2, input,
                     "non-empty 3D input tensor expected but got: %s");
  }
  if (gradOutput != NULL) {
    THCUNN_check_dim_size(state, gradOutput, 3, 0, nBatch);
    THCUNN_check_dim_size(state, gradOutput, 3, 1, nChannels);
    THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth);
  }
}

// Forward pass: resizes output to (nbatch, channels, outputWidth), zeroes it,
// and launches one interpolation kernel with one thread per output position.
void THNN_(TemporalUpSamplingLinear_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           int outputWidth,
           bool align_corners)
{
  int nbatch = THCTensor_(size)(state, input, 0);
  int channels = THCTensor_(size)(state, input, 1);
  int inputWidth = THCTensor_(size)(state, input, 2);
  THNN_(TemporalUpSamplingLinear_shapeCheck)
       (state, input, NULL,
        nbatch, channels,
        inputWidth, outputWidth);
  THCUNN_assertSameGPU(state, 2, input, output);
  THCTensor_(resize3d)(state, output,
                       THCTensor_(size)(state, input, 0),
                       THCTensor_(size)(state, input, 1),
                       outputWidth);
  THCTensor_(zero)(state, output);
  THCDeviceTensor<scalar_t, 3> idata = toDeviceTensor<scalar_t, 3>(state, input);
  THCDeviceTensor<scalar_t, 3> odata = toDeviceTensor<scalar_t, 3>(state, output);
  THAssert(inputWidth > 0 && outputWidth > 0);
  // Scale factor between input and output grids; accreal keeps the division
  // in higher precision than scalar_t.
  const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
  // One logical work item per output element along the width.
  const int num_kernels = outputWidth;
  const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
  cudaStream_t stream = THCState_getCurrentStream(state);
  caffe_gpu_interp2_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads , 0 , stream>>>(num_kernels, rwidth, align_corners, idata, odata);
  // Surface asynchronous launch errors immediately.
  THCudaCheck(cudaGetLastError());
}

// Backward pass: scatters gradOutput back into a freshly zeroed gradInput
// of shape (nbatch, nchannels, inputWidth).
void THNN_(TemporalUpSamplingLinear_updateGradInput)(
           THCState *state,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int nbatch,
           int nchannels,
           int inputWidth,
           int outputWidth,
           bool align_corners)
{
  THNN_(TemporalUpSamplingLinear_shapeCheck)
       (state, NULL, gradOutput,
        nbatch, nchannels,
        inputWidth, outputWidth);
  // Work on a contiguous copy; released at the end of the function.
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
  THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth);
  // gradInput accumulates contributions, so it must start zeroed.
  THCTensor_(zero)(state, gradInput);
  THCDeviceTensor<scalar_t, 3> data1 = toDeviceTensor<scalar_t, 3>(state, gradInput);
  THCDeviceTensor<scalar_t, 3> data2 = toDeviceTensor<scalar_t, 3>(state, gradOutput);
  const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
  const int num_kernels = outputWidth;
  const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
  cudaStream_t stream = THCState_getCurrentStream(state);
  caffe_gpu_interp2_kernel_backward<scalar_t ,accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, data1, data2);
  THCudaCheck(cudaGetLastError());
  // Balance the newContiguous reference taken above.
  THCTensor_(free)(state, gradOutput);
}

#endif
2ae6b9063f03541d39d3bb4fef0ff48f6e5a31b1.hip
// !!! This is a file automatically generated by hipify!!! #include "../header/cudatool.h" void cudaErrT(hipError_t err, int line, char* file){ if (err != hipSuccess){ printf( "\n*** Cuda error in file '%s' in line %i : %s. ***\n\n", file, line, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
2ae6b9063f03541d39d3bb4fef0ff48f6e5a31b1.cu
#include "../header/cudatool.h" void cudaErrT(cudaError_t err, int line, char* file){ if (err != cudaSuccess){ printf( "\n*** Cuda error in file '%s' in line %i : %s. ***\n\n", file, line, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
4154154ba694a5a61cc4e3f018b4778ce65770ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2016 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "../debug.h" #define N ( 1 << 27 ) #define THREADS_PER_BLOCK 256 /* only works for float currently. double will break this code due to lack of 64bit floating point atomics */ #define FLOATTYPE_T float /* sumReduction kernel using atomics */ __global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *sum) { /* calculate global index in the array */ int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; /* return if my global index is larger than the array size */ if( globalIndex >= n ) return; /* grid stride loop where array is larger than number of threads * launched, using atomics */ for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x ) { atomicAdd( sum, in[i] ); } /* end for */ return; } int main() { FLOATTYPE_T *h_in, h_sum, cpu_sum; FLOATTYPE_T *d_in, *d_sum; int size = N; int memBytes = size * sizeof( FLOATTYPE_T ); /* get GPU device number and name */ int dev; hipDeviceProp_t deviceProp; checkCUDA( hipGetDevice( &dev ) ); checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); /* allocate space for device copies of in, out */ checkCUDA( hipMalloc( &d_in, memBytes ) ); checkCUDA( hipMalloc( &d_sum, sizeof(FLOATTYPE_T) ) ); /* allocate space for host copies of in, out and setup input values */ h_in = 
(FLOATTYPE_T *)malloc( memBytes ); for( int i = 0; i < size; i++ ) { h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 ); if( i % 2 == 0 ) h_in[i] = -h_in[i]; } h_sum = 0.0; cpu_sum = 0.0; /* copy inputs to device */ checkCUDA( hipMemcpy( d_in, h_in, memBytes, hipMemcpyHostToDevice ) ); checkCUDA( hipMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) ); /* calculate block and grid sizes */ dim3 threads( THREADS_PER_BLOCK, 1, 1); /* choose blocksize such that it will be smaller than the max that this GPU allows */ int blk = min( (size / threads.x) + 1, deviceProp.maxGridSize[0] ); dim3 blocks( blk, 1, 1); /* start the timers */ hipEvent_t start, stop; checkCUDA( hipEventCreate( &start ) ); checkCUDA( hipEventCreate( &stop ) ); checkCUDA( hipEventRecord( start, 0 ) ); /* launch the kernel on the GPU */ hipLaunchKernelGGL(( sumReduction), dim3(blocks), dim3(threads) , 0, 0, size, d_in, d_sum ); checkKERNEL() /* stop the timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); float elapsedTime; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU perf data */ printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)* (double)size * 1.e-9 ); printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime, sizeof(FLOATTYPE_T) * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9); /* copy result back to host */ checkCUDA( hipMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T), hipMemcpyDeviceToHost ) ); /* calculate CPU results */ checkCUDA( hipEventRecord( start, 0 ) ); for( int i = 0; i < size; i++ ) { cpu_sum += h_in[i]; } /* end for */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU perf data */ printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime, sizeof(FLOATTYPE_T) * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9); /* calculate error */ FLOATTYPE_T diff = abs( 
cpu_sum - h_sum ); if( diff / abs(h_sum) < 0.001 ) printf("PASS\n"); else { printf("FAIL\n"); printf("Error is %f\n", diff / h_sum ); } /* end else */ /* clean up */ free(h_in); checkCUDA( hipFree( d_in ) ); checkCUDA( hipFree( d_sum ) ); checkCUDA( hipDeviceReset() ); return 0; } /* end main */
4154154ba694a5a61cc4e3f018b4778ce65770ae.cu
/* * Copyright 2016 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "../debug.h" #define N ( 1 << 27 ) #define THREADS_PER_BLOCK 256 /* only works for float currently. double will break this code due to lack of 64bit floating point atomics */ #define FLOATTYPE_T float /* sumReduction kernel using atomics */ __global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *sum) { /* calculate global index in the array */ int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; /* return if my global index is larger than the array size */ if( globalIndex >= n ) return; /* grid stride loop where array is larger than number of threads * launched, using atomics */ for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x ) { atomicAdd( sum, in[i] ); } /* end for */ return; } int main() { FLOATTYPE_T *h_in, h_sum, cpu_sum; FLOATTYPE_T *d_in, *d_sum; int size = N; int memBytes = size * sizeof( FLOATTYPE_T ); /* get GPU device number and name */ int dev; cudaDeviceProp deviceProp; checkCUDA( cudaGetDevice( &dev ) ); checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); /* allocate space for device copies of in, out */ checkCUDA( cudaMalloc( &d_in, memBytes ) ); checkCUDA( cudaMalloc( &d_sum, sizeof(FLOATTYPE_T) ) ); /* allocate space for host copies of in, out and setup input values */ h_in = (FLOATTYPE_T *)malloc( memBytes ); for( int i = 0; i < size; i++ ) { h_in[i] = 
FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 ); if( i % 2 == 0 ) h_in[i] = -h_in[i]; } h_sum = 0.0; cpu_sum = 0.0; /* copy inputs to device */ checkCUDA( cudaMemcpy( d_in, h_in, memBytes, cudaMemcpyHostToDevice ) ); checkCUDA( cudaMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) ); /* calculate block and grid sizes */ dim3 threads( THREADS_PER_BLOCK, 1, 1); /* choose blocksize such that it will be smaller than the max that this GPU allows */ int blk = min( (size / threads.x) + 1, deviceProp.maxGridSize[0] ); dim3 blocks( blk, 1, 1); /* start the timers */ cudaEvent_t start, stop; checkCUDA( cudaEventCreate( &start ) ); checkCUDA( cudaEventCreate( &stop ) ); checkCUDA( cudaEventRecord( start, 0 ) ); /* launch the kernel on the GPU */ sumReduction<<< blocks, threads >>>( size, d_in, d_sum ); checkKERNEL() /* stop the timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); float elapsedTime; checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU perf data */ printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)* (double)size * 1.e-9 ); printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime, sizeof(FLOATTYPE_T) * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9); /* copy result back to host */ checkCUDA( cudaMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T), cudaMemcpyDeviceToHost ) ); /* calculate CPU results */ checkCUDA( cudaEventRecord( start, 0 ) ); for( int i = 0; i < size; i++ ) { cpu_sum += h_in[i]; } /* end for */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU perf data */ printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime, sizeof(FLOATTYPE_T) * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9); /* calculate error */ FLOATTYPE_T diff = abs( cpu_sum - h_sum ); if( diff / abs(h_sum) < 0.001 ) printf("PASS\n"); else { printf("FAIL\n"); 
printf("Error is %f\n", diff / h_sum ); } /* end else */ /* clean up */ free(h_in); checkCUDA( cudaFree( d_in ) ); checkCUDA( cudaFree( d_sum ) ); checkCUDA( cudaDeviceReset() ); return 0; } /* end main */
8488421f7ec513c5d2f52d9c48fccaf584cc829f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/DistributionTemplates.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THH/THHGeneral.h> #include <THH/THHApply.cuh> #include <THH/THHDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in hiprand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state) * in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { hiprandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(hiprandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { uint32_t val = hiprand(&state); //need just bits constexpr auto MASK = static_cast<uint32_t>((static_cast<uint64_t>(1) << std::numeric_limits<float>::digits) - 1); constexpr auto DIVISOR = static_cast<float>(1) / (static_cast<uint32_t>(1) << std::numeric_limits<float>::digits); return (val & MASK) * DIVISOR; } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(count) .add_input(prob) .build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (hiprandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return hiprand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return hiprand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(gamma) .add_input(gamma_sum) .build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(self) .add_input(output) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(x) .add_input(alpha) .add_input(total) .build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
8488421f7ec513c5d2f52d9c48fccaf584cc829f.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/DistributionTemplates.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THC/THCGeneral.h> #include <THC/THCApply.cuh> #include <THC/THCDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in curand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) * in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using curand distributions that utilize curand4 call. curand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { curandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(curandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { uint32_t val = curand(&state); //need just bits constexpr auto MASK = static_cast<uint32_t>((static_cast<uint64_t>(1) << std::numeric_limits<float>::digits) - 1); constexpr auto DIVISOR = static_cast<float>(1) / (static_cast<uint32_t>(1) << std::numeric_limits<float>::digits); return (val & MASK) * DIVISOR; } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(count) .add_input(prob) .build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (curandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return curand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return curand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(gamma) .add_input(gamma_sum) .build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(self) .add_input(output) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(x) .add_input(alpha) .add_input(total) .build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
8ae88809b02719cf107ef599082e4c88d19fd3a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Raster.cuh" #include "TensorflowOp_generated.h" namespace MNN { namespace CUDA { template <typename T> __global__ void pack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { int total = inside * axis * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int x = i % inside; int tmp = i / inside; int y = tmp % axis; int z = tmp / axis; int y4 = y / 4; int yR = y % 4; int dstOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; output[dstOffset] = input[i]; } } void PackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { auto packAxis = (axis + 3) / 4; if (axis % 4 != 0) { runtime->memset(output, 0, inside * packAxis * 4 * outside * bytes); } int block_num = runtime->blocks_num(inside * axis * outside); int threads_num = runtime->threads_num(); switch (bytes) { case 4: hipLaunchKernelGGL(( pack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside, packAxis); break; case 2: hipLaunchKernelGGL(( pack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); break; case 1: hipLaunchKernelGGL(( pack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); break; default: break; } } template <typename T> __global__ void unpack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { int total = inside * axis * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int x = i % inside; int tmp = i / inside; int y = tmp % axis; int z = tmp / axis; int y4 = y / 4; int yR = y % 4; int srcOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; output[i] = input[srcOffset]; } } void 
UnpackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { auto packAxis = (axis + 3) / 4; int block_num = runtime->blocks_num(inside * axis * outside); int threads_num = runtime->threads_num(); switch (bytes) { case 4: hipLaunchKernelGGL(( unpack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside, packAxis); break; case 2: hipLaunchKernelGGL(( unpack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); break; case 1: hipLaunchKernelGGL(( unpack_c4), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); break; default: break; } } // Blit don't care offset template <typename T> __global__ void blitRegion(const T *inputO, T *outputO, int loopCount, const int32_t* dstIndice, const int32_t* srcIndice, int dstUseIndice, int srcUseIndice, int dstStep, int srcStep,int srcLimit, int sizeZ, int sizeY, int sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX ) { int total = loopCount; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int srcOffsetO = i * srcStep; if (srcUseIndice >= 0) { srcOffsetO = srcIndice[i] * srcStep; } int dstOffsetO = i * dstStep; if (dstUseIndice >= 0) { dstOffsetO = dstIndice[i] * dstStep; } if (srcOffsetO >= 0 && srcOffsetO < srcLimit) { const T* input = inputO + srcOffsetO; T* output = outputO + dstOffsetO; for (int z=0; z<sizeZ; ++z) { for (int y=0; y<sizeY; ++y) { for (int x=0; x<sizeX; ++x) { int srcOffset = z * strideZ + y * strideY + x * strideX; int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; output[dstOffset] = input[srcOffset]; } } } } else { T* output = outputO + dstOffsetO; for (int z=0; z<sizeZ; ++z) { for (int y=0; y<sizeY; ++y) { for (int x=0; x<sizeX; ++x) { int dstOffset = z * dstStrideZ + 
y * dstStrideY + x * dstStrideX; output[dstOffset] = (T)0; } } } } } } void BlitWithIndice(uint8_t* output, const uint8_t* input, const int32_t* dstIndices, const int32_t* srcIndices, int dstUseIndice, int srcUseIndice, int loopCount, int dstStep, int srcStep, int srcLimit, const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) { int count = loopCount; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 4: hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 2: hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 1: hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; default: break; } } #define UNARY_FUNC(Name, Func)\ template<typename T>\ __global__ void Name(const T *input, T *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int 
total = sizeZ * sizeY * sizeX;\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ T x = input[srcOffset];\ output[dstOffset] = Func;\ }\ }\ UNARY_FUNC(blit, x); UNARY_FUNC(ABS, abs(x)); UNARY_FUNC(EXP, exp(x)); UNARY_FUNC(NEG, -x); UNARY_FUNC(RECIPROCAL, (T)(1.0)/x); UNARY_FUNC(FLOOR, floor(x)); UNARY_FUNC(CEIL, ceil(x)); UNARY_FUNC(SQUARE, x*x); UNARY_FUNC(SQRT, (T)(sqrt((float)x))); UNARY_FUNC(RSQRT, (T)(rsqrt((float)x))); UNARY_FUNC(LOG, (T)(log((float)x))); UNARY_FUNC(SIN, (T)(sin((float)x))); UNARY_FUNC(COS, (T)(cos((float)x))); UNARY_FUNC(TAN, (T)(tan((float)x))); UNARY_FUNC(ASIN, (T)(asin((float)x))); UNARY_FUNC(ACOS, (T)(acos((float)x))); UNARY_FUNC(ATAN, (T)(atan((float)x))); UNARY_FUNC(LOG1P, log(1+x)); UNARY_FUNC(TANH, tanh(x)); UNARY_FUNC(SIGMOID, 1./(1.+exp(-x))); UNARY_FUNC(EXPM1, exp(x)-1); UNARY_FUNC(ATANH, atanh(x)); UNARY_FUNC(ACOSH, acosh(x)); UNARY_FUNC(COSH, cosh(x)); UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? 
-1 : 0)); UNARY_FUNC(ROUND, round(x)); UNARY_FUNC(SINH, sinh(x)); UNARY_FUNC(ASINH, asinh(x)); UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0)); void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 4: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } } template<typename T> __global__ void fuseblit(const T *input, T *output, int fuseNum, const int32_t* sliceOffset, int sizeZ, int sizeY, int sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX ) { int count = fuseNum*sizeZ * sizeY * sizeX; for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) { int j = c / (sizeZ * sizeY * sizeX); int i = c % (sizeZ * sizeY * sizeX); int ix = i % sizeX; int tmp = i / sizeX; int iy = tmp % sizeY; int iz = tmp / sizeY; int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX; int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX; output[dst_offset] = input[src_offset]; } } void FuseRasterBlit(uint8_t* output, const 
uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); int numBlocks = block_num; int threadsPerBlock = threads_num; // dim3 numBlocks(block_num, fuseNum); // dim3 threadsPerBlock(threads_num, 1); switch (bytes) { case 4: hipLaunchKernelGGL(( fuseblit), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const float*)input, (float*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: hipLaunchKernelGGL(( fuseblit), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const int16_t*)input, (int16_t*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: hipLaunchKernelGGL(( fuseblit), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const int8_t*)input, (int8_t*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } //printf("%s, %d-%d-%d-%d\n", hipGetErrorString(hipGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y); } void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // TODO: Support FP16 MNN_ASSERT(bytes==4); #define COMPUTE(TYPE)\ if (opType == MNN::UnaryOpOperation_##TYPE ) {\ hipLaunchKernelGGL(( TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,\ size[0], size[1], size[2],\ 
srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE(ABS); COMPUTE(NEG); COMPUTE(FLOOR); COMPUTE(CEIL); COMPUTE(SQUARE); COMPUTE(SQRT); COMPUTE(RSQRT); COMPUTE(EXP); COMPUTE(LOG); COMPUTE(SIN); COMPUTE(COS); COMPUTE(TAN); COMPUTE(ASIN); COMPUTE(ACOS); COMPUTE(ATAN); COMPUTE(RECIPROCAL); COMPUTE(LOG1P); COMPUTE(TANH); COMPUTE(SIGMOID); COMPUTE(EXPM1); COMPUTE(ACOSH); COMPUTE(ATANH); COMPUTE(SIGN); COMPUTE(COSH); COMPUTE(ROUND); COMPUTE(SINH); COMPUTE(ASINH); COMPUTE(HARDSWISH); #undef COMPUTE } #define BINARY_FUNC(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void Binary##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int total = sizeZ * sizeY * sizeX;\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ TIn x = input0[srcOffset];\ TIn y = input1[srcOffset1];\ output[dstOffset] = (TOut)Func;\ }\ }\ #define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? -1 : 0)) BINARY_FUNC(ADD, x+y); BINARY_FUNC(SUB, x-y); BINARY_FUNC(MUL, x*y); BINARY_FUNC(DIV, x/y); BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC(MINIMUM, min(x, y)); BINARY_FUNC(MAXIMUM, max(x, y)); BINARY_FUNC(GREATER, x > y ? 1 : 0); BINARY_FUNC(LESS, x < y ? 1 : 0); BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC(EQUAL, x == y ? 1 : 0); BINARY_FUNC(NOTEQUAL, x != y ? 
1 : 0); BINARY_FUNC(FLOORDIV, floor(x / y)); BINARY_FUNC(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC(POW, pow(x, y)); BINARY_FUNC(ATAN2, atan2(x, y)); BINARY_FUNC(MOD, x - x / y); BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0); void BinaryBlitTemplateFloat(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // TODO: Support FP16 MNN_ASSERT(bytes==4); #define COMPUTE_FLOAT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ hipLaunchKernelGGL(( Binary##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (const float*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE_FLOAT(ADD, float); COMPUTE_FLOAT(SUB, float); COMPUTE_FLOAT(MUL, float); COMPUTE_FLOAT(DIV, float); COMPUTE_FLOAT(REALDIV, float); COMPUTE_FLOAT(MINIMUM, float); COMPUTE_FLOAT(MAXIMUM, float); COMPUTE_FLOAT(GREATER, int); COMPUTE_FLOAT(LESS, int); COMPUTE_FLOAT(LESS_EQUAL, int); COMPUTE_FLOAT(GREATER_EQUAL, int); COMPUTE_FLOAT(EQUAL, int); COMPUTE_FLOAT(NOTEQUAL, int); COMPUTE_FLOAT(FLOORDIV, float); COMPUTE_FLOAT(FLOORMOD, float); COMPUTE_FLOAT(POW, float); COMPUTE_FLOAT(SquaredDifference, float); COMPUTE_FLOAT(ATAN2, float); COMPUTE_FLOAT(MOD, float); } void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = 
runtime->threads_num(); #define COMPUTE_INT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ hipLaunchKernelGGL(( Binary##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const int*)input, (const int*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE_INT(ADD, int); COMPUTE_INT(SUB, int); COMPUTE_INT(MUL, int); COMPUTE_INT(DIV, int); COMPUTE_INT(MINIMUM, int); COMPUTE_INT(MAXIMUM, int); COMPUTE_INT(GREATER, int); COMPUTE_INT(LESS, int); COMPUTE_INT(LESS_EQUAL, int); COMPUTE_INT(GREATER_EQUAL, int); COMPUTE_INT(EQUAL, int); COMPUTE_INT(NOTEQUAL, int); COMPUTE_INT(SquaredDifference, int); COMPUTE_INT(MOD, int); COMPUTE_INT(LOGICALOR, int); } void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType) { if (type.code == halide_type_float) { BinaryBlitTemplateFloat(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType); } else if (type.code == halide_type_int) { BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType); } } }// namespace CUDA }// namespace MNN
8ae88809b02719cf107ef599082e4c88d19fd3a7.cu
#include "Raster.cuh" #include "TensorflowOp_generated.h" namespace MNN { namespace CUDA { template <typename T> __global__ void pack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { int total = inside * axis * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int x = i % inside; int tmp = i / inside; int y = tmp % axis; int z = tmp / axis; int y4 = y / 4; int yR = y % 4; int dstOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; output[dstOffset] = input[i]; } } void PackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { auto packAxis = (axis + 3) / 4; if (axis % 4 != 0) { runtime->memset(output, 0, inside * packAxis * 4 * outside * bytes); } int block_num = runtime->blocks_num(inside * axis * outside); int threads_num = runtime->threads_num(); switch (bytes) { case 4: pack_c4<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside, packAxis); break; case 2: pack_c4<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); break; case 1: pack_c4<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); break; default: break; } } template <typename T> __global__ void unpack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { int total = inside * axis * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int x = i % inside; int tmp = i / inside; int y = tmp % axis; int z = tmp / axis; int y4 = y / 4; int yR = y % 4; int srcOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; output[i] = input[srcOffset]; } } void UnpackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { auto packAxis = (axis + 3) / 4; int block_num = runtime->blocks_num(inside * axis * 
outside); int threads_num = runtime->threads_num(); switch (bytes) { case 4: unpack_c4<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside, packAxis); break; case 2: unpack_c4<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); break; case 1: unpack_c4<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); break; default: break; } } // Blit don't care offset template <typename T> __global__ void blitRegion(const T *inputO, T *outputO, int loopCount, const int32_t* dstIndice, const int32_t* srcIndice, int dstUseIndice, int srcUseIndice, int dstStep, int srcStep,int srcLimit, int sizeZ, int sizeY, int sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX ) { int total = loopCount; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int srcOffsetO = i * srcStep; if (srcUseIndice >= 0) { srcOffsetO = srcIndice[i] * srcStep; } int dstOffsetO = i * dstStep; if (dstUseIndice >= 0) { dstOffsetO = dstIndice[i] * dstStep; } if (srcOffsetO >= 0 && srcOffsetO < srcLimit) { const T* input = inputO + srcOffsetO; T* output = outputO + dstOffsetO; for (int z=0; z<sizeZ; ++z) { for (int y=0; y<sizeY; ++y) { for (int x=0; x<sizeX; ++x) { int srcOffset = z * strideZ + y * strideY + x * strideX; int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; output[dstOffset] = input[srcOffset]; } } } } else { T* output = outputO + dstOffsetO; for (int z=0; z<sizeZ; ++z) { for (int y=0; y<sizeY; ++y) { for (int x=0; x<sizeX; ++x) { int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; output[dstOffset] = (T)0; } } } } } } void BlitWithIndice(uint8_t* output, const uint8_t* input, const int32_t* dstIndices, const int32_t* srcIndices, int dstUseIndice, int srcUseIndice, int loopCount, int dstStep, int srcStep, int srcLimit, const Tensor::InsideDescribe::Region& 
reg, int bytes, CUDARuntime* runtime) { int count = loopCount; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 4: blitRegion<<<block_num, threads_num>>>((const float*)input, (float*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 2: blitRegion<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 1: blitRegion<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; default: break; } } #define UNARY_FUNC(Name, Func)\ template<typename T>\ __global__ void Name(const T *input, T *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int total = sizeZ * sizeY * sizeX;\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ T x = input[srcOffset];\ output[dstOffset] = Func;\ }\ }\ UNARY_FUNC(blit, x); UNARY_FUNC(ABS, abs(x)); UNARY_FUNC(EXP, exp(x)); UNARY_FUNC(NEG, -x); UNARY_FUNC(RECIPROCAL, 
(T)(1.0)/x); UNARY_FUNC(FLOOR, floor(x)); UNARY_FUNC(CEIL, ceil(x)); UNARY_FUNC(SQUARE, x*x); UNARY_FUNC(SQRT, (T)(sqrt((float)x))); UNARY_FUNC(RSQRT, (T)(rsqrt((float)x))); UNARY_FUNC(LOG, (T)(log((float)x))); UNARY_FUNC(SIN, (T)(sin((float)x))); UNARY_FUNC(COS, (T)(cos((float)x))); UNARY_FUNC(TAN, (T)(tan((float)x))); UNARY_FUNC(ASIN, (T)(asin((float)x))); UNARY_FUNC(ACOS, (T)(acos((float)x))); UNARY_FUNC(ATAN, (T)(atan((float)x))); UNARY_FUNC(LOG1P, log(1+x)); UNARY_FUNC(TANH, tanh(x)); UNARY_FUNC(SIGMOID, 1./(1.+exp(-x))); UNARY_FUNC(EXPM1, exp(x)-1); UNARY_FUNC(ATANH, atanh(x)); UNARY_FUNC(ACOSH, acosh(x)); UNARY_FUNC(COSH, cosh(x)); UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? -1 : 0)); UNARY_FUNC(ROUND, round(x)); UNARY_FUNC(SINH, sinh(x)); UNARY_FUNC(ASINH, asinh(x)); UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0)); void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 4: blit<<<block_num, threads_num>>>((const float*)input, (float*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: blit<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: blit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } } template<typename T> __global__ void fuseblit(const T *input, T *output, int fuseNum, const int32_t* sliceOffset, int sizeZ, int sizeY, int sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX 
) { int count = fuseNum*sizeZ * sizeY * sizeX; for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) { int j = c / (sizeZ * sizeY * sizeX); int i = c % (sizeZ * sizeY * sizeX); int ix = i % sizeX; int tmp = i / sizeX; int iy = tmp % sizeY; int iz = tmp / sizeY; int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX; int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX; output[dst_offset] = input[src_offset]; } } void FuseRasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); int numBlocks = block_num; int threadsPerBlock = threads_num; // dim3 numBlocks(block_num, fuseNum); // dim3 threadsPerBlock(threads_num, 1); switch (bytes) { case 4: fuseblit<<<numBlocks, threadsPerBlock>>>((const float*)input, (float*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: fuseblit<<<numBlocks, threadsPerBlock>>>((const int16_t*)input, (int16_t*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: fuseblit<<<numBlocks, threadsPerBlock>>>((const int8_t*)input, (int8_t*)output, fuseNum, (const int32_t*)sliceOffset, size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } //printf("%s, %d-%d-%d-%d\n", cudaGetErrorString(cudaGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y); } void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, 
const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // TODO: Support FP16 MNN_ASSERT(bytes==4); #define COMPUTE(TYPE)\ if (opType == MNN::UnaryOpOperation_##TYPE ) {\ TYPE<<<block_num, threads_num>>>((const float*)input, (float*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE(ABS); COMPUTE(NEG); COMPUTE(FLOOR); COMPUTE(CEIL); COMPUTE(SQUARE); COMPUTE(SQRT); COMPUTE(RSQRT); COMPUTE(EXP); COMPUTE(LOG); COMPUTE(SIN); COMPUTE(COS); COMPUTE(TAN); COMPUTE(ASIN); COMPUTE(ACOS); COMPUTE(ATAN); COMPUTE(RECIPROCAL); COMPUTE(LOG1P); COMPUTE(TANH); COMPUTE(SIGMOID); COMPUTE(EXPM1); COMPUTE(ACOSH); COMPUTE(ATANH); COMPUTE(SIGN); COMPUTE(COSH); COMPUTE(ROUND); COMPUTE(SINH); COMPUTE(ASINH); COMPUTE(HARDSWISH); #undef COMPUTE } #define BINARY_FUNC(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void Binary##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int total = sizeZ * sizeY * sizeX;\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ TIn x = input0[srcOffset];\ TIn y = input1[srcOffset1];\ output[dstOffset] = (TOut)Func;\ }\ }\ #define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? 
-1 : 0)) BINARY_FUNC(ADD, x+y); BINARY_FUNC(SUB, x-y); BINARY_FUNC(MUL, x*y); BINARY_FUNC(DIV, x/y); BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC(MINIMUM, min(x, y)); BINARY_FUNC(MAXIMUM, max(x, y)); BINARY_FUNC(GREATER, x > y ? 1 : 0); BINARY_FUNC(LESS, x < y ? 1 : 0); BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC(EQUAL, x == y ? 1 : 0); BINARY_FUNC(NOTEQUAL, x != y ? 1 : 0); BINARY_FUNC(FLOORDIV, floor(x / y)); BINARY_FUNC(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC(POW, pow(x, y)); BINARY_FUNC(ATAN2, atan2(x, y)); BINARY_FUNC(MOD, x - x / y); BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0); void BinaryBlitTemplateFloat(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // TODO: Support FP16 MNN_ASSERT(bytes==4); #define COMPUTE_FLOAT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ Binary##TYPE<<<block_num, threads_num>>>((const float*)input, (const float*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE_FLOAT(ADD, float); COMPUTE_FLOAT(SUB, float); COMPUTE_FLOAT(MUL, float); COMPUTE_FLOAT(DIV, float); COMPUTE_FLOAT(REALDIV, float); COMPUTE_FLOAT(MINIMUM, float); COMPUTE_FLOAT(MAXIMUM, float); COMPUTE_FLOAT(GREATER, int); COMPUTE_FLOAT(LESS, int); COMPUTE_FLOAT(LESS_EQUAL, int); COMPUTE_FLOAT(GREATER_EQUAL, int); COMPUTE_FLOAT(EQUAL, int); COMPUTE_FLOAT(NOTEQUAL, int); COMPUTE_FLOAT(FLOORDIV, float); COMPUTE_FLOAT(FLOORMOD, float); COMPUTE_FLOAT(POW, float); COMPUTE_FLOAT(SquaredDifference, 
float); COMPUTE_FLOAT(ATAN2, float); COMPUTE_FLOAT(MOD, float); } void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); #define COMPUTE_INT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ Binary##TYPE<<<block_num, threads_num>>>((const int*)input, (const int*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2]);\ return;\ }\ COMPUTE_INT(ADD, int); COMPUTE_INT(SUB, int); COMPUTE_INT(MUL, int); COMPUTE_INT(DIV, int); COMPUTE_INT(MINIMUM, int); COMPUTE_INT(MAXIMUM, int); COMPUTE_INT(GREATER, int); COMPUTE_INT(LESS, int); COMPUTE_INT(LESS_EQUAL, int); COMPUTE_INT(GREATER_EQUAL, int); COMPUTE_INT(EQUAL, int); COMPUTE_INT(NOTEQUAL, int); COMPUTE_INT(SquaredDifference, int); COMPUTE_INT(MOD, int); COMPUTE_INT(LOGICALOR, int); } void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType) { if (type.code == halide_type_float) { BinaryBlitTemplateFloat(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType); } else if (type.code == halide_type_int) { BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType); } } }// namespace CUDA }// namespace MNN
83c1f8ab0fe3bc56506dba396015e85e7f37e82f.hip
// !!! This is a file automatically generated by hipify!!!
// Power microbenchmark: saturates the GPU's floating-point division units for
// a user-supplied number of iterations and reports the kernel runtime.

// Includes
#include <stdio.h>
#include <stdlib.h>

// includes from project

// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640

// Variables
float* h_A;  // host input vector A
float* h_B;  // host input vector B
float* h_C;  // host output vector
float* d_A;  // device mirror of h_A
float* d_B;  // device mirror of h_B
float* d_C;  // device output buffer

// Functions
void CleanupResources(void);
void RandomInit(float*, int);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
  if(hipSuccess != err){
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
  hipError_t err = hipGetLastError();
  if (hipSuccess != err){
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

// Kernel: each thread repeatedly divides values derived from A[i] and B[i].
// Only lanes 0-23 of every 32-thread warp run the loop, creating deliberate
// partial-warp divergence (a feature of this power benchmark, not a bug).
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  //Do Some Computation
  float Value1 = 0;
  float Value2 = 0;
  float Value3;
  float Value;
  float I1 = A[i];
  float I2 = B[i];

  // Excessive Division access
  if((i%32)<=23){
    // FIX: the loop counter was 'unsigned' compared against the signed
    // 'iterations' (silent signed/unsigned mismatch); use int throughout.
    for(int k=0; k<iterations; k++) {
      Value1 = I1 / I2;
      Value3 = I1 / I2;
      Value1 /= Value2;
      Value1 /= Value2;
      Value2 = Value3 / Value1;
      Value1 = Value2 / Value3;
    }
  }
  __syncthreads();
  Value = Value1;
  C[i] = Value + Value2;
}

int main(int argc, char** argv)
{
  int iterations;
  if (argc != 2){
    fprintf(stderr,"usage: %s #iterations\n",argv[0]);
    exit(1);
  }
  else{
    iterations = atoi(argv[1]);
  }

  printf("Power Microbenchmark with %d iterations\n",iterations);
  int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
  size_t size = N * sizeof(float);

  // Allocate input vectors h_A and h_B in host memory.
  // FIX: on allocation failure the original called CleanupResources() and then
  // fell through to dereference the NULL pointer; exit instead.
  h_A = (float*)malloc(size);
  if (h_A == 0) { CleanupResources(); exit(-1); }
  h_B = (float*)malloc(size);
  if (h_B == 0) { CleanupResources(); exit(-1); }
  h_C = (float*)malloc(size);
  if (h_C == 0) { CleanupResources(); exit(-1); }

  // Initialize input vectors
  RandomInit(h_A, N);
  RandomInit(h_B, N);

  // Allocate vectors in device memory
  printf("before\n");
  checkCudaErrors( hipMalloc((void**)&d_A, size) );
  checkCudaErrors( hipMalloc((void**)&d_B, size) );
  checkCudaErrors( hipMalloc((void**)&d_C, size) );
  printf("after\n");

  hipEvent_t start, stop;
  float elapsedTime = 0;
  checkCudaErrors(hipEventCreate(&start));
  checkCudaErrors(hipEventCreate(&stop));

  // Copy vectors from host memory to device memory
  checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
  checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );

  // (removed unused dimGrid2/dimBlock2 locals)
  dim3 dimGrid(NUM_OF_BLOCKS,1);
  dim3 dimBlock(THREADS_PER_BLOCK,1);

  // Time the kernel with events; hipEventElapsedTime reports milliseconds.
  checkCudaErrors(hipEventRecord(start));
  hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
  checkCudaErrors(hipEventRecord(stop));

  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
  printf("gpu execution time = %.2f s\n", elapsedTime/1000);
  getLastCudaError("kernel launch failure");
  hipDeviceSynchronize();

  // Copy result from device memory to host memory
  // h_C contains the result in host memory
  checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );

  checkCudaErrors(hipEventDestroy(start));
  checkCudaErrors(hipEventDestroy(stop));
  CleanupResources();

  return 0;
}

void CleanupResources(void)
{
  // Free device memory
  if (d_A)
    hipFree(d_A);
  if (d_B)
    hipFree(d_B);
  if (d_C)
    hipFree(d_C);

  // Free host memory
  if (h_A)
    free(h_A);
  if (h_B)
    free(h_B);
  if (h_C)
    free(h_C);
}

// Allocates an array with random float entries in [0, 1].
void RandomInit(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    // BUG FIX: 'rand() / RAND_MAX' is integer division, which truncates to 0
    // for (almost) every sample -- the array was all zeros and the kernel
    // then divided 0 by 0. Divide in floating point instead.
    data[i] = rand() / (float)RAND_MAX;
  }
}
83c1f8ab0fe3bc56506dba396015e85e7f37e82f.cu
// Includes
// Power microbenchmark (CUDA original of the hipified twin): saturates the
// GPU's floating-point division units and reports the kernel runtime.
#include <stdio.h>
#include <stdlib.h>

// includes from project

// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640

// Variables
float* h_A;  // host input vector A
float* h_B;  // host input vector B
float* h_C;  // host output vector
float* d_A;  // device mirror of h_A
float* d_B;  // device mirror of h_B
float* d_C;  // device output buffer

// Functions
void CleanupResources(void);
void RandomInit(float*, int);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

// FIX: use the canonical cudaError_t typedef for the error parameter.
inline void __checkCudaErrors(cudaError_t err, const char *file, const int line )
{
  if(cudaSuccess != err){
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err){
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

// Kernel: each thread repeatedly divides values derived from A[i] and B[i].
// Only lanes 0-23 of every 32-thread warp run the loop, creating deliberate
// partial-warp divergence (a feature of this power benchmark, not a bug).
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  //Do Some Computation
  float Value1 = 0;
  float Value2 = 0;
  float Value3;
  float Value;
  float I1 = A[i];
  float I2 = B[i];

  // Excessive Division access
  if((i%32)<=23){
    // FIX: the loop counter was 'unsigned' compared against the signed
    // 'iterations' (silent signed/unsigned mismatch); use int throughout.
    for(int k=0; k<iterations; k++) {
      Value1 = I1 / I2;
      Value3 = I1 / I2;
      Value1 /= Value2;
      Value1 /= Value2;
      Value2 = Value3 / Value1;
      Value1 = Value2 / Value3;
    }
  }
  __syncthreads();
  Value = Value1;
  C[i] = Value + Value2;
}

int main(int argc, char** argv)
{
  int iterations;
  if (argc != 2){
    fprintf(stderr,"usage: %s #iterations\n",argv[0]);
    exit(1);
  }
  else{
    iterations = atoi(argv[1]);
  }

  printf("Power Microbenchmark with %d iterations\n",iterations);
  int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
  size_t size = N * sizeof(float);

  // Allocate input vectors h_A and h_B in host memory.
  // FIX: on allocation failure the original called CleanupResources() and then
  // fell through to dereference the NULL pointer; exit instead.
  h_A = (float*)malloc(size);
  if (h_A == 0) { CleanupResources(); exit(-1); }
  h_B = (float*)malloc(size);
  if (h_B == 0) { CleanupResources(); exit(-1); }
  h_C = (float*)malloc(size);
  if (h_C == 0) { CleanupResources(); exit(-1); }

  // Initialize input vectors
  RandomInit(h_A, N);
  RandomInit(h_B, N);

  // Allocate vectors in device memory
  printf("before\n");
  checkCudaErrors( cudaMalloc((void**)&d_A, size) );
  checkCudaErrors( cudaMalloc((void**)&d_B, size) );
  checkCudaErrors( cudaMalloc((void**)&d_C, size) );
  printf("after\n");

  cudaEvent_t start, stop;
  float elapsedTime = 0;
  checkCudaErrors(cudaEventCreate(&start));
  checkCudaErrors(cudaEventCreate(&stop));

  // Copy vectors from host memory to device memory
  checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
  checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );

  // (removed unused dimGrid2/dimBlock2 locals)
  dim3 dimGrid(NUM_OF_BLOCKS,1);
  dim3 dimBlock(THREADS_PER_BLOCK,1);

  // Time the kernel with events; cudaEventElapsedTime reports milliseconds.
  checkCudaErrors(cudaEventRecord(start));
  PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
  checkCudaErrors(cudaEventRecord(stop));

  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
  printf("gpu execution time = %.2f s\n", elapsedTime/1000);
  getLastCudaError("kernel launch failure");
  // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
  // the supported equivalent.
  cudaDeviceSynchronize();

  // Copy result from device memory to host memory
  // h_C contains the result in host memory
  checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );

  checkCudaErrors(cudaEventDestroy(start));
  checkCudaErrors(cudaEventDestroy(stop));
  CleanupResources();

  return 0;
}

void CleanupResources(void)
{
  // Free device memory
  if (d_A)
    cudaFree(d_A);
  if (d_B)
    cudaFree(d_B);
  if (d_C)
    cudaFree(d_C);

  // Free host memory
  if (h_A)
    free(h_A);
  if (h_B)
    free(h_B);
  if (h_C)
    free(h_C);
}

// Allocates an array with random float entries in [0, 1].
void RandomInit(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    // BUG FIX: 'rand() / RAND_MAX' is integer division, which truncates to 0
    // for (almost) every sample -- the array was all zeros and the kernel
    // then divided 0 by 0. Divide in floating point instead.
    data[i] = rand() / (float)RAND_MAX;
  }
}
187c390fe553d988c30f69d4d1d3e068c63766fb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * The MIT License
 *
 * Copyright (c) 1997-2015 The University of Utah
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sci_defs/cuda_defs.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Parallel/Parallel.h>
#include <Core/Util/GPU.h>
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>

namespace Uintah {

//______________________________________________________________________
//
// @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver
// @param patchID the patch this kernel will operate over
// @param matlIndex the material associated with the specified patchID
// @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z)
// @param old_gpudw the old GPU DataWarehouse
// @param new_gpudw the new GPU DataWarehouse
// NOTE(review): the 'stream' parameter is never used inside the kernel body;
// it is merely forwarded by the host-side launcher below.
__global__ void unifiedSchedulerTestKernel( int patchID,
                                            uint3 patchNodeLowIndex,
                                            uint3 patchNodeHighIndex,
                                            uint3 domainLow,
                                            uint3 domainHigh,
                                            GPUDataWarehouse * old_gpudw,
                                            GPUDataWarehouse * new_gpudw,
                                            hipStream_t * stream) {
  // Source ("old" DW) and destination ("new" DW) views of the "phi" grid
  // variable for this patch, material 0.
  const GPUGridVariable<double> phi;
  GPUGridVariable<double> newphi;
  old_gpudw->get(phi, "phi", patchID, 0);
  new_gpudw->getModifiable(newphi, "phi", patchID, 0);

  // calculate the thread indices
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;

  // If the threads are within the bounds of the patch
  // the algorithm is allowed to stream along the z direction
  // applying the stencil to a line of cells.  The z direction
  // is streamed because it allows access of x and y elements
  // that are close to one another which should allow coalesced
  // memory accesses.

  // We also need to copy the boundary cells on the z faces.
  // These outer cells don't get computed, just preserved across iterations
  // newphi(i,j,k) = phi(i,j,k)
  // NOTE(review): each clause appears to match a thread sitting exactly on an
  // x/y boundary node that this patch owns (offset of 1 between the patch node
  // index and the domain bound) -- confirm against the host-side index setup.
  if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) ||
      (domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) ||
      (patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) ||
      (patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      newphi(i,j,k) = phi(i,j,k);
    }
  }
  // Preserve the z-face boundary layers for threads inside the patch extent.
  if(i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) {
    if (domainLow.z - patchNodeLowIndex.z == 1){
      newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z);
    }
    if (patchNodeHighIndex.z - domainHigh.z == 1) {
      newphi(i,j,patchNodeHighIndex.z - 1) = phi(i,j,patchNodeHighIndex.z - 1);
    }
  }
  // Interior nodes: 6-point Jacobi stencil, streaming along z.
  if(i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      newphi(i,j,k) = (1. / 6)
                    * (phi(i-1, j, k)
                     + phi(i+1, j, k)
                     + phi(i, j-1, k)
                     + phi(i, j+1, k)
                     + phi(i, j, k-1)
                     + phi(i, j, k+1));
    }
  }
}

// Host-side wrapper: launches unifiedSchedulerTestKernel on the given HIP
// stream with the caller-chosen grid/block configuration.
void launchUnifiedSchedulerTestKernel( dim3 dimGrid,
                                       dim3 dimBlock,
                                       hipStream_t * stream,
                                       int patchID,
                                       uint3 patchNodeLowIndex,
                                       uint3 patchNodeHighIndex,
                                       uint3 domainLow,
                                       uint3 domainHigh,
                                       GPUDataWarehouse * old_gpudw,
                                       GPUDataWarehouse * new_gpudw) {
  hipLaunchKernelGGL(( unifiedSchedulerTestKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream,  patchID,
                                                                    patchNodeLowIndex,
                                                                    patchNodeHighIndex,
                                                                    domainLow,
                                                                    domainHigh,
                                                                    old_gpudw,
                                                                    new_gpudw,
                                                                    stream );
}

} //end namespace Uintah
187c390fe553d988c30f69d4d1d3e068c63766fb.cu
/*
 * The MIT License
 *
 * Copyright (c) 1997-2015 The University of Utah
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sci_defs/cuda_defs.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Parallel/Parallel.h>
#include <Core/Util/GPU.h>
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>

namespace Uintah {

//______________________________________________________________________
//
// @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver
// @param patchID the patch this kernel will operate over
// @param matlIndex the material associated with the specified patchID
// @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z)
// @param old_gpudw the old GPU DataWarehouse
// @param new_gpudw the new GPU DataWarehouse
// NOTE(review): the 'stream' parameter is never used inside the kernel body;
// it is merely forwarded by the host-side launcher below.
__global__ void unifiedSchedulerTestKernel( int patchID,
                                            uint3 patchNodeLowIndex,
                                            uint3 patchNodeHighIndex,
                                            uint3 domainLow,
                                            uint3 domainHigh,
                                            GPUDataWarehouse * old_gpudw,
                                            GPUDataWarehouse * new_gpudw,
                                            cudaStream_t * stream) {
  // Source ("old" DW) and destination ("new" DW) views of the "phi" grid
  // variable for this patch, material 0.
  const GPUGridVariable<double> phi;
  GPUGridVariable<double> newphi;
  old_gpudw->get(phi, "phi", patchID, 0);
  new_gpudw->getModifiable(newphi, "phi", patchID, 0);

  // calculate the thread indices
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;

  // If the threads are within the bounds of the patch
  // the algorithm is allowed to stream along the z direction
  // applying the stencil to a line of cells.  The z direction
  // is streamed because it allows access of x and y elements
  // that are close to one another which should allow coalesced
  // memory accesses.

  // We also need to copy the boundary cells on the z faces.
  // These outer cells don't get computed, just preserved across iterations
  // newphi(i,j,k) = phi(i,j,k)
  // NOTE(review): each clause appears to match a thread sitting exactly on an
  // x/y boundary node that this patch owns (offset of 1 between the patch node
  // index and the domain bound) -- confirm against the host-side index setup.
  if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) ||
      (domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) ||
      (patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) ||
      (patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      newphi(i,j,k) = phi(i,j,k);
    }
  }
  // Preserve the z-face boundary layers for threads inside the patch extent.
  if(i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) {
    if (domainLow.z - patchNodeLowIndex.z == 1){
      newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z);
    }
    if (patchNodeHighIndex.z - domainHigh.z == 1) {
      newphi(i,j,patchNodeHighIndex.z - 1) = phi(i,j,patchNodeHighIndex.z - 1);
    }
  }
  // Interior nodes: 6-point Jacobi stencil, streaming along z.
  if(i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      newphi(i,j,k) = (1. / 6)
                    * (phi(i-1, j, k)
                     + phi(i+1, j, k)
                     + phi(i, j-1, k)
                     + phi(i, j+1, k)
                     + phi(i, j, k-1)
                     + phi(i, j, k+1));
    }
  }
}

// Host-side wrapper: launches unifiedSchedulerTestKernel on the given CUDA
// stream with the caller-chosen grid/block configuration.
void launchUnifiedSchedulerTestKernel( dim3 dimGrid,
                                       dim3 dimBlock,
                                       cudaStream_t * stream,
                                       int patchID,
                                       uint3 patchNodeLowIndex,
                                       uint3 patchNodeHighIndex,
                                       uint3 domainLow,
                                       uint3 domainHigh,
                                       GPUDataWarehouse * old_gpudw,
                                       GPUDataWarehouse * new_gpudw) {
  unifiedSchedulerTestKernel<<< dimGrid, dimBlock, 0, *stream>>>( patchID,
                                                                  patchNodeLowIndex,
                                                                  patchNodeHighIndex,
                                                                  domainLow,
                                                                  domainHigh,
                                                                  old_gpudw,
                                                                  new_gpudw,
                                                                  stream );
}

} //end namespace Uintah
89b3bf20d7a3cf0a89e3c95b5f93e9f24146a044.hip
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
 *
 * (C) Copyright 2010 The Board of Trustees of the
 * University of Illinois
 * All Rights Reserved
 *
 ******************************************************************************/

#include <stdio.h>
#include <stdlib.h>

#include "kernel.hip"
#include "support.h"

// SGEMM benchmark driver: builds A (m x k), B (k x n) and C (m x n) on the
// host, mirrors them on the device, times basicSgemm(), and copies C back.
int main (int argc, char *argv[])
{
    Timer timer;
    hipError_t cuda_ret;

    // Initialize host variables ----------------------------------------------

    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);

    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    size_t A_sz, B_sz, C_sz;
    unsigned matArow, matAcol;
    unsigned matBrow, matBcol;
    dim3 dim_grid, dim_block;

    if (argc == 1) {
        matArow = 1000;
        matAcol = matBrow = 1000;
        matBcol = 1000;
    } else if (argc == 2) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[1]);
        matBcol = atoi(argv[1]);
    } else if (argc == 4) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[2]);
        matBcol = atoi(argv[3]);
    } else {
        printf("\n Invalid input parameters!"
               "\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
               "\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
               "\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
               "\n");
        exit(0);
    }

    A_sz = matArow*matAcol;
    B_sz = matBrow*matBcol;
    C_sz = matArow*matBcol;

    // FIX: check host allocations; the original dereferenced them blindly.
    A_h = (float*) malloc( sizeof(float)*A_sz );
    if (A_h == NULL) FATAL("Unable to allocate host memory");
    for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }

    B_h = (float*) malloc( sizeof(float)*B_sz );
    if (B_h == NULL) FATAL("Unable to allocate host memory");
    for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }

    // BUG FIX: C_h was malloc'd but never initialized, yet it is copied to the
    // device below; calloc gives it defined (zero) contents.
    C_h = (float*) calloc( C_sz, sizeof(float) );
    if (C_h == NULL) FATAL("Unable to allocate host memory");

    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol);

    // Allocate device variables ----------------------------------------------

    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);

    // FIX: the return codes of the device allocations were ignored.
    cuda_ret = hipMalloc((void**) &A_d, sizeof(float) * A_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = hipMalloc((void**) &B_d, sizeof(float) * B_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = hipMalloc((void**) &C_d, sizeof(float) * C_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy host variables to device ------------------------------------------

    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);

    // FIX: the return codes of the copies were ignored.
    cuda_ret = hipMemcpy(A_d, A_h, sizeof(float) * A_sz, hipMemcpyHostToDevice);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");
    cuda_ret = hipMemcpy(B_d, B_h, sizeof(float) * B_sz, hipMemcpyHostToDevice);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");
    cuda_ret = hipMemcpy(C_d, C_h, sizeof(float) * C_sz, hipMemcpyHostToDevice);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Launch kernel using standard sgemm interface ---------------------------
    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);
    // NOTE(review): ldc is passed as matBrow; for an m x n result matrix the
    // leading dimension is normally matArow -- confirm against basicSgemm.
    basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
        A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);

    cuda_ret = hipDeviceSynchronize();
    if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy device variables from host ----------------------------------------

    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);

    cuda_ret = hipMemcpy(C_h, C_d, sizeof(float) * C_sz, hipMemcpyDeviceToHost);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the host");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Verify correctness -----------------------------------------------------

    printf("Verifying results..."); fflush(stdout);

    //verify(A_h, B_h, C_h, matArow, matAcol, matBcol);

    // Free memory ------------------------------------------------------------

    free(A_h);
    free(B_h);
    free(C_h);

    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);

    return 0;
}
89b3bf20d7a3cf0a89e3c95b5f93e9f24146a044.cu
/******************************************************************************
 *
 * (C) Copyright 2010 The Board of Trustees of the
 * University of Illinois
 * All Rights Reserved
 *
 ******************************************************************************/

#include <stdio.h>
#include <stdlib.h>

#include "kernel.cu"
#include "support.h"

// SGEMM benchmark driver (CUDA original of the hipified twin): builds
// A (m x k), B (k x n) and C (m x n) on the host, mirrors them on the
// device, times basicSgemm(), and copies C back.
int main (int argc, char *argv[])
{
    Timer timer;
    cudaError_t cuda_ret;

    // Initialize host variables ----------------------------------------------

    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);

    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    size_t A_sz, B_sz, C_sz;
    unsigned matArow, matAcol;
    unsigned matBrow, matBcol;
    dim3 dim_grid, dim_block;

    if (argc == 1) {
        matArow = 1000;
        matAcol = matBrow = 1000;
        matBcol = 1000;
    } else if (argc == 2) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[1]);
        matBcol = atoi(argv[1]);
    } else if (argc == 4) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[2]);
        matBcol = atoi(argv[3]);
    } else {
        printf("\n Invalid input parameters!"
               "\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
               "\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
               "\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
               "\n");
        exit(0);
    }

    A_sz = matArow*matAcol;
    B_sz = matBrow*matBcol;
    C_sz = matArow*matBcol;

    // FIX: check host allocations; the original dereferenced them blindly.
    A_h = (float*) malloc( sizeof(float)*A_sz );
    if (A_h == NULL) FATAL("Unable to allocate host memory");
    for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }

    B_h = (float*) malloc( sizeof(float)*B_sz );
    if (B_h == NULL) FATAL("Unable to allocate host memory");
    for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }

    // BUG FIX: C_h was malloc'd but never initialized, yet it is copied to the
    // device below; calloc gives it defined (zero) contents.
    C_h = (float*) calloc( C_sz, sizeof(float) );
    if (C_h == NULL) FATAL("Unable to allocate host memory");

    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol);

    // Allocate device variables ----------------------------------------------

    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);

    // FIX: the return codes of the device allocations were ignored.
    cuda_ret = cudaMalloc((void**) &A_d, sizeof(float) * A_sz);
    if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = cudaMalloc((void**) &B_d, sizeof(float) * B_sz);
    if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = cudaMalloc((void**) &C_d, sizeof(float) * C_sz);
    if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy host variables to device ------------------------------------------

    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);

    // FIX: the return codes of the copies were ignored.
    cuda_ret = cudaMemcpy(A_d, A_h, sizeof(float) * A_sz, cudaMemcpyHostToDevice);
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device");
    cuda_ret = cudaMemcpy(B_d, B_h, sizeof(float) * B_sz, cudaMemcpyHostToDevice);
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device");
    cuda_ret = cudaMemcpy(C_d, C_h, sizeof(float) * C_sz, cudaMemcpyHostToDevice);
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device");

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Launch kernel using standard sgemm interface ---------------------------
    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);
    // NOTE(review): ldc is passed as matBrow; for an m x n result matrix the
    // leading dimension is normally matArow -- confirm against basicSgemm.
    basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
        A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);

    cuda_ret = cudaDeviceSynchronize();
    if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy device variables from host ----------------------------------------

    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);

    cuda_ret = cudaMemcpy(C_h, C_d, sizeof(float) * C_sz, cudaMemcpyDeviceToHost);
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the host");

    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Verify correctness -----------------------------------------------------

    printf("Verifying results..."); fflush(stdout);

    //verify(A_h, B_h, C_h, matArow, matAcol, matBcol);

    // Free memory ------------------------------------------------------------

    free(A_h);
    free(B_h);
    free(C_h);

    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);

    return 0;
}
6b0e1024c0414fca672ee7e3a325eb29b538df45.hip
// !!! This is a file automatically generated by hipify!!! /* ECL-GC code: ECL-GC is a graph-coloring algorithm with shortcutting. The CUDA implementation thereof is quite fast. It operates on graphs stored in binary CSR format. Copyright (c) 2020, Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Ghadeer Alabandi, Evan Powers, and Martin Burtscher URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-GC/. Publication: This work is described in detail in the following paper. Ghadeer Alabandi, Evan Powers, and Martin Burtscher. 
Increasing the Parallelism of Graph Coloring via Shortcutting. Proceedings of the 2020 ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, pp. 262-275. February 2020. */ #include <cstdio> #include <cstdlib> #include <algorithm> #include <chrono> #include <hip/hip_runtime.h> #include "graph.h" static const int ThreadsPerBlock = 512; static const int WS = 32; // warp size and bits per int static const int MSB = 1 << (WS - 1); static const int Mask = (1 << (WS / 2)) - 1; // https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key static __device__ unsigned int hash(unsigned int val) { val = ((val >> 16) ^ val) * 0x45d9f3b; val = ((val >> 16) ^ val) * 0x45d9f3b; return (val >> 16) ^ val; } __global__ void init(const int nodes, const int edges, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nlist2, int* const __restrict__ posscol, int* const __restrict__ posscol2, int* const __restrict__ color, int* const __restrict__ wl, int* __restrict__ wlsize) { const int lane = threadIdx.x % WS; const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; int maxrange = -1; for (int v = thread; __any(v < nodes); v += threads) { bool cond = false; int beg, end, pos, degv, active; if (v < nodes) { beg = nidx[v]; end = nidx[v + 1]; degv = end - beg; cond = (degv >= WS); if (cond) { wl[atomicAdd(wlsize, 1)] = v; } else { active = 0; pos = beg; for (int i = beg; i < end; i++) { const int nei = nlist[i]; const int degn = nidx[nei + 1] - nidx[nei]; if ((degv < degn) || ((degv == degn) && (hash(v) < hash(nei))) || ((degv == degn) && (hash(v) == hash(nei)) && (v < nei))) { active |= (unsigned int)MSB >> (i - beg); pos++; } } } } int bal = __ballot(cond); while (bal != 0) { const int who = __ffs(bal) - 1; bal &= bal - 1; const int wv = __shfl(v, who); const int wbeg = __shfl(beg, who); const int wend = __shfl(end, 
who); const int wdegv = wend - wbeg; int wpos = wbeg; for (int i = wbeg + lane; __any(i < wend); i += WS) { int wnei; bool prio = false; if (i < wend) { wnei = nlist[i]; const int wdegn = nidx[wnei + 1] - nidx[wnei]; prio = ((wdegv < wdegn) || ((wdegv == wdegn) && (hash(wv) < hash(wnei))) || ((wdegv == wdegn) && (hash(wv) == hash(wnei)) && (wv < wnei))); } const int b = __ballot(prio); const int offs = __popc(b & ((1 << lane) - 1)); if (prio) nlist2[wpos + offs] = wnei; wpos += __popc(b); } if (who == lane) pos = wpos; } if (v < nodes) { const int range = pos - beg; maxrange = max(maxrange, range); color[v] = (cond || (range == 0)) ? (range << (WS / 2)) : active; posscol[v] = (range >= WS) ? -1 : (MSB >> range); } } //if (maxrange >= Mask) printf("too many active neighbors\n"); for (int i = thread; i < edges / WS + 1; i += threads) posscol2[i] = -1; } __global__ void runLarge(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ posscol, int* const __restrict__ posscol2, volatile int* const __restrict__ color, const int* const __restrict__ wl, const int* __restrict__ wlsize) { const int stop = *wlsize; if (stop != 0) { const int lane = threadIdx.x % WS; const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; bool again; do { again = false; for (int w = thread; __any(w < stop); w += threads) { bool shortcut, done, cond = false; int v, data, range, beg, pcol; if (w < stop) { v = wl[w]; data = color[v]; range = data >> (WS / 2); if (range > 0) { beg = nidx[v]; pcol = posscol[v]; cond = true; } } int bal = __ballot(cond); while (bal != 0) { const int who = __ffs(bal) - 1; bal &= bal - 1; const int wdata = __shfl(data, who); const int wrange = wdata >> (WS / 2); const int wbeg = __shfl(beg, who); const int wmincol = wdata & Mask; const int wmaxcol = wmincol + wrange; const int wend = wbeg + wmaxcol; const int woffs = wbeg / WS; int wpcol = __shfl(pcol, 
who); bool wshortcut = true; bool wdone = true; for (int i = wbeg + lane; __any(i < wend); i += WS) { int nei, neidata, neirange; if (i < wend) { nei = nlist[i]; neidata = color[nei]; neirange = neidata >> (WS / 2); const bool neidone = (neirange == 0); wdone &= neidone; //consolidated below if (neidone) { const int neicol = neidata; if (neicol < WS) { wpcol &= ~((unsigned int)MSB >> neicol); //consolidated below } else { if ((wmincol <= neicol) && (neicol < wmaxcol) && ((posscol2[woffs + neicol / WS] << (neicol % WS)) < 0)) { atomicAnd((int*)&posscol2[woffs + neicol / WS], ~((unsigned int)MSB >> (neicol % WS))); } } } else { const int neimincol = neidata & Mask; const int neimaxcol = neimincol + neirange; if ((neimincol <= wmincol) && (neimaxcol >= wmincol)) wshortcut = false; //consolidated below } } } wshortcut = __all(wshortcut); wdone = __all(wdone); wpcol &= __shfl_xor(wpcol, 1); wpcol &= __shfl_xor(wpcol, 2); wpcol &= __shfl_xor(wpcol, 4); wpcol &= __shfl_xor(wpcol, 8); wpcol &= __shfl_xor(wpcol, 16); if (who == lane) pcol = wpcol; if (who == lane) done = wdone; if (who == lane) shortcut = wshortcut; } if (w < stop) { if (range > 0) { const int mincol = data & Mask; int val = pcol, mc = 0; if (pcol == 0) { const int offs = beg / WS; mc = max(1, mincol / WS); while ((val = posscol2[offs + mc]) == 0) mc++; } int newmincol = mc * WS + __clz(val); if (mincol != newmincol) shortcut = false; if (shortcut || done) { pcol = (newmincol < WS) ? 
((unsigned int)MSB >> newmincol) : 0; } else { const int maxcol = mincol + range; const int range = maxcol - newmincol; newmincol = (range << (WS / 2)) | newmincol; again = true; } posscol[v] = pcol; color[v] = newmincol; } } } } while (__any(again)); } } __global__ void runSmall(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, volatile int* const __restrict__ posscol, int* const __restrict__ color) //int* __restrict__ wlsize) { const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; bool again; do { again = false; for (int v = thread; v < nodes; v += threads) { int pcol = posscol[v]; if (__popc(pcol) > 1) { const int beg = nidx[v]; int active = color[v]; int allnei = 0; int keep = active; do { const int old = active; active &= active - 1; const int curr = old ^ active; const int i = beg + __clz(curr); const int nei = nlist[i]; const int neipcol = posscol[nei]; allnei |= neipcol; if ((pcol & neipcol) == 0) { pcol &= pcol - 1; keep ^= curr; } else if (__popc(neipcol) == 1) { pcol ^= neipcol; keep ^= curr; } } while (active != 0); if (keep != 0) { const int best = (unsigned int)MSB >> __clz(pcol); if ((best & ~allnei) != 0) { pcol = best; keep = 0; } } again |= keep; if (keep == 0) keep = __clz(pcol); color[v] = keep; posscol[v] = pcol; } } } while (again); } int main(int argc, char* argv[]) { printf("ECL-GC v1.2 (%s)\n", __FILE__); printf("Copyright 2020 Texas State University\n\n"); if (argc != 3) {printf("USAGE: %s <input_file_name> <repeat>\n\n", argv[0]); exit(-1);} if (WS != 32) {printf("ERROR: warp size must be 32\n\n"); exit(-1);} if (WS != sizeof(int) * 8) {printf("ERROR: bits per word must match warp size\n\n"); exit(-1);} if ((ThreadsPerBlock < WS) || ((ThreadsPerBlock % WS) != 0)) { printf("ERROR: threads per block must be a multiple of the warp size\n\n"); exit(-1); } if ((ThreadsPerBlock & (ThreadsPerBlock - 1)) != 0) { printf("ERROR: threads per block must 
be a power of two\n\n"); exit(-1); } ECLgraph g = readECLgraph(argv[1]); printf("input: %s\n", argv[1]); printf("nodes: %d\n", g.nodes); printf("edges: %d\n", g.edges); printf("avg degree: %.2f\n", 1.0 * g.edges / g.nodes); const int repeat = atoi(argv[2]); int* const color = new int [g.nodes]; int *nidx_d, *nlist_d, *nlist2_d, *posscol_d, *posscol2_d, *color_d, *wl_d, *wlsize_d; if (hipSuccess != hipMalloc((void **)&nidx_d, (g.nodes + 1) * sizeof(int))) printf("ERROR: could not allocate nidx_d\n\n"); if (hipSuccess != hipMalloc((void **)&nlist_d, g.edges * sizeof(int))) printf("ERROR: could not allocate nlist_d\n\n"); if (hipSuccess != hipMalloc((void **)&nlist2_d, g.edges * sizeof(int))) printf("ERROR: could not allocate nlist2_d\n\n"); if (hipSuccess != hipMalloc((void **)&posscol_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate posscol_d\n\n"); if (hipSuccess != hipMalloc((void **)&posscol2_d, (g.edges / WS + 1) * sizeof(int))) printf("ERROR: could not allocate posscol2_d\n\n"); if (hipSuccess != hipMalloc((void **)&color_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate color_d\n\n"); if (hipSuccess != hipMalloc((void **)&wl_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate wl_d\n\n"); if (hipSuccess != hipMalloc((void **)&wlsize_d, sizeof(int))) printf("ERROR: could not allocate wlsize\n\n"); if (hipSuccess != hipMemcpy(nidx_d, g.nindex, (g.nodes + 1) * sizeof(int), hipMemcpyHostToDevice)) printf("ERROR: copying nidx to device failed\n\n"); if (hipSuccess != hipMemcpy(nlist_d, g.nlist, g.edges * sizeof(int), hipMemcpyHostToDevice)) printf("ERROR: copying nlist to device failed\n\n"); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); const int SMs = deviceProp.multiProcessorCount; const int mTpSM = deviceProp.maxThreadsPerMultiProcessor; const int blocks = SMs * mTpSM / ThreadsPerBlock; printf("Total number of compute units: %d\n", SMs); printf("Maximum resident threads per compute unit: %d\n", mTpSM); 
printf("Work-group size: %d\n", ThreadsPerBlock); printf("Total number of work-groups: %d\n", blocks); hipDeviceSynchronize(); auto start = std::chrono::high_resolution_clock::now(); for (int n = 0; n < repeat; n++) { hipMemset(wlsize_d, 0, sizeof(int)); hipLaunchKernelGGL(( init), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, g.edges, nidx_d, nlist_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d, wlsize_d); hipLaunchKernelGGL(( runLarge), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, nidx_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d, wlsize_d); hipLaunchKernelGGL(( runSmall), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, nidx_d, nlist_d, posscol_d, color_d); } hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_seconds = end - start; float runtime = elapsed_seconds.count() / repeat; printf("average runtime (%d runs): %.6f s\n", repeat, runtime); printf("throughput: %.6f Mnodes/s\n", g.nodes * 0.000001 / runtime); printf("throughput: %.6f Medges/s\n", g.edges * 0.000001 / runtime); if (hipSuccess != hipMemcpy(color, color_d, g.nodes * sizeof(int), hipMemcpyDeviceToHost)) printf("ERROR: copying color from device failed\n\n"); hipFree(wlsize_d); hipFree(wl_d); hipFree(color_d); hipFree(posscol2_d); hipFree(posscol_d); hipFree(nlist2_d); hipFree(nlist_d); hipFree(nidx_d); bool ok = true; for (int v = 0; v < g.nodes; v++) { if (color[v] < 0) { printf("ERROR: found unprocessed node in graph (node %d with deg %d)\n\n", v, g.nindex[v + 1] - g.nindex[v]); ok = false; break; } for (int i = g.nindex[v]; i < g.nindex[v + 1]; i++) { if (color[g.nlist[i]] == color[v]) { printf("ERROR: found adjacent nodes with same color %d (%d %d)\n\n", color[v], v, g.nlist[i]); ok = false; break; } } } printf("%s\n", ok ? 
"PASS" : "FAIL"); if (ok) { const int vals = 16; int c[vals]; for (int i = 0; i < vals; i++) c[i] = 0; int cols = -1; for (int v = 0; v < g.nodes; v++) { cols = ::max(cols, color[v]); if (color[v] < vals) c[color[v]]++; } cols++; printf("Number of distinct colors used: %d\n", cols); int sum = 0; for (int i = 0; i < ::min(vals, cols); i++) { sum += c[i]; printf("color %2d: %10d (%5.1f%%)\n", i, c[i], 100.0 * sum / g.nodes); } } delete [] color; freeECLgraph(g); return 0; }
6b0e1024c0414fca672ee7e3a325eb29b538df45.cu
/* ECL-GC code: ECL-GC is a graph-coloring algorithm with shortcutting. The CUDA implementation thereof is quite fast. It operates on graphs stored in binary CSR format. Copyright (c) 2020, Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Ghadeer Alabandi, Evan Powers, and Martin Burtscher URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-GC/. Publication: This work is described in detail in the following paper. Ghadeer Alabandi, Evan Powers, and Martin Burtscher. 
Increasing the Parallelism of Graph Coloring via Shortcutting. Proceedings of the 2020 ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, pp. 262-275. February 2020. */ #include <cstdio> #include <cstdlib> #include <algorithm> #include <chrono> #include <hip/hip_runtime.h> #include "graph.h" static const int ThreadsPerBlock = 512; static const int WS = 32; // warp size and bits per int static const int MSB = 1 << (WS - 1); static const int Mask = (1 << (WS / 2)) - 1; // https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key static __device__ unsigned int hash(unsigned int val) { val = ((val >> 16) ^ val) * 0x45d9f3b; val = ((val >> 16) ^ val) * 0x45d9f3b; return (val >> 16) ^ val; } __global__ void init(const int nodes, const int edges, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nlist2, int* const __restrict__ posscol, int* const __restrict__ posscol2, int* const __restrict__ color, int* const __restrict__ wl, int* __restrict__ wlsize) { const int lane = threadIdx.x % WS; const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; int maxrange = -1; for (int v = thread; __any(v < nodes); v += threads) { bool cond = false; int beg, end, pos, degv, active; if (v < nodes) { beg = nidx[v]; end = nidx[v + 1]; degv = end - beg; cond = (degv >= WS); if (cond) { wl[atomicAdd(wlsize, 1)] = v; } else { active = 0; pos = beg; for (int i = beg; i < end; i++) { const int nei = nlist[i]; const int degn = nidx[nei + 1] - nidx[nei]; if ((degv < degn) || ((degv == degn) && (hash(v) < hash(nei))) || ((degv == degn) && (hash(v) == hash(nei)) && (v < nei))) { active |= (unsigned int)MSB >> (i - beg); pos++; } } } } int bal = __ballot(cond); while (bal != 0) { const int who = __ffs(bal) - 1; bal &= bal - 1; const int wv = __shfl(v, who); const int wbeg = __shfl(beg, who); const int wend = __shfl(end, 
who); const int wdegv = wend - wbeg; int wpos = wbeg; for (int i = wbeg + lane; __any(i < wend); i += WS) { int wnei; bool prio = false; if (i < wend) { wnei = nlist[i]; const int wdegn = nidx[wnei + 1] - nidx[wnei]; prio = ((wdegv < wdegn) || ((wdegv == wdegn) && (hash(wv) < hash(wnei))) || ((wdegv == wdegn) && (hash(wv) == hash(wnei)) && (wv < wnei))); } const int b = __ballot(prio); const int offs = __popc(b & ((1 << lane) - 1)); if (prio) nlist2[wpos + offs] = wnei; wpos += __popc(b); } if (who == lane) pos = wpos; } if (v < nodes) { const int range = pos - beg; maxrange = max(maxrange, range); color[v] = (cond || (range == 0)) ? (range << (WS / 2)) : active; posscol[v] = (range >= WS) ? -1 : (MSB >> range); } } //if (maxrange >= Mask) printf("too many active neighbors\n"); for (int i = thread; i < edges / WS + 1; i += threads) posscol2[i] = -1; } __global__ void runLarge(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ posscol, int* const __restrict__ posscol2, volatile int* const __restrict__ color, const int* const __restrict__ wl, const int* __restrict__ wlsize) { const int stop = *wlsize; if (stop != 0) { const int lane = threadIdx.x % WS; const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; bool again; do { again = false; for (int w = thread; __any(w < stop); w += threads) { bool shortcut, done, cond = false; int v, data, range, beg, pcol; if (w < stop) { v = wl[w]; data = color[v]; range = data >> (WS / 2); if (range > 0) { beg = nidx[v]; pcol = posscol[v]; cond = true; } } int bal = __ballot(cond); while (bal != 0) { const int who = __ffs(bal) - 1; bal &= bal - 1; const int wdata = __shfl(data, who); const int wrange = wdata >> (WS / 2); const int wbeg = __shfl(beg, who); const int wmincol = wdata & Mask; const int wmaxcol = wmincol + wrange; const int wend = wbeg + wmaxcol; const int woffs = wbeg / WS; int wpcol = __shfl(pcol, 
who); bool wshortcut = true; bool wdone = true; for (int i = wbeg + lane; __any(i < wend); i += WS) { int nei, neidata, neirange; if (i < wend) { nei = nlist[i]; neidata = color[nei]; neirange = neidata >> (WS / 2); const bool neidone = (neirange == 0); wdone &= neidone; //consolidated below if (neidone) { const int neicol = neidata; if (neicol < WS) { wpcol &= ~((unsigned int)MSB >> neicol); //consolidated below } else { if ((wmincol <= neicol) && (neicol < wmaxcol) && ((posscol2[woffs + neicol / WS] << (neicol % WS)) < 0)) { atomicAnd((int*)&posscol2[woffs + neicol / WS], ~((unsigned int)MSB >> (neicol % WS))); } } } else { const int neimincol = neidata & Mask; const int neimaxcol = neimincol + neirange; if ((neimincol <= wmincol) && (neimaxcol >= wmincol)) wshortcut = false; //consolidated below } } } wshortcut = __all(wshortcut); wdone = __all(wdone); wpcol &= __shfl_xor(wpcol, 1); wpcol &= __shfl_xor(wpcol, 2); wpcol &= __shfl_xor(wpcol, 4); wpcol &= __shfl_xor(wpcol, 8); wpcol &= __shfl_xor(wpcol, 16); if (who == lane) pcol = wpcol; if (who == lane) done = wdone; if (who == lane) shortcut = wshortcut; } if (w < stop) { if (range > 0) { const int mincol = data & Mask; int val = pcol, mc = 0; if (pcol == 0) { const int offs = beg / WS; mc = max(1, mincol / WS); while ((val = posscol2[offs + mc]) == 0) mc++; } int newmincol = mc * WS + __clz(val); if (mincol != newmincol) shortcut = false; if (shortcut || done) { pcol = (newmincol < WS) ? 
((unsigned int)MSB >> newmincol) : 0; } else { const int maxcol = mincol + range; const int range = maxcol - newmincol; newmincol = (range << (WS / 2)) | newmincol; again = true; } posscol[v] = pcol; color[v] = newmincol; } } } } while (__any(again)); } } __global__ void runSmall(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, volatile int* const __restrict__ posscol, int* const __restrict__ color) //int* __restrict__ wlsize) { const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int threads = gridDim.x * ThreadsPerBlock; bool again; do { again = false; for (int v = thread; v < nodes; v += threads) { int pcol = posscol[v]; if (__popc(pcol) > 1) { const int beg = nidx[v]; int active = color[v]; int allnei = 0; int keep = active; do { const int old = active; active &= active - 1; const int curr = old ^ active; const int i = beg + __clz(curr); const int nei = nlist[i]; const int neipcol = posscol[nei]; allnei |= neipcol; if ((pcol & neipcol) == 0) { pcol &= pcol - 1; keep ^= curr; } else if (__popc(neipcol) == 1) { pcol ^= neipcol; keep ^= curr; } } while (active != 0); if (keep != 0) { const int best = (unsigned int)MSB >> __clz(pcol); if ((best & ~allnei) != 0) { pcol = best; keep = 0; } } again |= keep; if (keep == 0) keep = __clz(pcol); color[v] = keep; posscol[v] = pcol; } } } while (again); } int main(int argc, char* argv[]) { printf("ECL-GC v1.2 (%s)\n", __FILE__); printf("Copyright 2020 Texas State University\n\n"); if (argc != 3) {printf("USAGE: %s <input_file_name> <repeat>\n\n", argv[0]); exit(-1);} if (WS != 32) {printf("ERROR: warp size must be 32\n\n"); exit(-1);} if (WS != sizeof(int) * 8) {printf("ERROR: bits per word must match warp size\n\n"); exit(-1);} if ((ThreadsPerBlock < WS) || ((ThreadsPerBlock % WS) != 0)) { printf("ERROR: threads per block must be a multiple of the warp size\n\n"); exit(-1); } if ((ThreadsPerBlock & (ThreadsPerBlock - 1)) != 0) { printf("ERROR: threads per block must 
be a power of two\n\n"); exit(-1); } ECLgraph g = readECLgraph(argv[1]); printf("input: %s\n", argv[1]); printf("nodes: %d\n", g.nodes); printf("edges: %d\n", g.edges); printf("avg degree: %.2f\n", 1.0 * g.edges / g.nodes); const int repeat = atoi(argv[2]); int* const color = new int [g.nodes]; int *nidx_d, *nlist_d, *nlist2_d, *posscol_d, *posscol2_d, *color_d, *wl_d, *wlsize_d; if (hipSuccess != hipMalloc((void **)&nidx_d, (g.nodes + 1) * sizeof(int))) printf("ERROR: could not allocate nidx_d\n\n"); if (hipSuccess != hipMalloc((void **)&nlist_d, g.edges * sizeof(int))) printf("ERROR: could not allocate nlist_d\n\n"); if (hipSuccess != hipMalloc((void **)&nlist2_d, g.edges * sizeof(int))) printf("ERROR: could not allocate nlist2_d\n\n"); if (hipSuccess != hipMalloc((void **)&posscol_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate posscol_d\n\n"); if (hipSuccess != hipMalloc((void **)&posscol2_d, (g.edges / WS + 1) * sizeof(int))) printf("ERROR: could not allocate posscol2_d\n\n"); if (hipSuccess != hipMalloc((void **)&color_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate color_d\n\n"); if (hipSuccess != hipMalloc((void **)&wl_d, g.nodes * sizeof(int))) printf("ERROR: could not allocate wl_d\n\n"); if (hipSuccess != hipMalloc((void **)&wlsize_d, sizeof(int))) printf("ERROR: could not allocate wlsize\n\n"); if (hipSuccess != hipMemcpy(nidx_d, g.nindex, (g.nodes + 1) * sizeof(int), hipMemcpyHostToDevice)) printf("ERROR: copying nidx to device failed\n\n"); if (hipSuccess != hipMemcpy(nlist_d, g.nlist, g.edges * sizeof(int), hipMemcpyHostToDevice)) printf("ERROR: copying nlist to device failed\n\n"); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); const int SMs = deviceProp.multiProcessorCount; const int mTpSM = deviceProp.maxThreadsPerMultiProcessor; const int blocks = SMs * mTpSM / ThreadsPerBlock; printf("Total number of compute units: %d\n", SMs); printf("Maximum resident threads per compute unit: %d\n", mTpSM); 
printf("Work-group size: %d\n", ThreadsPerBlock); printf("Total number of work-groups: %d\n", blocks); hipDeviceSynchronize(); auto start = std::chrono::high_resolution_clock::now(); for (int n = 0; n < repeat; n++) { hipMemset(wlsize_d, 0, sizeof(int)); init<<<blocks, ThreadsPerBlock>>>(g.nodes, g.edges, nidx_d, nlist_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d, wlsize_d); runLarge<<<blocks, ThreadsPerBlock>>>(g.nodes, nidx_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d, wlsize_d); runSmall<<<blocks, ThreadsPerBlock>>>(g.nodes, nidx_d, nlist_d, posscol_d, color_d); } hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_seconds = end - start; float runtime = elapsed_seconds.count() / repeat; printf("average runtime (%d runs): %.6f s\n", repeat, runtime); printf("throughput: %.6f Mnodes/s\n", g.nodes * 0.000001 / runtime); printf("throughput: %.6f Medges/s\n", g.edges * 0.000001 / runtime); if (hipSuccess != hipMemcpy(color, color_d, g.nodes * sizeof(int), hipMemcpyDeviceToHost)) printf("ERROR: copying color from device failed\n\n"); hipFree(wlsize_d); hipFree(wl_d); hipFree(color_d); hipFree(posscol2_d); hipFree(posscol_d); hipFree(nlist2_d); hipFree(nlist_d); hipFree(nidx_d); bool ok = true; for (int v = 0; v < g.nodes; v++) { if (color[v] < 0) { printf("ERROR: found unprocessed node in graph (node %d with deg %d)\n\n", v, g.nindex[v + 1] - g.nindex[v]); ok = false; break; } for (int i = g.nindex[v]; i < g.nindex[v + 1]; i++) { if (color[g.nlist[i]] == color[v]) { printf("ERROR: found adjacent nodes with same color %d (%d %d)\n\n", color[v], v, g.nlist[i]); ok = false; break; } } } printf("%s\n", ok ? 
"PASS" : "FAIL"); if (ok) { const int vals = 16; int c[vals]; for (int i = 0; i < vals; i++) c[i] = 0; int cols = -1; for (int v = 0; v < g.nodes; v++) { cols = std::max(cols, color[v]); if (color[v] < vals) c[color[v]]++; } cols++; printf("Number of distinct colors used: %d\n", cols); int sum = 0; for (int i = 0; i < std::min(vals, cols); i++) { sum += c[i]; printf("color %2d: %10d (%5.1f%%)\n", i, c[i], 100.0 * sum / g.nodes); } } delete [] color; freeECLgraph(g); return 0; }
ca3892f1e471d8fa36026d566a87d463f8fae470.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0

#include "TestEm3.cuh"

#include <AdePT/BVHNavigator.h>
#include <fieldPropagatorConstBz.h>

#include <CopCore/PhysicalConstants.h>

#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>

// Compute the physics and geometry step limit, transport the electrons while
// applying the continuous effects and maybe a discrete process that could
// generate secondaries.
//
// Shared implementation for e- (IsElectron == true) and e+ (IsElectron ==
// false); tracks that survive the step are re-enqueued into `activeQueue`,
// tracks that are killed are simply not enqueued. Secondaries are appended to
// `secondaries`; scoring counters are updated with atomics because many
// threads score concurrently.
template <bool IsElectron>
static __device__ __forceinline__ void TransportElectrons(Track *electrons, const adept::MParray *active,
                                                          Secondaries &secondaries, adept::MParray *activeQueue,
                                                          GlobalScoring *globalScoring,
                                                          ScoringPerVolume *scoringPerVolume)
{
  constexpr int Charge  = IsElectron ? -1 : 1;
  constexpr double Mass = copcore::units::kElectronMassC2;
  fieldPropagatorConstBz fieldPropagatorBz(BzFieldValue);

  // Grid-stride loop over the slots of currently active tracks.
  int activeSize = active->size();
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
    const int slot      = (*active)[i];
    Track &currentTrack = electrons[slot];

    auto volume    = currentTrack.navState.Top();
    int volumeID   = volume->id();
    int theMCIndex = MCIndex[volumeID];

    // Init a track with the needed data to call into G4HepEm.
    G4HepEmElectronTrack elTrack;
    G4HepEmTrack *theTrack = elTrack.GetTrack();
    theTrack->SetEKin(currentTrack.energy);
    theTrack->SetMCIndex(theMCIndex);
    theTrack->SetCharge(Charge);

    // Sample the `number-of-interaction-left` and put it into the track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft = currentTrack.numIALeft[ip];
      if (numIALeft <= 0) {
        // Resample only when exhausted (<= 0); otherwise continue the
        // previously sampled value.
        numIALeft                  = -::log(currentTrack.Uniform());
        currentTrack.numIALeft[ip] = numIALeft;
      }
      theTrack->SetNumIALeft(numIALeft, ip);
    }

    // Call G4HepEm to compute the physics step limit.
    G4HepEmElectronManager::HowFar(&g4HepEmData, &g4HepEmPars, &elTrack);

    // Get result into variables.
    double geometricalStepLengthFromPhysics = theTrack->GetGStepLength();
    // The phyiscal step length is the amount that the particle experiences
    // which might be longer than the geometrical step length due to MSC. As
    // long as we call PerformContinuous in the same kernel we don't need to
    // care, but we need to make this available when splitting the operations.
    // double physicalStepLength = elTrack.GetPStepLength();
    int winnerProcessIndex = theTrack->GetWinnerProcessIndex();
    // Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
    // also need to carry them over!

    // Check if there's a volume boundary in between.
    double geometryStepLength;
    vecgeom::NavStateIndex nextState;
    if (BzFieldValue != 0) {
      // Propagate in the constant Bz field; the propagator updates pos/dir.
      geometryStepLength = fieldPropagatorBz.ComputeStepAndPropagatedState</*Relocate=*/false, BVHNavigator>(
          currentTrack.energy, Mass, Charge, geometricalStepLengthFromPhysics, currentTrack.pos, currentTrack.dir,
          currentTrack.navState, nextState);
    } else {
      // Field-free: straight-line transport, position updated manually.
      geometryStepLength = BVHNavigator::ComputeStepAndNextVolume(
          currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState);
      currentTrack.pos += geometryStepLength * currentTrack.dir;
    }
    atomicAdd(&globalScoring->chargedSteps, 1);
    atomicAdd(&scoringPerVolume->chargedTrackLength[volumeID], geometryStepLength);

    if (nextState.IsOnBoundary()) {
      // Geometry limited the step: tell G4HepEm before PerformContinuous.
      theTrack->SetGStepLength(geometryStepLength);
      theTrack->SetOnBoundary(true);
    }

    // Apply continuous effects.
    bool stopped = G4HepEmElectronManager::PerformContinuous(&g4HepEmData, &g4HepEmPars, &elTrack);
    // Collect the changes.
    currentTrack.energy  = theTrack->GetEKin();
    double energyDeposit = theTrack->GetEnergyDeposit();
    atomicAdd(&globalScoring->energyDeposit, energyDeposit);
    atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energyDeposit);

    // Save the `number-of-interaction-left` in our track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft           = theTrack->GetNumIALeft(ip);
      currentTrack.numIALeft[ip] = numIALeft;
    }

    if (stopped) {
      if (!IsElectron) {
        // Annihilate the stopped positron into two gammas heading to opposite
        // directions (isotropic).
        Track &gamma1 = secondaries.gammas.NextTrack();
        Track &gamma2 = secondaries.gammas.NextTrack();
        atomicAdd(&globalScoring->numGammas, 2);

        const double cost = 2 * currentTrack.Uniform() - 1;
        const double sint = sqrt(1 - cost * cost);
        const double phi  = k2Pi * currentTrack.Uniform();
        double sinPhi, cosPhi;
        sincos(phi, &sinPhi, &cosPhi);

        gamma1.InitAsSecondary(/*parent=*/currentTrack);
        gamma1.rngState = currentTrack.rngState.Branch();
        gamma1.energy   = copcore::units::kElectronMassC2;
        gamma1.dir.Set(sint * cosPhi, sint * sinPhi, cost);

        gamma2.InitAsSecondary(/*parent=*/currentTrack);
        // Reuse the RNG state of the dying track.
        gamma2.rngState = currentTrack.rngState;
        gamma2.energy   = copcore::units::kElectronMassC2;
        gamma2.dir      = -gamma1.dir;
      }
      // Particles are killed by not enqueuing them into the new activeQueue.
      continue;
    }

    if (nextState.IsOnBoundary()) {
      // For now, just count that we hit something.
      atomicAdd(&globalScoring->hits, 1);

      // Kill the particle if it left the world.
      if (nextState.Top() != nullptr) {
        activeQueue->push_back(slot);
        BVHNavigator::RelocateToNextVolume(currentTrack.pos, currentTrack.dir, nextState);

        // Move to the next boundary.
        currentTrack.navState = nextState;
      }
      continue;
    } else if (winnerProcessIndex < 0) {
      // No discrete process, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Reset number of interaction left for the winner discrete process.
    // (Will be resampled in the next iteration.)
    currentTrack.numIALeft[winnerProcessIndex] = -1.0;

    // Check if a delta interaction happens instead of the real discrete process.
    if (G4HepEmElectronManager::CheckDelta(&g4HepEmData, theTrack, currentTrack.Uniform())) {
      // A delta interaction happened, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Perform the discrete interaction.
    RanluxppDoubleEngine rnge(&currentTrack.rngState);
    // We will need one branched RNG state, prepare while threads are synchronized.
    RanluxppDouble newRNG(currentTrack.rngState.Branch());

    const double energy   = currentTrack.energy;
    const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndex].fSecElProdCutE;

    switch (winnerProcessIndex) {
    case 0: {
      // Invoke ionization (for e-/e+):
      double deltaEkin = (IsElectron) ? G4HepEmElectronInteractionIoni::SampleETransferMoller(theElCut, energy, &rnge)
                                      : G4HepEmElectronInteractionIoni::SampleETransferBhabha(theElCut, energy, &rnge);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      G4HepEmElectronInteractionIoni::SampleDirections(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &secondary = secondaries.electrons.NextTrack();
      atomicAdd(&globalScoring->numElectrons, 1);

      secondary.InitAsSecondary(/*parent=*/currentTrack);
      secondary.rngState = newRNG;
      secondary.energy   = deltaEkin;
      secondary.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 1: {
      // Invoke model for Bremsstrahlung: either SB- or Rel-Brem.
      double logEnergy = ::log(energy);
      double deltaEkin = energy < g4HepEmPars.fElectronBremModelLim
                             ? G4HepEmElectronInteractionBrem::SampleETransferSB(&g4HepEmData, energy, logEnergy,
                                                                                theMCIndex, &rnge, IsElectron)
                             : G4HepEmElectronInteractionBrem::SampleETransferRB(&g4HepEmData, energy, logEnergy,
                                                                                theMCIndex, &rnge, IsElectron);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      G4HepEmElectronInteractionBrem::SampleDirections(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &gamma = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 1);

      gamma.InitAsSecondary(/*parent=*/currentTrack);
      gamma.rngState = newRNG;
      gamma.energy   = deltaEkin;
      gamma.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 2: {
      // Invoke annihilation (in-flight) for e+
      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double theGamma1Ekin, theGamma2Ekin;
      double theGamma1Dir[3], theGamma2Dir[3];
      G4HepEmPositronInteractionAnnihilation::SampleEnergyAndDirectionsInFlight(
          energy, dirPrimary, &theGamma1Ekin, theGamma1Dir, &theGamma2Ekin, theGamma2Dir, &rnge);

      Track &gamma1 = secondaries.gammas.NextTrack();
      Track &gamma2 = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 2);

      gamma1.InitAsSecondary(/*parent=*/currentTrack);
      gamma1.rngState = newRNG;
      gamma1.energy   = theGamma1Ekin;
      gamma1.dir.Set(theGamma1Dir[0], theGamma1Dir[1], theGamma1Dir[2]);

      gamma2.InitAsSecondary(/*parent=*/currentTrack);
      // Reuse the RNG state of the dying track.
      gamma2.rngState = currentTrack.rngState;
      gamma2.energy   = theGamma2Ekin;
      gamma2.dir.Set(theGamma2Dir[0], theGamma2Dir[1], theGamma2Dir[2]);

      // The current track is killed by not enqueuing into the next activeQueue.
      break;
    }
    }
  }
}

// Instantiate kernels for electrons and positrons.
// Kernel entry points: instantiate the shared transport template once per
// charge sign. Signatures are identical so the host can launch both the same
// way on the electron and positron queues.
__global__ void TransportElectrons(Track *electrons, const adept::MParray *active, Secondaries secondaries,
                                   adept::MParray *activeQueue, GlobalScoring *globalScoring,
                                   ScoringPerVolume *scoringPerVolume)
{
  TransportElectrons</*IsElectron*/ true>(electrons, active, secondaries, activeQueue, globalScoring, scoringPerVolume);
}
__global__ void TransportPositrons(Track *positrons, const adept::MParray *active, Secondaries secondaries,
                                   adept::MParray *activeQueue, GlobalScoring *globalScoring,
                                   ScoringPerVolume *scoringPerVolume)
{
  TransportElectrons</*IsElectron*/ false>(positrons, active, secondaries, activeQueue, globalScoring,
                                           scoringPerVolume);
}
ca3892f1e471d8fa36026d566a87d463f8fae470.cu
// SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0

#include "TestEm3.cuh"

#include <AdePT/BVHNavigator.h>
#include <fieldPropagatorConstBz.h>

#include <CopCore/PhysicalConstants.h>

#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>

// Compute the physics and geometry step limit, transport the electrons while
// applying the continuous effects and maybe a discrete process that could
// generate secondaries.
//
// Shared implementation for e- (IsElectron == true) and e+ (IsElectron ==
// false); tracks that survive the step are re-enqueued into `activeQueue`,
// tracks that are killed are simply not enqueued. Secondaries are appended to
// `secondaries`; scoring counters are updated with atomics because many
// threads score concurrently.
template <bool IsElectron>
static __device__ __forceinline__ void TransportElectrons(Track *electrons, const adept::MParray *active,
                                                          Secondaries &secondaries, adept::MParray *activeQueue,
                                                          GlobalScoring *globalScoring,
                                                          ScoringPerVolume *scoringPerVolume)
{
  constexpr int Charge  = IsElectron ? -1 : 1;
  constexpr double Mass = copcore::units::kElectronMassC2;
  fieldPropagatorConstBz fieldPropagatorBz(BzFieldValue);

  // Grid-stride loop over the slots of currently active tracks.
  int activeSize = active->size();
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
    const int slot      = (*active)[i];
    Track &currentTrack = electrons[slot];

    auto volume    = currentTrack.navState.Top();
    int volumeID   = volume->id();
    int theMCIndex = MCIndex[volumeID];

    // Init a track with the needed data to call into G4HepEm.
    G4HepEmElectronTrack elTrack;
    G4HepEmTrack *theTrack = elTrack.GetTrack();
    theTrack->SetEKin(currentTrack.energy);
    theTrack->SetMCIndex(theMCIndex);
    theTrack->SetCharge(Charge);

    // Sample the `number-of-interaction-left` and put it into the track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft = currentTrack.numIALeft[ip];
      if (numIALeft <= 0) {
        // Resample only when exhausted (<= 0); otherwise continue the
        // previously sampled value.
        numIALeft                  = -std::log(currentTrack.Uniform());
        currentTrack.numIALeft[ip] = numIALeft;
      }
      theTrack->SetNumIALeft(numIALeft, ip);
    }

    // Call G4HepEm to compute the physics step limit.
    G4HepEmElectronManager::HowFar(&g4HepEmData, &g4HepEmPars, &elTrack);

    // Get result into variables.
    double geometricalStepLengthFromPhysics = theTrack->GetGStepLength();
    // The phyiscal step length is the amount that the particle experiences
    // which might be longer than the geometrical step length due to MSC. As
    // long as we call PerformContinuous in the same kernel we don't need to
    // care, but we need to make this available when splitting the operations.
    // double physicalStepLength = elTrack.GetPStepLength();
    int winnerProcessIndex = theTrack->GetWinnerProcessIndex();
    // Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
    // also need to carry them over!

    // Check if there's a volume boundary in between.
    double geometryStepLength;
    vecgeom::NavStateIndex nextState;
    if (BzFieldValue != 0) {
      // Propagate in the constant Bz field; the propagator updates pos/dir.
      geometryStepLength = fieldPropagatorBz.ComputeStepAndPropagatedState</*Relocate=*/false, BVHNavigator>(
          currentTrack.energy, Mass, Charge, geometricalStepLengthFromPhysics, currentTrack.pos, currentTrack.dir,
          currentTrack.navState, nextState);
    } else {
      // Field-free: straight-line transport, position updated manually.
      geometryStepLength = BVHNavigator::ComputeStepAndNextVolume(
          currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState);
      currentTrack.pos += geometryStepLength * currentTrack.dir;
    }
    atomicAdd(&globalScoring->chargedSteps, 1);
    atomicAdd(&scoringPerVolume->chargedTrackLength[volumeID], geometryStepLength);

    if (nextState.IsOnBoundary()) {
      // Geometry limited the step: tell G4HepEm before PerformContinuous.
      theTrack->SetGStepLength(geometryStepLength);
      theTrack->SetOnBoundary(true);
    }

    // Apply continuous effects.
    bool stopped = G4HepEmElectronManager::PerformContinuous(&g4HepEmData, &g4HepEmPars, &elTrack);
    // Collect the changes.
    currentTrack.energy  = theTrack->GetEKin();
    double energyDeposit = theTrack->GetEnergyDeposit();
    atomicAdd(&globalScoring->energyDeposit, energyDeposit);
    atomicAdd(&scoringPerVolume->energyDeposit[volumeID], energyDeposit);

    // Save the `number-of-interaction-left` in our track.
    for (int ip = 0; ip < 3; ++ip) {
      double numIALeft           = theTrack->GetNumIALeft(ip);
      currentTrack.numIALeft[ip] = numIALeft;
    }

    if (stopped) {
      if (!IsElectron) {
        // Annihilate the stopped positron into two gammas heading to opposite
        // directions (isotropic).
        Track &gamma1 = secondaries.gammas.NextTrack();
        Track &gamma2 = secondaries.gammas.NextTrack();
        atomicAdd(&globalScoring->numGammas, 2);

        const double cost = 2 * currentTrack.Uniform() - 1;
        const double sint = sqrt(1 - cost * cost);
        const double phi  = k2Pi * currentTrack.Uniform();
        double sinPhi, cosPhi;
        sincos(phi, &sinPhi, &cosPhi);

        gamma1.InitAsSecondary(/*parent=*/currentTrack);
        gamma1.rngState = currentTrack.rngState.Branch();
        gamma1.energy   = copcore::units::kElectronMassC2;
        gamma1.dir.Set(sint * cosPhi, sint * sinPhi, cost);

        gamma2.InitAsSecondary(/*parent=*/currentTrack);
        // Reuse the RNG state of the dying track.
        gamma2.rngState = currentTrack.rngState;
        gamma2.energy   = copcore::units::kElectronMassC2;
        gamma2.dir      = -gamma1.dir;
      }
      // Particles are killed by not enqueuing them into the new activeQueue.
      continue;
    }

    if (nextState.IsOnBoundary()) {
      // For now, just count that we hit something.
      atomicAdd(&globalScoring->hits, 1);

      // Kill the particle if it left the world.
      if (nextState.Top() != nullptr) {
        activeQueue->push_back(slot);
        BVHNavigator::RelocateToNextVolume(currentTrack.pos, currentTrack.dir, nextState);

        // Move to the next boundary.
        currentTrack.navState = nextState;
      }
      continue;
    } else if (winnerProcessIndex < 0) {
      // No discrete process, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Reset number of interaction left for the winner discrete process.
    // (Will be resampled in the next iteration.)
    currentTrack.numIALeft[winnerProcessIndex] = -1.0;

    // Check if a delta interaction happens instead of the real discrete process.
    if (G4HepEmElectronManager::CheckDelta(&g4HepEmData, theTrack, currentTrack.Uniform())) {
      // A delta interaction happened, move on.
      activeQueue->push_back(slot);
      continue;
    }

    // Perform the discrete interaction.
    RanluxppDoubleEngine rnge(&currentTrack.rngState);
    // We will need one branched RNG state, prepare while threads are synchronized.
    RanluxppDouble newRNG(currentTrack.rngState.Branch());

    const double energy   = currentTrack.energy;
    const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndex].fSecElProdCutE;

    switch (winnerProcessIndex) {
    case 0: {
      // Invoke ionization (for e-/e+):
      double deltaEkin = (IsElectron) ? G4HepEmElectronInteractionIoni::SampleETransferMoller(theElCut, energy, &rnge)
                                      : G4HepEmElectronInteractionIoni::SampleETransferBhabha(theElCut, energy, &rnge);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      G4HepEmElectronInteractionIoni::SampleDirections(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &secondary = secondaries.electrons.NextTrack();
      atomicAdd(&globalScoring->numElectrons, 1);

      secondary.InitAsSecondary(/*parent=*/currentTrack);
      secondary.rngState = newRNG;
      secondary.energy   = deltaEkin;
      secondary.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 1: {
      // Invoke model for Bremsstrahlung: either SB- or Rel-Brem.
      double logEnergy = std::log(energy);
      double deltaEkin = energy < g4HepEmPars.fElectronBremModelLim
                             ? G4HepEmElectronInteractionBrem::SampleETransferSB(&g4HepEmData, energy, logEnergy,
                                                                                theMCIndex, &rnge, IsElectron)
                             : G4HepEmElectronInteractionBrem::SampleETransferRB(&g4HepEmData, energy, logEnergy,
                                                                                theMCIndex, &rnge, IsElectron);

      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double dirSecondary[3];
      G4HepEmElectronInteractionBrem::SampleDirections(energy, deltaEkin, dirSecondary, dirPrimary, &rnge);

      Track &gamma = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 1);

      gamma.InitAsSecondary(/*parent=*/currentTrack);
      gamma.rngState = newRNG;
      gamma.energy   = deltaEkin;
      gamma.dir.Set(dirSecondary[0], dirSecondary[1], dirSecondary[2]);

      currentTrack.energy = energy - deltaEkin;
      currentTrack.dir.Set(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
      // The current track continues to live.
      activeQueue->push_back(slot);
      break;
    }
    case 2: {
      // Invoke annihilation (in-flight) for e+
      double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
      double theGamma1Ekin, theGamma2Ekin;
      double theGamma1Dir[3], theGamma2Dir[3];
      G4HepEmPositronInteractionAnnihilation::SampleEnergyAndDirectionsInFlight(
          energy, dirPrimary, &theGamma1Ekin, theGamma1Dir, &theGamma2Ekin, theGamma2Dir, &rnge);

      Track &gamma1 = secondaries.gammas.NextTrack();
      Track &gamma2 = secondaries.gammas.NextTrack();
      atomicAdd(&globalScoring->numGammas, 2);

      gamma1.InitAsSecondary(/*parent=*/currentTrack);
      gamma1.rngState = newRNG;
      gamma1.energy   = theGamma1Ekin;
      gamma1.dir.Set(theGamma1Dir[0], theGamma1Dir[1], theGamma1Dir[2]);

      gamma2.InitAsSecondary(/*parent=*/currentTrack);
      // Reuse the RNG state of the dying track.
      gamma2.rngState = currentTrack.rngState;
      gamma2.energy   = theGamma2Ekin;
      gamma2.dir.Set(theGamma2Dir[0], theGamma2Dir[1], theGamma2Dir[2]);

      // The current track is killed by not enqueuing into the next activeQueue.
      break;
    }
    }
  }
}

// Instantiate kernels for electrons and positrons.
// Kernel entry points: instantiate the shared transport template once per
// charge sign. Signatures are identical so the host can launch both the same
// way on the electron and positron queues.
__global__ void TransportElectrons(Track *electrons, const adept::MParray *active, Secondaries secondaries,
                                   adept::MParray *activeQueue, GlobalScoring *globalScoring,
                                   ScoringPerVolume *scoringPerVolume)
{
  TransportElectrons</*IsElectron*/ true>(electrons, active, secondaries, activeQueue, globalScoring, scoringPerVolume);
}
__global__ void TransportPositrons(Track *positrons, const adept::MParray *active, Secondaries secondaries,
                                   adept::MParray *activeQueue, GlobalScoring *globalScoring,
                                   ScoringPerVolume *scoringPerVolume)
{
  TransportElectrons</*IsElectron*/ false>(positrons, active, secondaries, activeQueue, globalScoring,
                                           scoringPerVolume);
}
f5346d50abae848c8d6c7410ae6d645cc1530ea0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <timer.hpp>

// Single-pass statistics over x[0..N-1], accumulated atomically into y[0..6]:
//   y[0] sum, y[1] sum of |x|, y[2] sum of squares, y[3] max-norm (max |x|),
//   y[4] minimum, y[5] maximum, y[6] number of zeros.
// Preconditions: N >= 1 (the min identity reads x[0]); blockDim.x a power of
// two and <= 256 (shared tile width); y[0..6] initialized by the caller
// (0 everywhere, y[4] to some element of x, since atomics only fold into it).
// Note: max is seeded with 0, as in the original code, so it assumes the data
// contains a non-negative value.
__global__ void sharedMemoryKernel(const int* x, int* y, const int N)
{
  __shared__ int sharedMemory[7][256];

  int sum = 0;
  int maxSum = 0;
  int sqrSum = 0;
  int maxMod = 0;
  int min = x[0]; // x[0] is a real element, so it is a safe identity for min
  int max = 0;
  int zeros = 0;

  // Grid-stride loop: correct for any N, including N < gridDim.x * blockDim.x.
  for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N;
       tid += gridDim.x * blockDim.x) {
    int val = x[tid];
    sum += val;
    maxSum += std::abs(val);
    sqrSum += val * val;
    // Fixed: keep the magnitude, not the possibly-negative value itself.
    int mod = std::abs(val);
    maxMod = mod > maxMod ? mod : maxMod;
    min = val < min ? val : min;
    max = val > max ? val : max;
    zeros += val == 0 ? 1 : 0;
  }

  int tid = threadIdx.x;
  // Fixed: every thread publishes its partials (threads that processed no
  // element contribute the identity values above), so the reduction never
  // reads uninitialized shared memory. The original wrote only for tid < N
  // and also placed __syncthreads() inside that divergent branch.
  sharedMemory[0][tid] = sum;
  sharedMemory[1][tid] = maxSum;
  sharedMemory[2][tid] = sqrSum;
  sharedMemory[3][tid] = maxMod;
  sharedMemory[4][tid] = min;
  sharedMemory[5][tid] = max;
  sharedMemory[6][tid] = zeros;
  __syncthreads(); // barrier reached by the whole block

  // Tree reduction; blockDim.x needs to be a power of 2 for this to work.
  for (int i = blockDim.x / 2; i != 0; i /= 2) {
    // Fixed: the guard keeps tid + i in bounds (the original read up to
    // index 383 of a 256-wide tile) and prevents read/write races; the
    // per-iteration barrier is outside the branch so all threads reach it.
    if (tid < i) {
      sharedMemory[0][tid] += sharedMemory[0][tid + i];
      sharedMemory[1][tid] += sharedMemory[1][tid + i];
      sharedMemory[2][tid] += sharedMemory[2][tid + i];
      sharedMemory[3][tid] = sharedMemory[3][tid] > sharedMemory[3][tid + i]
                                 ? sharedMemory[3][tid] : sharedMemory[3][tid + i];
      sharedMemory[4][tid] = sharedMemory[4][tid] < sharedMemory[4][tid + i]
                                 ? sharedMemory[4][tid] : sharedMemory[4][tid + i];
      sharedMemory[5][tid] = sharedMemory[5][tid] > sharedMemory[5][tid + i]
                                 ? sharedMemory[5][tid] : sharedMemory[5][tid + i];
      sharedMemory[6][tid] += sharedMemory[6][tid + i];
    }
    __syncthreads();
  }

  // One atomic per block folds the block result into the global answer.
  if (tid == 0) {
    atomicAdd(y, sharedMemory[0][0]);
    atomicAdd(y + 1, sharedMemory[1][0]);
    atomicAdd(y + 2, sharedMemory[2][0]);
    atomicMax(y + 3, sharedMemory[3][0]);
    atomicMin(y + 4, sharedMemory[4][0]);
    atomicMax(y + 5, sharedMemory[5][0]);
    atomicAdd(y + 6, sharedMemory[6][0]);
  }
}

// Same statistics as sharedMemoryKernel, reduced with warp shuffles and one
// atomic per warp instead of shared memory. Preconditions: N >= 1;
// blockDim.x a multiple of warpSize; y initialized as for sharedMemoryKernel.
__global__ void shuffleKernel(const int* x, int* y, const int N)
{
  int sum = 0;
  int maxSum = 0;
  int sqrSum = 0;
  int maxMod = 0;
  int min = x[0];
  int max = 0;
  int zeros = 0;

  for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N;
       tid += gridDim.x * blockDim.x) {
    int val = x[tid];
    sum += val;
    maxSum += std::abs(val);
    sqrSum += val * val;
    // Fixed: keep the magnitude, matching the "max-norm" semantics.
    int mod = std::abs(val);
    maxMod = mod > maxMod ? mod : maxMod;
    min = val < min ? val : min;
    max = val > max ? val : max;
    zeros += val == 0 ? 1 : 0;
  }

  int tid = threadIdx.x;
  // Warp-level tree reduction. The full mask is correct because every thread
  // of the block executes this loop unconditionally.
  for (int i = warpSize / 2; i != 0; i /= 2) {
    sum += __shfl_down_sync(0xffffffff, sum, i);
    maxSum += __shfl_down_sync(0xffffffff, maxSum, i);
    sqrSum += __shfl_down_sync(0xffffffff, sqrSum, i);
    int temporary = __shfl_down_sync(0xffffffff, maxMod, i);
    maxMod = temporary > maxMod ? temporary : maxMod;
    temporary = __shfl_down_sync(0xffffffff, min, i);
    min = temporary < min ? temporary : min;
    temporary = __shfl_down_sync(0xffffffff, max, i);
    max = temporary > max ? temporary : max;
    zeros += __shfl_down_sync(0xffffffff, zeros, i);
  }

  // Lane 0 of each warp holds that warp's result; no block barrier needed
  // since nothing is exchanged through shared memory.
  if (tid % warpSize == 0) {
    atomicAdd(y, sum);
    atomicAdd(y + 1, maxSum);
    atomicAdd(y + 2, sqrSum);
    atomicMax(y + 3, maxMod);
    atomicMin(y + 4, min);
    atomicMax(y + 5, max);
    atomicAdd(y + 6, zeros);
  }
}

// Dot product of x and y accumulated atomically into *dot (must be zeroed by
// the caller). Preconditions: blockDim.x a power of two and <= 128.
__global__ void dot_product(int* x, int* y, int* dot, int N)
{
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  int stride = blockDim.x * gridDim.x;
  __shared__ int cache[128];

  int temp = 0;
  while (index < N) {
    temp += x[index] * y[index];
    index += stride;
  }
  cache[threadIdx.x] = temp;
  __syncthreads();

  // Guarded tree reduction with a barrier each round.
  for (int i = blockDim.x / 2; i > 0; i /= 2) {
    if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i];
    __syncthreads();
  }

  if (threadIdx.x == 0) atomicAdd(dot, cache[0]);
}

// Prints the first N entries of an indexable container, separated by " | ".
template <typename T>
void printContainer(T container, int N)
{
  for (int i = 0; i < N; i++) {
    std::cout << container[i] << " | ";
  }
}

int main()
{
  Timer timer;

  int N = 5;
  int *x = (int *)malloc(sizeof(int) * N);
  int *y = (int *)malloc(sizeof(int) * 7);
  for (int i = 0; i < N; i++) {
    x[i] = i - N / 2;
  }
  // Fixed: the kernel folds results in with atomics, so the device buffer
  // must start from well-defined identities (the original accumulated into
  // uninitialized memory). y[4] (min) starts from a real element of x.
  for (int i = 0; i < 7; i++) y[i] = 0;
  y[4] = x[0];

  int *cuda_x;
  int *cuda_y;
  hipMalloc(&cuda_x, sizeof(int) * N);
  hipMalloc(&cuda_y, sizeof(int) * 7);
  hipMemcpy(cuda_x, x, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(cuda_y, y, sizeof(int) * 7, hipMemcpyHostToDevice);

  hipLaunchKernelGGL((sharedMemoryKernel), dim3(256), dim3(256), 0, 0, cuda_x, cuda_y, N);

  // Blocking copy: also synchronizes with the kernel above.
  hipMemcpy(y, cuda_y, sizeof(int) * 7, hipMemcpyDeviceToHost);

  std::cout << "Input" << std::endl;
  printContainer(x, N);
  std::cout << std::endl;
  std::cout << "Sum of all entries: " << y[0] << std::endl;
  std::cout << "Sum of maximum values: " << y[1] << std::endl;
  std::cout << "Sum of squares: " << y[2] << std::endl;
  std::cout << "Max-norm: " << y[3] << std::endl;
  std::cout << "minimum value: " << y[4] << std::endl;
  std::cout << "maximum value: " << y[5] << std::endl;
  std::cout << "number of zeros: " << y[6] << std::endl;

  // Fixed: release device and host allocations (previously leaked).
  hipFree(cuda_x);
  hipFree(cuda_y);
  free(x);
  free(y);

  return EXIT_SUCCESS;
}
f5346d50abae848c8d6c7410ae6d645cc1530ea0.cu
#include <iostream>
#include <timer.hpp>

// Single-pass statistics over x[0..N-1], accumulated atomically into y[0..6]:
//   y[0] sum, y[1] sum of |x|, y[2] sum of squares, y[3] max-norm (max |x|),
//   y[4] minimum, y[5] maximum, y[6] number of zeros.
// Preconditions: N >= 1 (the min identity reads x[0]); blockDim.x a power of
// two and <= 256 (shared tile width); y[0..6] initialized by the caller
// (0 everywhere, y[4] to some element of x, since atomics only fold into it).
// Note: max is seeded with 0, as in the original code, so it assumes the data
// contains a non-negative value.
__global__ void sharedMemoryKernel(const int* x, int* y, const int N)
{
  __shared__ int sharedMemory[7][256];

  int sum = 0;
  int maxSum = 0;
  int sqrSum = 0;
  int maxMod = 0;
  int min = x[0]; // x[0] is a real element, so it is a safe identity for min
  int max = 0;
  int zeros = 0;

  // Grid-stride loop: correct for any N, including N < gridDim.x * blockDim.x.
  for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N;
       tid += gridDim.x * blockDim.x) {
    int val = x[tid];
    sum += val;
    maxSum += std::abs(val);
    sqrSum += val * val;
    // Fixed: keep the magnitude, not the possibly-negative value itself.
    int mod = std::abs(val);
    maxMod = mod > maxMod ? mod : maxMod;
    min = val < min ? val : min;
    max = val > max ? val : max;
    zeros += val == 0 ? 1 : 0;
  }

  int tid = threadIdx.x;
  // Fixed: every thread publishes its partials (threads that processed no
  // element contribute the identity values above), so the reduction never
  // reads uninitialized shared memory. The original wrote only for tid < N
  // and also placed __syncthreads() inside that divergent branch.
  sharedMemory[0][tid] = sum;
  sharedMemory[1][tid] = maxSum;
  sharedMemory[2][tid] = sqrSum;
  sharedMemory[3][tid] = maxMod;
  sharedMemory[4][tid] = min;
  sharedMemory[5][tid] = max;
  sharedMemory[6][tid] = zeros;
  __syncthreads(); // barrier reached by the whole block

  // Tree reduction; blockDim.x needs to be a power of 2 for this to work.
  for (int i = blockDim.x / 2; i != 0; i /= 2) {
    // Fixed: the guard keeps tid + i in bounds (the original read up to
    // index 383 of a 256-wide tile) and prevents read/write races; the
    // per-iteration barrier is outside the branch so all threads reach it.
    if (tid < i) {
      sharedMemory[0][tid] += sharedMemory[0][tid + i];
      sharedMemory[1][tid] += sharedMemory[1][tid + i];
      sharedMemory[2][tid] += sharedMemory[2][tid + i];
      sharedMemory[3][tid] = sharedMemory[3][tid] > sharedMemory[3][tid + i]
                                 ? sharedMemory[3][tid] : sharedMemory[3][tid + i];
      sharedMemory[4][tid] = sharedMemory[4][tid] < sharedMemory[4][tid + i]
                                 ? sharedMemory[4][tid] : sharedMemory[4][tid + i];
      sharedMemory[5][tid] = sharedMemory[5][tid] > sharedMemory[5][tid + i]
                                 ? sharedMemory[5][tid] : sharedMemory[5][tid + i];
      sharedMemory[6][tid] += sharedMemory[6][tid + i];
    }
    __syncthreads();
  }

  // One atomic per block folds the block result into the global answer.
  if (tid == 0) {
    atomicAdd(y, sharedMemory[0][0]);
    atomicAdd(y + 1, sharedMemory[1][0]);
    atomicAdd(y + 2, sharedMemory[2][0]);
    atomicMax(y + 3, sharedMemory[3][0]);
    atomicMin(y + 4, sharedMemory[4][0]);
    atomicMax(y + 5, sharedMemory[5][0]);
    atomicAdd(y + 6, sharedMemory[6][0]);
  }
}

// Same statistics as sharedMemoryKernel, reduced with warp shuffles and one
// atomic per warp instead of shared memory. Preconditions: N >= 1;
// blockDim.x a multiple of warpSize; y initialized as for sharedMemoryKernel.
__global__ void shuffleKernel(const int* x, int* y, const int N)
{
  int sum = 0;
  int maxSum = 0;
  int sqrSum = 0;
  int maxMod = 0;
  int min = x[0];
  int max = 0;
  int zeros = 0;

  for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < N;
       tid += gridDim.x * blockDim.x) {
    int val = x[tid];
    sum += val;
    maxSum += std::abs(val);
    sqrSum += val * val;
    // Fixed: keep the magnitude, matching the "max-norm" semantics.
    int mod = std::abs(val);
    maxMod = mod > maxMod ? mod : maxMod;
    min = val < min ? val : min;
    max = val > max ? val : max;
    zeros += val == 0 ? 1 : 0;
  }

  int tid = threadIdx.x;
  // Warp-level tree reduction. The full mask is correct because every thread
  // of the block executes this loop unconditionally.
  for (int i = warpSize / 2; i != 0; i /= 2) {
    sum += __shfl_down_sync(0xffffffff, sum, i);
    maxSum += __shfl_down_sync(0xffffffff, maxSum, i);
    sqrSum += __shfl_down_sync(0xffffffff, sqrSum, i);
    int temporary = __shfl_down_sync(0xffffffff, maxMod, i);
    maxMod = temporary > maxMod ? temporary : maxMod;
    temporary = __shfl_down_sync(0xffffffff, min, i);
    min = temporary < min ? temporary : min;
    temporary = __shfl_down_sync(0xffffffff, max, i);
    max = temporary > max ? temporary : max;
    zeros += __shfl_down_sync(0xffffffff, zeros, i);
  }

  // Lane 0 of each warp holds that warp's result; no block barrier needed
  // since nothing is exchanged through shared memory.
  if (tid % warpSize == 0) {
    atomicAdd(y, sum);
    atomicAdd(y + 1, maxSum);
    atomicAdd(y + 2, sqrSum);
    atomicMax(y + 3, maxMod);
    atomicMin(y + 4, min);
    atomicMax(y + 5, max);
    atomicAdd(y + 6, zeros);
  }
}

// Dot product of x and y accumulated atomically into *dot (must be zeroed by
// the caller). Preconditions: blockDim.x a power of two and <= 128.
__global__ void dot_product(int* x, int* y, int* dot, int N)
{
  int index = threadIdx.x + blockDim.x * blockIdx.x;
  int stride = blockDim.x * gridDim.x;
  __shared__ int cache[128];

  int temp = 0;
  while (index < N) {
    temp += x[index] * y[index];
    index += stride;
  }
  cache[threadIdx.x] = temp;
  __syncthreads();

  // Guarded tree reduction with a barrier each round.
  for (int i = blockDim.x / 2; i > 0; i /= 2) {
    if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i];
    __syncthreads();
  }

  if (threadIdx.x == 0) atomicAdd(dot, cache[0]);
}

// Prints the first N entries of an indexable container, separated by " | ".
template <typename T>
void printContainer(T container, int N)
{
  for (int i = 0; i < N; i++) {
    std::cout << container[i] << " | ";
  }
}

int main()
{
  Timer timer;

  int N = 5;
  int *x = (int *)malloc(sizeof(int) * N);
  int *y = (int *)malloc(sizeof(int) * 7);
  for (int i = 0; i < N; i++) {
    x[i] = i - N / 2;
  }
  // Fixed: the kernel folds results in with atomics, so the device buffer
  // must start from well-defined identities (the original accumulated into
  // uninitialized memory). y[4] (min) starts from a real element of x.
  for (int i = 0; i < 7; i++) y[i] = 0;
  y[4] = x[0];

  int *cuda_x;
  int *cuda_y;
  cudaMalloc(&cuda_x, sizeof(int) * N);
  cudaMalloc(&cuda_y, sizeof(int) * 7);
  cudaMemcpy(cuda_x, x, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(cuda_y, y, sizeof(int) * 7, cudaMemcpyHostToDevice);

  sharedMemoryKernel<<<256, 256>>>(cuda_x, cuda_y, N);

  // Blocking copy: also synchronizes with the kernel above.
  cudaMemcpy(y, cuda_y, sizeof(int) * 7, cudaMemcpyDeviceToHost);

  std::cout << "Input" << std::endl;
  printContainer(x, N);
  std::cout << std::endl;
  std::cout << "Sum of all entries: " << y[0] << std::endl;
  std::cout << "Sum of maximum values: " << y[1] << std::endl;
  std::cout << "Sum of squares: " << y[2] << std::endl;
  std::cout << "Max-norm: " << y[3] << std::endl;
  std::cout << "minimum value: " << y[4] << std::endl;
  std::cout << "maximum value: " << y[5] << std::endl;
  std::cout << "number of zeros: " << y[6] << std::endl;

  // Fixed: release device and host allocations (previously leaked).
  cudaFree(cuda_x);
  cudaFree(cuda_y);
  free(x);
  free(y);

  return EXIT_SUCCESS;
}
6673d19e68ef90d51c4a819de1e00fc122f69799.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_2_b; int xdim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_2_b; int ydim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_2_b; int xdim1_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_2_b; int ydim1_update_halo_kernel3_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_2_b * (y) + \ xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_2_b * (y) + \ xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_plus_2_b(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_2_b(arg0, arg1, arg2); } } // host 
stub function void ops_par_loop_update_halo_kernel3_plus_2_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 108)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(108, "update_halo_kernel3_plus_2_b"); OPS_kernels[108].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_plus_2_b_h || ydim0 != ydim0_update_halo_kernel3_plus_2_b_h || xdim1 != xdim1_update_halo_kernel3_plus_2_b_h || ydim1 != ydim1_update_halo_kernel3_plus_2_b_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_2_b_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_2_b_h = ydim0; 
hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_2_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_plus_2_b_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * 
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_2_b), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[108].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
6673d19e68ef90d51c4a819de1e00fc122f69799.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_2_b; int xdim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_2_b; int ydim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_2_b; int xdim1_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_2_b; int ydim1_update_halo_kernel3_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_2_b * (y) + \ xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_2_b * (y) + \ xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_plus_2_b(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_2_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_2_b(char const *name, 
ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 108)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(108, "update_halo_kernel3_plus_2_b"); OPS_kernels[108].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_plus_2_b_h || ydim0 != ydim0_update_halo_kernel3_plus_2_b_h || xdim1 != xdim1_update_halo_kernel3_plus_2_b_h || ydim1 != ydim1_update_halo_kernel3_plus_2_b_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel3_plus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_2_b_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel3_plus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_2_b_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel3_plus_2_b, &xdim1, sizeof(int)); 
xdim1_update_halo_kernel3_plus_2_b_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel3_plus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); 
p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_plus_2_b<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[108].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
71b6736ef5353606c679f0653b3379ec503059c3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/count.h> #include "nvstrings/NVStrings.h" #include "nvstrings/NVCategory.h" #include "nvstrings/ipc_transfer.h" // // cd ../build // nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/ipc_test.cu -L. -lNVStrings -lNVCategory -o ipc_test --linker-options -rpath,.: // int strings_test( std::string& mode ) { NVStrings* strs = 0; if( mode.compare("client")==0 ) { nvstrings_ipc_transfer ipc; FILE* fh = fopen("ipctx.bin","rb"); fread(&ipc,1,sizeof(ipc),fh); fclose(fh); printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size); strs = NVStrings::create_from_ipc(ipc); strs->print(); printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() ); } else { const char* hstrs[] = { "John Smith", "Joe Blow", "Jane Smith" }; strs = NVStrings::create_from_array(hstrs,3); nvstrings_ipc_transfer ipc; strs->create_ipc_transfer(ipc); //printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size); strs->print(); printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() ); FILE* fh = fopen("ipctx.bin","wb"); fwrite((void*)&ipc,1,sizeof(ipc),fh); fclose(fh); printf("Server ready. 
Press enter to terminate.\n"); std::cin.ignore(); // just checking strs->print(); } NVStrings::destroy(strs); return 0; } int category_test( std::string& mode ) { NVCategory* cat = 0; if( mode.compare("client")==0 ) { nvcategory_ipc_transfer ipc; FILE* fh = fopen("ipctx.bin","rb"); fread(&ipc,1,sizeof(ipc),fh); fclose(fh); cat = NVCategory::create_from_ipc(ipc); //printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size); NVStrings* strs = cat->get_keys(); strs->print(); NVStrings::destroy(strs); } else { const char* hstrs[] = { "John", "Jane", "John", "Jane", "Bob" }; NVStrings* strs = NVStrings::create_from_array(hstrs,5); cat = NVCategory::create_from_strings(*strs); nvcategory_ipc_transfer ipc; cat->create_ipc_transfer(ipc); //printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size); NVStrings::destroy(strs); strs = cat->get_keys(); strs->print(); NVStrings::destroy(strs); FILE* fh = fopen("ipctx.bin","wb"); fwrite((void*)&ipc,1,sizeof(ipc),fh); fclose(fh); printf("Server ready. Press enter to terminate.\n"); std::cin.ignore(); } NVCategory::destroy(cat); return 0; } int main( int argc, const char** argv ) { if( argc < 2 ) { printf("require parameter: 'server' or values for pointers\n"); return 0; } std::string mode = argv[1]; //strings_test(mode); category_test(mode); }
71b6736ef5353606c679f0653b3379ec503059c3.cu
#include <stdio.h> #include <string> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/count.h> #include "nvstrings/NVStrings.h" #include "nvstrings/NVCategory.h" #include "nvstrings/ipc_transfer.h" // // cd ../build // nvcc -w -std=c++11 --expt-extended-lambda -gencode arch=compute_70,code=sm_70 ../tests/ipc_test.cu -L. -lNVStrings -lNVCategory -o ipc_test --linker-options -rpath,.: // int strings_test( std::string& mode ) { NVStrings* strs = 0; if( mode.compare("client")==0 ) { nvstrings_ipc_transfer ipc; FILE* fh = fopen("ipctx.bin","rb"); fread(&ipc,1,sizeof(ipc),fh); fclose(fh); printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size); strs = NVStrings::create_from_ipc(ipc); strs->print(); printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() ); } else { const char* hstrs[] = { "John Smith", "Joe Blow", "Jane Smith" }; strs = NVStrings::create_from_array(hstrs,3); nvstrings_ipc_transfer ipc; strs->create_ipc_transfer(ipc); //printf("%p %ld %ld\n", ipc.base_address, ipc.count, ipc.size); strs->print(); printf("%u strings in %ld bytes\n", strs->size(), strs->memsize() ); FILE* fh = fopen("ipctx.bin","wb"); fwrite((void*)&ipc,1,sizeof(ipc),fh); fclose(fh); printf("Server ready. 
Press enter to terminate.\n"); std::cin.ignore(); // just checking strs->print(); } NVStrings::destroy(strs); return 0; } int category_test( std::string& mode ) { NVCategory* cat = 0; if( mode.compare("client")==0 ) { nvcategory_ipc_transfer ipc; FILE* fh = fopen("ipctx.bin","rb"); fread(&ipc,1,sizeof(ipc),fh); fclose(fh); cat = NVCategory::create_from_ipc(ipc); //printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size); NVStrings* strs = cat->get_keys(); strs->print(); NVStrings::destroy(strs); } else { const char* hstrs[] = { "John", "Jane", "John", "Jane", "Bob" }; NVStrings* strs = NVStrings::create_from_array(hstrs,5); cat = NVCategory::create_from_strings(*strs); nvcategory_ipc_transfer ipc; cat->create_ipc_transfer(ipc); //printf("%p %p:%u %p:%u %p:%ld\n", ipc.base_address, ipc.strs, ipc.keys, ipc.vals, ipc.count, ipc.mem, ipc.size); NVStrings::destroy(strs); strs = cat->get_keys(); strs->print(); NVStrings::destroy(strs); FILE* fh = fopen("ipctx.bin","wb"); fwrite((void*)&ipc,1,sizeof(ipc),fh); fclose(fh); printf("Server ready. Press enter to terminate.\n"); std::cin.ignore(); } NVCategory::destroy(cat); return 0; } int main( int argc, const char** argv ) { if( argc < 2 ) { printf("require parameter: 'server' or values for pointers\n"); return 0; } std::string mode = argv[1]; //strings_test(mode); category_test(mode); }
9265c0de45d0b0c4dbe8c59095d2ce0007ada9be.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2014-2019, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // QuicksortMain.cpp : Defines the entry point for the console application. 
// #include <stdio.h> #include <assert.h> #include <string.h> #include <limits.h> #include <math.h> #include <iostream> #include <algorithm> #include <iterator> #include <random> #include <vector> #include <map> #include <hip/hip_runtime.h> #define RUN_CPU_SORTS //#define GET_DETAILED_PERFORMANCE #define gpucheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Types: typedef unsigned int uint; #ifdef min #undef min #endif #ifdef max #undef max #endif /// return a timestamp with sub-second precision /** QueryPerformanceCounter and clock_gettime have an undefined starting point (null/zero) * and can wrap around, i.e. be nulled again. **/ double seconds() { struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return now.tv_sec + now.tv_nsec / 1000000000.0; } bool parseArgs(int argc, char** argv, unsigned int* test_iterations, unsigned int* widthReSz, unsigned int* heightReSz) { const char sUsageString[512] = "Usage: Quicksort [num test iterations] [SurfWidth(^2 only)] [SurfHeight(^2 only)]"; if (argc != 4) { printf(sUsageString); return false; } else { *test_iterations = atoi (argv[1]); *widthReSz = atoi (argv[2]); *heightReSz = atoi (argv[3]); return true; } } #include "Quicksort.h" #include "QuicksortKernels.cuh" template <class T> T* partition(T* left, T* right, T pivot) { // move pivot to the end T temp = *right; *right = pivot; *left = temp; T* store = left; for(T* p = left; p != right; p++) { if (*p < pivot) { temp = *store; *store = *p; *p = temp; store++; } } temp = *store; *store = pivot; *right = temp; return store; } template <class T> void quicksort(T* data, int left, int right) { T* store = partition(data + left, data + right, data[left]); int nright = store-data; int nleft = nright+1; if (left < nright) { if (nright - left > 32) { 
quicksort(data, left, nright); } else std::sort(data + left, data + nright + 1); } if (nleft < right) { if (right - nleft > 32) { quicksort(data, nleft, right); } else { std::sort(data + nleft, data + right + 1); } } } template <class T> void gqsort(T *db, T *dnb, std::vector<block_record<T>>& blocks, std::vector<parent_record>& parents, std::vector<work_record<T>>& news, bool reset) { news.resize(blocks.size()*2); #ifdef GET_DETAILED_PERFORMANCE static double absoluteTotal = 0.0; static uint count = 0; if (reset) { absoluteTotal = 0.0; count = 0; } double beginClock, endClock; beginClock = seconds(); #endif block_record<T> *blocksb; parent_record *parentsb; work_record<T> *newsb; gpucheck(hipMalloc((void**)&blocksb, sizeof(block_record<T>)*blocks.size())); gpucheck(hipMalloc((void**)&parentsb, sizeof(parent_record)*parents.size())); gpucheck(hipMalloc((void**)&newsb, sizeof(work_record<T>)*news.size())); gpucheck(hipMemcpy(blocksb, blocks.data(), sizeof(block_record<T>)*blocks.size(), hipMemcpyHostToDevice)); gpucheck(hipMemcpy(parentsb, parents.data(), sizeof(parent_record)*parents.size(), hipMemcpyHostToDevice)); gpucheck(hipMemcpy(newsb, news.data(), sizeof(work_record<T>)*news.size(), hipMemcpyHostToDevice)); hipLaunchKernelGGL(gqsort_kernel, dim3(blocks.size()), dim3(GQSORT_LOCAL_WORKGROUP_SIZE), 0, 0, db, dnb, blocksb, parentsb, newsb); gpucheck( hipPeekAtLastError() ); gpucheck( hipDeviceSynchronize() ); gpucheck(hipMemcpy(news.data(), newsb, sizeof(work_record<T>)*news.size(), hipMemcpyDeviceToHost)); gpucheck(hipFree(blocksb)); gpucheck(hipFree(parentsb)); gpucheck(hipFree(newsb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; absoluteTotal += totalTime; std::cout << ++count << ": gqsort time " << absoluteTotal * 1000 << " ms" << std::endl; #endif #ifdef DEBUG printf("\noutput news\n"); for (int i = 0; i < news.size(); i++) { printf("%u %u %u %u\n", news[i].start, news[i].end, news[i].pivot, 
news[i].direction); } #endif } template <class T> void lqsort(T *db, T *dnb, std::vector<work_record<T>>& done) { #ifdef GET_DETAILED_PERFORMANCE double beginClock, endClock; beginClock = seconds(); #endif work_record<T>* doneb; //std::cout << "done size is " << done.size() << std::endl; gpucheck(hipMalloc((void**)&doneb, sizeof(work_record<T>)*done.size())); gpucheck(hipMemcpy(doneb, done.data(), sizeof(work_record<T>)*done.size(), hipMemcpyHostToDevice)); hipLaunchKernelGGL(lqsort_kernel, dim3(done.size()), dim3(LQSORT_LOCAL_WORKGROUP_SIZE), 0, 0, db, dnb, doneb); gpucheck( hipPeekAtLastError() ); gpucheck( hipDeviceSynchronize() ); // Lets do phase 2 pass gpucheck(hipFree(doneb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; std::cout << "lqsort time " << totalTime * 1000 << " ms" << std::endl; #endif } size_t optp(size_t s, double k, size_t m) { return (size_t)pow(2, floor(log(s*k + m)/log(2.0) + 0.5)); } template <class T> void GPUQSort(size_t size, T* d, T* dn) { // allocate buffers T *db, *dnb; hipMalloc((void**)&db, ((sizeof(T)*size)/64 + 1)*64); hipMemcpy(db, d, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyHostToDevice); hipMalloc((void**)&dnb, ((sizeof(T)*size)/64 + 1)*64); hipMemcpy(dnb, dn, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyHostToDevice); const size_t MAXSEQ = optp(size, 0.00009516, 203); const size_t MAX_SIZE = 12*::max(MAXSEQ, (size_t)QUICKSORT_BLOCK_SIZE); //std::cout << "MAXSEQ = " << MAXSEQ << std::endl; uint startpivot = median_host(d[0], d[size/2], d[size-1]); std::vector<work_record<T>> work, done, news; work.reserve(MAX_SIZE); done.reserve(MAX_SIZE); news.reserve(MAX_SIZE); std::vector<parent_record> parent_records; parent_records.reserve(MAX_SIZE); std::vector<block_record<T>> blocks; blocks.reserve(MAX_SIZE); work.push_back(work_record<T>(0, size, startpivot, 1)); bool reset = true; while(!work.empty() /*&& work.size() + done.size() < MAXSEQ*/) { size_t blocksize = 0; for(auto it = 
work.begin(); it != work.end(); ++it) { blocksize += ::max((it->end - it->start)/MAXSEQ, (size_t)1); } for(auto it = work.begin(); it != work.end(); ++it) { uint start = it->start; uint end = it->end; uint pivot = it->pivot; uint direction = it->direction; uint blockcount = (end - start + blocksize - 1)/blocksize; parent_record prnt(start, end, start, end, blockcount-1); parent_records.push_back(prnt); for(uint i = 0; i < blockcount - 1; i++) { uint bstart = start + blocksize*i; block_record<T> br(bstart, bstart+blocksize, pivot, direction, parent_records.size()-1); blocks.push_back(br); } block_record<T> br(start + blocksize*(blockcount - 1), end, pivot, direction, parent_records.size()-1); blocks.push_back(br); } //std::cout << " blocks = " << blocks.size() << " parent records = " << parent_records.size() << " news = " << news.size() << std::endl; gqsort<T>(db, dnb, blocks, parent_records, news, reset); reset = false; work.clear(); parent_records.clear(); blocks.clear(); for(auto it = news.begin(); it != news.end(); ++it) { if (it->direction != EMPTY_RECORD) { if (it->end - it->start <= QUICKSORT_BLOCK_SIZE /*size/MAXSEQ*/) { if (it->end - it->start > 0) done.push_back(*it); } else { work.push_back(*it); } } } news.clear(); } for(auto it = work.begin(); it != work.end(); ++it) { if (it->end - it->start > 0) done.push_back(*it); } lqsort<T>(db, dnb, done); hipMemcpy(d, db, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyDeviceToHost); hipFree(db); hipFree(dnb); } template <class T> int test(uint arraySize, unsigned int NUM_ITERATIONS, const std::string& type_name) { double totalTime, quickSortTime, stdSortTime; double beginClock, endClock; printf("\n\n\n--------------------------------------------------------------------\n"); printf("Allocating array size of %d (data type: %s)\n", arraySize, type_name.c_str()); T* pArray = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); T* pArrayCopy = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); 
std::generate(pArray, pArray + arraySize, [](){static T i = 0; return ++i; }); std::shuffle(pArray, pArray + arraySize, std::mt19937(19937)); #ifdef RUN_CPU_SORTS std::cout << "Sorting the regular way..." << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); std::sort(pArrayCopy, pArrayCopy + arraySize); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; stdSortTime = totalTime; std::cout << "quicksort on the cpu: " << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); quicksort(pArrayCopy, 0, arraySize-1); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; quickSortTime = totalTime; #ifdef TRUST_BUT_VERIFY { std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, verify.begin()); std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArrayCopy); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArrayCopy[i]) { //std:: cout << "discrepancy at " << i << " " << pArrayCopy[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { char y; std::cout << "num_discrepancies: " << num_discrepancies << std::endl; std::cin >> y; } } #endif #endif // RUN_CPU_SORTS std::cout << "Sorting with GPU quicksort: " << std::endl; std::vector<uint> original(arraySize); std::copy(pArray, pArray + arraySize, original.begin()); std::vector<double> times; times.resize(NUM_ITERATIONS); double AverageTime = 0.0; uint num_failures = 0; for(uint k = 0; k < NUM_ITERATIONS; k++) { std::copy(original.begin(), original.end(), pArray); std::vector<uint> seqs; std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, 
verify.begin()); beginClock = seconds(); GPUQSort(arraySize, pArray, pArrayCopy); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; times[k] = totalTime; AverageTime += totalTime; #ifdef TRUST_BUT_VERIFY std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArray); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArray[i]) { std:: cout << "discrepancy at " << i << " " << pArray[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { std::cout << "num_discrepancies: " << num_discrepancies << std::endl; num_failures ++; } #endif } std::cout << " Number of failures: " << num_failures << " out of " << NUM_ITERATIONS << std::endl; AverageTime = AverageTime/NUM_ITERATIONS; std::cout << "Average Time: " << AverageTime * 1000 << " ms" << std::endl; double stdDev = 0.0, minTime = 1000000.0, maxTime = 0.0; for(uint k = 0; k < NUM_ITERATIONS; k++) { stdDev += (AverageTime - times[k])*(AverageTime - times[k]); minTime = ::min(minTime, times[k]); maxTime = ::max(maxTime, times[k]); } if (NUM_ITERATIONS > 1) { stdDev = sqrt(stdDev/(NUM_ITERATIONS - 1)); std::cout << "Standard Deviation: " << stdDev * 1000 << std::endl; std::cout << "%error (3*stdDev)/Average: " << 3*stdDev / AverageTime * 100 << "%" << std::endl; std::cout << "min time: " << minTime * 1000 << " ms" << std::endl; std::cout << "max time: " << maxTime * 1000 << " ms" << std::endl; } #ifdef RUN_CPU_SORTS std::cout << "Average speedup over CPU quicksort: " << quickSortTime/AverageTime << std::endl; std::cout << "Average speedup over CPU std::sort: " << stdSortTime/AverageTime << std::endl; #endif // RUN_CPU_SORTS printf("-------done--------------------------------------------------------\n"); free(pArray); free(pArrayCopy); 
return 0; } int main(int argc, char** argv) { unsigned int NUM_ITERATIONS; uint heightReSz, widthReSz; bool success = parseArgs (argc, argv, &NUM_ITERATIONS, &widthReSz, &heightReSz); if (!success) return -1; uint arraySize = widthReSz*heightReSz; test<uint>(arraySize, NUM_ITERATIONS, "uint"); test<float>(arraySize, NUM_ITERATIONS, "float"); test<double>(arraySize, NUM_ITERATIONS, "double"); return 0; }
9265c0de45d0b0c4dbe8c59095d2ce0007ada9be.cu
/* Copyright (c) 2014-2019, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // QuicksortMain.cpp : Defines the entry point for the console application. 
// #include <stdio.h> #include <assert.h> #include <string.h> #include <limits.h> #include <math.h> #include <iostream> #include <algorithm> #include <iterator> #include <random> #include <vector> #include <map> #include <hip/hip_runtime.h> #define RUN_CPU_SORTS //#define GET_DETAILED_PERFORMANCE #define gpucheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Types: typedef unsigned int uint; #ifdef min #undef min #endif #ifdef max #undef max #endif /// return a timestamp with sub-second precision /** QueryPerformanceCounter and clock_gettime have an undefined starting point (null/zero) * and can wrap around, i.e. be nulled again. **/ double seconds() { struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return now.tv_sec + now.tv_nsec / 1000000000.0; } bool parseArgs(int argc, char** argv, unsigned int* test_iterations, unsigned int* widthReSz, unsigned int* heightReSz) { const char sUsageString[512] = "Usage: Quicksort [num test iterations] [SurfWidth(^2 only)] [SurfHeight(^2 only)]"; if (argc != 4) { printf(sUsageString); return false; } else { *test_iterations = atoi (argv[1]); *widthReSz = atoi (argv[2]); *heightReSz = atoi (argv[3]); return true; } } #include "Quicksort.h" #include "QuicksortKernels.cuh" template <class T> T* partition(T* left, T* right, T pivot) { // move pivot to the end T temp = *right; *right = pivot; *left = temp; T* store = left; for(T* p = left; p != right; p++) { if (*p < pivot) { temp = *store; *store = *p; *p = temp; store++; } } temp = *store; *store = pivot; *right = temp; return store; } template <class T> void quicksort(T* data, int left, int right) { T* store = partition(data + left, data + right, data[left]); int nright = store-data; int nleft = nright+1; if (left < nright) { if (nright - left > 32) { 
quicksort(data, left, nright); } else std::sort(data + left, data + nright + 1); } if (nleft < right) { if (right - nleft > 32) { quicksort(data, nleft, right); } else { std::sort(data + nleft, data + right + 1); } } } template <class T> void gqsort(T *db, T *dnb, std::vector<block_record<T>>& blocks, std::vector<parent_record>& parents, std::vector<work_record<T>>& news, bool reset) { news.resize(blocks.size()*2); #ifdef GET_DETAILED_PERFORMANCE static double absoluteTotal = 0.0; static uint count = 0; if (reset) { absoluteTotal = 0.0; count = 0; } double beginClock, endClock; beginClock = seconds(); #endif block_record<T> *blocksb; parent_record *parentsb; work_record<T> *newsb; gpucheck(hipMalloc((void**)&blocksb, sizeof(block_record<T>)*blocks.size())); gpucheck(hipMalloc((void**)&parentsb, sizeof(parent_record)*parents.size())); gpucheck(hipMalloc((void**)&newsb, sizeof(work_record<T>)*news.size())); gpucheck(hipMemcpy(blocksb, blocks.data(), sizeof(block_record<T>)*blocks.size(), hipMemcpyHostToDevice)); gpucheck(hipMemcpy(parentsb, parents.data(), sizeof(parent_record)*parents.size(), hipMemcpyHostToDevice)); gpucheck(hipMemcpy(newsb, news.data(), sizeof(work_record<T>)*news.size(), hipMemcpyHostToDevice)); hipLaunchKernelGGL(gqsort_kernel, dim3(blocks.size()), dim3(GQSORT_LOCAL_WORKGROUP_SIZE), 0, 0, db, dnb, blocksb, parentsb, newsb); gpucheck( hipPeekAtLastError() ); gpucheck( hipDeviceSynchronize() ); gpucheck(hipMemcpy(news.data(), newsb, sizeof(work_record<T>)*news.size(), hipMemcpyDeviceToHost)); gpucheck(hipFree(blocksb)); gpucheck(hipFree(parentsb)); gpucheck(hipFree(newsb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; absoluteTotal += totalTime; std::cout << ++count << ": gqsort time " << absoluteTotal * 1000 << " ms" << std::endl; #endif #ifdef DEBUG printf("\noutput news\n"); for (int i = 0; i < news.size(); i++) { printf("%u %u %u %u\n", news[i].start, news[i].end, news[i].pivot, 
news[i].direction); } #endif } template <class T> void lqsort(T *db, T *dnb, std::vector<work_record<T>>& done) { #ifdef GET_DETAILED_PERFORMANCE double beginClock, endClock; beginClock = seconds(); #endif work_record<T>* doneb; //std::cout << "done size is " << done.size() << std::endl; gpucheck(hipMalloc((void**)&doneb, sizeof(work_record<T>)*done.size())); gpucheck(hipMemcpy(doneb, done.data(), sizeof(work_record<T>)*done.size(), hipMemcpyHostToDevice)); hipLaunchKernelGGL(lqsort_kernel, dim3(done.size()), dim3(LQSORT_LOCAL_WORKGROUP_SIZE), 0, 0, db, dnb, doneb); gpucheck( hipPeekAtLastError() ); gpucheck( hipDeviceSynchronize() ); // Lets do phase 2 pass gpucheck(hipFree(doneb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; std::cout << "lqsort time " << totalTime * 1000 << " ms" << std::endl; #endif } size_t optp(size_t s, double k, size_t m) { return (size_t)pow(2, floor(log(s*k + m)/log(2.0) + 0.5)); } template <class T> void GPUQSort(size_t size, T* d, T* dn) { // allocate buffers T *db, *dnb; hipMalloc((void**)&db, ((sizeof(T)*size)/64 + 1)*64); hipMemcpy(db, d, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyHostToDevice); hipMalloc((void**)&dnb, ((sizeof(T)*size)/64 + 1)*64); hipMemcpy(dnb, dn, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyHostToDevice); const size_t MAXSEQ = optp(size, 0.00009516, 203); const size_t MAX_SIZE = 12*std::max(MAXSEQ, (size_t)QUICKSORT_BLOCK_SIZE); //std::cout << "MAXSEQ = " << MAXSEQ << std::endl; uint startpivot = median_host(d[0], d[size/2], d[size-1]); std::vector<work_record<T>> work, done, news; work.reserve(MAX_SIZE); done.reserve(MAX_SIZE); news.reserve(MAX_SIZE); std::vector<parent_record> parent_records; parent_records.reserve(MAX_SIZE); std::vector<block_record<T>> blocks; blocks.reserve(MAX_SIZE); work.push_back(work_record<T>(0, size, startpivot, 1)); bool reset = true; while(!work.empty() /*&& work.size() + done.size() < MAXSEQ*/) { size_t blocksize = 0; for(auto it = 
work.begin(); it != work.end(); ++it) { blocksize += std::max((it->end - it->start)/MAXSEQ, (size_t)1); } for(auto it = work.begin(); it != work.end(); ++it) { uint start = it->start; uint end = it->end; uint pivot = it->pivot; uint direction = it->direction; uint blockcount = (end - start + blocksize - 1)/blocksize; parent_record prnt(start, end, start, end, blockcount-1); parent_records.push_back(prnt); for(uint i = 0; i < blockcount - 1; i++) { uint bstart = start + blocksize*i; block_record<T> br(bstart, bstart+blocksize, pivot, direction, parent_records.size()-1); blocks.push_back(br); } block_record<T> br(start + blocksize*(blockcount - 1), end, pivot, direction, parent_records.size()-1); blocks.push_back(br); } //std::cout << " blocks = " << blocks.size() << " parent records = " << parent_records.size() << " news = " << news.size() << std::endl; gqsort<T>(db, dnb, blocks, parent_records, news, reset); reset = false; work.clear(); parent_records.clear(); blocks.clear(); for(auto it = news.begin(); it != news.end(); ++it) { if (it->direction != EMPTY_RECORD) { if (it->end - it->start <= QUICKSORT_BLOCK_SIZE /*size/MAXSEQ*/) { if (it->end - it->start > 0) done.push_back(*it); } else { work.push_back(*it); } } } news.clear(); } for(auto it = work.begin(); it != work.end(); ++it) { if (it->end - it->start > 0) done.push_back(*it); } lqsort<T>(db, dnb, done); hipMemcpy(d, db, ((sizeof(T)*size)/64 + 1)*64, hipMemcpyDeviceToHost); hipFree(db); hipFree(dnb); } template <class T> int test(uint arraySize, unsigned int NUM_ITERATIONS, const std::string& type_name) { double totalTime, quickSortTime, stdSortTime; double beginClock, endClock; printf("\n\n\n--------------------------------------------------------------------\n"); printf("Allocating array size of %d (data type: %s)\n", arraySize, type_name.c_str()); T* pArray = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); T* pArrayCopy = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); 
std::generate(pArray, pArray + arraySize, [](){static T i = 0; return ++i; }); std::shuffle(pArray, pArray + arraySize, std::mt19937(19937)); #ifdef RUN_CPU_SORTS std::cout << "Sorting the regular way..." << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); std::sort(pArrayCopy, pArrayCopy + arraySize); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; stdSortTime = totalTime; std::cout << "quicksort on the cpu: " << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); quicksort(pArrayCopy, 0, arraySize-1); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; quickSortTime = totalTime; #ifdef TRUST_BUT_VERIFY { std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, verify.begin()); std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArrayCopy); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArrayCopy[i]) { //std:: cout << "discrepancy at " << i << " " << pArrayCopy[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { char y; std::cout << "num_discrepancies: " << num_discrepancies << std::endl; std::cin >> y; } } #endif #endif // RUN_CPU_SORTS std::cout << "Sorting with GPU quicksort: " << std::endl; std::vector<uint> original(arraySize); std::copy(pArray, pArray + arraySize, original.begin()); std::vector<double> times; times.resize(NUM_ITERATIONS); double AverageTime = 0.0; uint num_failures = 0; for(uint k = 0; k < NUM_ITERATIONS; k++) { std::copy(original.begin(), original.end(), pArray); std::vector<uint> seqs; std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, 
verify.begin()); beginClock = seconds(); GPUQSort(arraySize, pArray, pArrayCopy); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; times[k] = totalTime; AverageTime += totalTime; #ifdef TRUST_BUT_VERIFY std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArray); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArray[i]) { std:: cout << "discrepancy at " << i << " " << pArray[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { std::cout << "num_discrepancies: " << num_discrepancies << std::endl; num_failures ++; } #endif } std::cout << " Number of failures: " << num_failures << " out of " << NUM_ITERATIONS << std::endl; AverageTime = AverageTime/NUM_ITERATIONS; std::cout << "Average Time: " << AverageTime * 1000 << " ms" << std::endl; double stdDev = 0.0, minTime = 1000000.0, maxTime = 0.0; for(uint k = 0; k < NUM_ITERATIONS; k++) { stdDev += (AverageTime - times[k])*(AverageTime - times[k]); minTime = std::min(minTime, times[k]); maxTime = std::max(maxTime, times[k]); } if (NUM_ITERATIONS > 1) { stdDev = sqrt(stdDev/(NUM_ITERATIONS - 1)); std::cout << "Standard Deviation: " << stdDev * 1000 << std::endl; std::cout << "%error (3*stdDev)/Average: " << 3*stdDev / AverageTime * 100 << "%" << std::endl; std::cout << "min time: " << minTime * 1000 << " ms" << std::endl; std::cout << "max time: " << maxTime * 1000 << " ms" << std::endl; } #ifdef RUN_CPU_SORTS std::cout << "Average speedup over CPU quicksort: " << quickSortTime/AverageTime << std::endl; std::cout << "Average speedup over CPU std::sort: " << stdSortTime/AverageTime << std::endl; #endif // RUN_CPU_SORTS printf("-------done--------------------------------------------------------\n"); free(pArray); 
free(pArrayCopy); return 0; } int main(int argc, char** argv) { unsigned int NUM_ITERATIONS; uint heightReSz, widthReSz; bool success = parseArgs (argc, argv, &NUM_ITERATIONS, &widthReSz, &heightReSz); if (!success) return -1; uint arraySize = widthReSz*heightReSz; test<uint>(arraySize, NUM_ITERATIONS, "uint"); test<float>(arraySize, NUM_ITERATIONS, "float"); test<double>(arraySize, NUM_ITERATIONS, "double"); return 0; }
f07b9f88f0678009c8a3ffbf83b0906c970bc250.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matmul.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float_t *A = NULL; hipMalloc(&A, XSIZE*YSIZE); const float_t *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float_t *C = NULL; hipMalloc(&C, XSIZE*YSIZE); const float_t alpha = 1; const float_t beta = 1; int n = XSIZE*YSIZE; int d = 1; int k = 1; int max_block_rows = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matmul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,alpha,beta,n,d,k,max_block_rows); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matmul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,alpha,beta,n,d,k,max_block_rows); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matmul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,alpha,beta,n,d,k,max_block_rows); } auto 
end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f07b9f88f0678009c8a3ffbf83b0906c970bc250.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matmul.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float_t *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); const float_t *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float_t *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); const float_t alpha = 1; const float_t beta = 1; int n = XSIZE*YSIZE; int d = 1; int k = 1; int max_block_rows = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matmul<<<gridBlock,threadBlock>>>(A,B,C,alpha,beta,n,d,k,max_block_rows); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matmul<<<gridBlock,threadBlock>>>(A,B,C,alpha,beta,n,d,k,max_block_rows); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matmul<<<gridBlock,threadBlock>>>(A,B,C,alpha,beta,n,d,k,max_block_rows); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c72ac9f4f3ff119115f14fedd29e4fc378d510ec.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file bfs_app.cu * * @brief Gunrock breadth-first search (BFS) application */ #include <gunrock/gunrock.h> // graph construction utilities #include <gunrock/graphio/market.cuh> // breadth-first search includes #include <gunrock/app/bfs/bfs_enactor.cuh> #include <gunrock/app/bfs/bfs_problem.cuh> #include <gunrock/app/bfs/bfs_functor.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bfs; /** * @brief BFS_Parameter structure */ struct BFS_Parameter : gunrock::app::TestParameter_Base { public: bool mark_predecessors; // mark src-distance vs. 
parent vertices bool enable_idempotence; // enable idempotence operation bool direction_optimized; // enable direction optimization double max_queue_sizing1; // maximum queue sizing factor BFS_Parameter() { mark_predecessors = false; enable_idempotence = false; direction_optimized = false; max_queue_sizing1 = -1.0f; } ~BFS_Parameter() {} }; template <typename VertexId, typename SizeT, typename Value, // bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE> float runBFS(GRGraph *output, BFS_Parameter *parameter); /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam MARK_PREDECESSORS Enable mark predecessors * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS> float RunTests_enable_idempotence(GRGraph *output, BFS_Parameter *parameter) { if (parameter->enable_idempotence) return runBFS<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, true>(output, parameter); else return runBFS<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value> float RunTests_mark_predecessors(GRGraph *output, BFS_Parameter *parameter) { if (parameter->mark_predecessors) return RunTests_enable_idempotence<VertexId, SizeT, Value, true>(output, parameter); else return RunTests_enable_idempotence<VertexId, SizeT, Value, false>( 
output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam MARK_PREDECESSORS Enable mark predecessors * @tparam ENABLE_IDEMPOTENCE Enable idempotent operation * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE> float runBFS(GRGraph *output, BFS_Parameter *parameter) { typedef BFSProblem<VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> //(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) > Problem; // does not use double buffer typedef BFSEnactor<Problem> // INSTRUMENT, // DEBUG, // SIZE_CHECK > Enactor; Csr<VertexId, SizeT, Value> *graph = (Csr<VertexId, SizeT, Value> *)parameter->graph; bool quiet = parameter->g_quiet; int max_grid_size = parameter->max_grid_size; int num_gpus = parameter->num_gpus; int num_iters = parameter->iterations; double max_queue_sizing = parameter->max_queue_sizing; double max_queue_sizing1 = parameter->max_queue_sizing1; double max_in_sizing = parameter->max_in_sizing; ContextPtr *context = (ContextPtr *)parameter->context; std::string partition_method = parameter->partition_method; int *gpu_idx = parameter->gpu_idx; hipStream_t *streams = parameter->streams; float partition_factor = parameter->partition_factor; int partition_seed = parameter->partition_seed; bool g_stream_from_host = parameter->g_stream_from_host; std::string traversal_mode = parameter->traversal_mode; bool instrument = parameter->instrumented; bool debug = parameter->debug; bool size_check = parameter->size_check; bool undirected = parameter->g_undirected; bool direction_optimized = parameter->direction_optimized; size_t *org_size = new size_t[num_gpus]; // Allocate host-side label array VertexId *h_labels = new 
VertexId[graph->nodes]; VertexId *h_preds = NULL; if (MARK_PREDECESSORS) { h_preds = new VertexId[graph->nodes]; } for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&(org_size[gpu]), &dummy); } Problem *problem = new Problem(direction_optimized, undirected); // Allocate problem on GPU util::GRError( problem->Init(g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "Problem BFS Initialization Failed", __FILE__, __LINE__); Enactor *enactor = new Enactor(num_gpus, gpu_idx, instrument, debug, size_check, direction_optimized); // BFS enactor map util::GRError(enactor->Init(context, problem, max_grid_size, traversal_mode), "BFS Enactor init failed", __FILE__, __LINE__); CpuTimer cpu_timer; float elapsed = 0.0f; for (int i = 0; i < num_iters; ++i) { printf("Round %d of bfs.\n", i + 1); util::GRError(problem->Reset(parameter->src[i], enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BFS Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "BFS Enactor Reset failed", __FILE__, __LINE__); cpu_timer.Start(); util::GRError(enactor->Enact(parameter->src[i], traversal_mode), "BFS Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); elapsed += cpu_timer.ElapsedMillis(); } // Copy out results util::GRError(problem->Extract(h_labels, h_preds), "BFS Problem Data Extraction Failed", __FILE__, __LINE__); output->node_value1 = (Value *)&h_labels[0]; if (MARK_PREDECESSORS) output->node_value2 = (VertexId *)&h_preds[0]; if (!quiet) { printf(" GPU Breath-First Search finished in %lf msec.\n", elapsed); } // Clean up if (org_size) delete[] org_size; org_size = NULL; if (enactor) delete enactor; enactor = NULL; if (problem) delete problem; problem = NULL; return elapsed; } /** * @brief Dispatch function to handle configurations * * @param[out] grapho Pointer to output graph structure of the 
problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Primitive-specific configurations * @param[in] data_t Data type configurations * @param[in] context ModernGPU context * @param[in] streams CUDA stream * * \return Elapsed run time in milliseconds */ float dispatch_bfs(GRGraph *grapho, const GRGraph *graphi, const GRSetup *config, const GRTypes data_t, ContextPtr *context, hipStream_t *streams) { BFS_Parameter *parameter = new BFS_Parameter; parameter->iterations = config->num_iters; parameter->src = (long long *)malloc(sizeof(long long) * config->num_iters); parameter->context = context; parameter->streams = streams; parameter->g_quiet = config->quiet; parameter->num_gpus = config->num_devices; parameter->gpu_idx = config->device_list; parameter->mark_predecessors = config->mark_predecessors; parameter->enable_idempotence = config->enable_idempotence; float elapsed_time; switch (data_t.VTXID_TYPE) { case VTXID_INT: { switch (data_t.SIZET_TYPE) { case SIZET_INT: { switch (data_t.VALUE_TYPE) { case VALUE_INT: // template type = <int, int, int> { // build input CSR format graph Csr<int, int, int> csr(false); csr.nodes = graphi->num_nodes; csr.edges = graphi->num_edges; csr.row_offsets = (int *)graphi->row_offsets; csr.column_indices = (int *)graphi->col_indices; parameter->graph = &csr; // determine source vertex to start switch (config->source_mode) { case randomize: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = graphio::RandomNode(csr.nodes); } break; } case largest_degree: { int max_deg = 0; int node_id = csr.GetNodeWithHighestDegree(max_deg); for (int i = 0; i < config->num_iters; ++i) { parameter->src[i] = node_id; } break; } case manually: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = config->source_vertex[i]; } break; } default: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = 0; } break; } } if (!parameter->g_quiet) { printf(" source: %lld", 
(long long)parameter->src[0]); for (int i = 1; i < config->num_iters; ++i) { printf(",%lld", (long long)parameter->src[i]); } printf("\n"); } elapsed_time = RunTests_mark_predecessors<int, int, int>(grapho, parameter); // reset for free memory csr.row_offsets = NULL; csr.column_indices = NULL; break; } case VALUE_UINT: // template type = <int, uint, int> { // not yet support printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_FLOAT: // template type = <int, float, int> { // not yet support printf("Not Yet Support This DataType Combination.\n"); break; } } break; } } break; } } free(parameter->src); return elapsed_time; } /* * @brief Entry of gunrock_bfs function * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Gunrock primitive specific configurations * @param[in] data_t Gunrock data type structure */ float gunrock_bfs(GRGraph *grapho, const GRGraph *graphi, const GRSetup *config, const GRTypes data_t) { // GPU-related configurations int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; hipStream_t *streams = NULL; num_gpus = config->num_devices; gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; ++i) { gpu_idx[i] = config->device_list[i]; } // Create streams and MordernGPU context for each GPU streams = new hipStream_t[num_gpus * num_gpus * 2]; context = new ContextPtr[num_gpus * num_gpus]; if (!config->quiet) { printf(" using %d GPUs:", num_gpus); } for (int gpu = 0; gpu < num_gpus; ++gpu) { if (!config->quiet) { printf(" %d ", gpu_idx[gpu]); } util::SetDevice(gpu_idx[gpu]); for (int i = 0; i < num_gpus * 2; ++i) { int _i = gpu * num_gpus * 2 + i; util::GRError(hipStreamCreate(&streams[_i]), "hipStreamCreate fialed.", __FILE__, __LINE__); if (i < num_gpus) { context[gpu * num_gpus + i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu], streams[_i]); } } } if (!config->quiet) { printf("\n"); } return 
dispatch_bfs(grapho, graphi, config, data_t, context, streams); } /* * @brief Simple interface take in CSR arrays as input * * @param[out] bfs_label Return BFS label (depth) per nodes or the * predecessor per nodes * @param[in] num_nodes Number of nodes of the input graph * @param[in] num_edges Number of edges of the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] num_iters Number of BFS runs. Note if num_iters > 1, * the bfs_lbel will only store the results from the last run * @param[in] source Sources to begin traverse * @param[in] source_mode Enumerator of source mode: manually, * randomize, largest_degree * @param[in] mark_predecessors If the flag is set, mark predecessors * instead of bfs label * @param[in] enable_idempotence If the flag is set, use optimizations that * allow idempotence operation (will usually bring better performance) */ float bfs(int *bfs_label, int *bfs_preds, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int num_iters, int *source, enum SrcMode source_mode, const bool mark_predecessors, const bool enable_idempotence) { struct GRTypes data_t; // primitive-specific data types data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier data_t.SIZET_TYPE = SIZET_INT; // integer graph size type data_t.VALUE_TYPE = VALUE_INT; // integer attributes type struct GRSetup *config = InitSetup(num_iters, source); // primitive-specific configures config->mark_predecessors = mark_predecessors; // do not mark predecessors config->enable_idempotence = enable_idempotence; // wether enable idempotence config->source_mode = source_mode; struct GRGraph *grapho = (struct GRGraph *)malloc(sizeof(struct GRGraph)); struct GRGraph *graphi = (struct GRGraph *)malloc(sizeof(struct GRGraph)); graphi->num_nodes = num_nodes; // setting graph nodes graphi->num_edges = num_edges; // setting graph edges 
graphi->row_offsets = (void *)&row_offsets[0]; // setting row_offsets graphi->col_indices = (void *)&col_indices[0]; // setting col_indices float elapsed_time = gunrock_bfs(grapho, graphi, config, data_t); memcpy(bfs_label, (int *)grapho->node_value1, num_nodes * sizeof(int)); if (mark_predecessors) memcpy(bfs_preds, (int *)grapho->node_value2, num_nodes * sizeof(int)); if (graphi) free(graphi); if (grapho) free(grapho); if (config) free(config); return elapsed_time; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
c72ac9f4f3ff119115f14fedd29e4fc378d510ec.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file bfs_app.cu * * @brief Gunrock breadth-first search (BFS) application */ #include <gunrock/gunrock.h> // graph construction utilities #include <gunrock/graphio/market.cuh> // breadth-first search includes #include <gunrock/app/bfs/bfs_enactor.cuh> #include <gunrock/app/bfs/bfs_problem.cuh> #include <gunrock/app/bfs/bfs_functor.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bfs; /** * @brief BFS_Parameter structure */ struct BFS_Parameter : gunrock::app::TestParameter_Base { public: bool mark_predecessors; // mark src-distance vs. 
parent vertices bool enable_idempotence; // enable idempotence operation bool direction_optimized; // enable direction optimization double max_queue_sizing1; // maximum queue sizing factor BFS_Parameter() { mark_predecessors = false; enable_idempotence = false; direction_optimized = false; max_queue_sizing1 = -1.0f; } ~BFS_Parameter() {} }; template <typename VertexId, typename SizeT, typename Value, // bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE> float runBFS(GRGraph *output, BFS_Parameter *parameter); /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam MARK_PREDECESSORS Enable mark predecessors * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS> float RunTests_enable_idempotence(GRGraph *output, BFS_Parameter *parameter) { if (parameter->enable_idempotence) return runBFS<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, true>(output, parameter); else return runBFS<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value> float RunTests_mark_predecessors(GRGraph *output, BFS_Parameter *parameter) { if (parameter->mark_predecessors) return RunTests_enable_idempotence<VertexId, SizeT, Value, true>(output, parameter); else return RunTests_enable_idempotence<VertexId, SizeT, Value, false>( 
output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam MARK_PREDECESSORS Enable mark predecessors * @tparam ENABLE_IDEMPOTENCE Enable idempotent operation * * @param[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters * * \return Elapsed run time in milliseconds */ template <typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE> float runBFS(GRGraph *output, BFS_Parameter *parameter) { typedef BFSProblem<VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> //(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) > Problem; // does not use double buffer typedef BFSEnactor<Problem> // INSTRUMENT, // DEBUG, // SIZE_CHECK > Enactor; Csr<VertexId, SizeT, Value> *graph = (Csr<VertexId, SizeT, Value> *)parameter->graph; bool quiet = parameter->g_quiet; int max_grid_size = parameter->max_grid_size; int num_gpus = parameter->num_gpus; int num_iters = parameter->iterations; double max_queue_sizing = parameter->max_queue_sizing; double max_queue_sizing1 = parameter->max_queue_sizing1; double max_in_sizing = parameter->max_in_sizing; ContextPtr *context = (ContextPtr *)parameter->context; std::string partition_method = parameter->partition_method; int *gpu_idx = parameter->gpu_idx; cudaStream_t *streams = parameter->streams; float partition_factor = parameter->partition_factor; int partition_seed = parameter->partition_seed; bool g_stream_from_host = parameter->g_stream_from_host; std::string traversal_mode = parameter->traversal_mode; bool instrument = parameter->instrumented; bool debug = parameter->debug; bool size_check = parameter->size_check; bool undirected = parameter->g_undirected; bool direction_optimized = parameter->direction_optimized; size_t *org_size = new size_t[num_gpus]; // Allocate host-side label array VertexId *h_labels = new 
VertexId[graph->nodes]; VertexId *h_preds = NULL; if (MARK_PREDECESSORS) { h_preds = new VertexId[graph->nodes]; } for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&(org_size[gpu]), &dummy); } Problem *problem = new Problem(direction_optimized, undirected); // Allocate problem on GPU util::GRError( problem->Init(g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "Problem BFS Initialization Failed", __FILE__, __LINE__); Enactor *enactor = new Enactor(num_gpus, gpu_idx, instrument, debug, size_check, direction_optimized); // BFS enactor map util::GRError(enactor->Init(context, problem, max_grid_size, traversal_mode), "BFS Enactor init failed", __FILE__, __LINE__); CpuTimer cpu_timer; float elapsed = 0.0f; for (int i = 0; i < num_iters; ++i) { printf("Round %d of bfs.\n", i + 1); util::GRError(problem->Reset(parameter->src[i], enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BFS Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "BFS Enactor Reset failed", __FILE__, __LINE__); cpu_timer.Start(); util::GRError(enactor->Enact(parameter->src[i], traversal_mode), "BFS Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); elapsed += cpu_timer.ElapsedMillis(); } // Copy out results util::GRError(problem->Extract(h_labels, h_preds), "BFS Problem Data Extraction Failed", __FILE__, __LINE__); output->node_value1 = (Value *)&h_labels[0]; if (MARK_PREDECESSORS) output->node_value2 = (VertexId *)&h_preds[0]; if (!quiet) { printf(" GPU Breath-First Search finished in %lf msec.\n", elapsed); } // Clean up if (org_size) delete[] org_size; org_size = NULL; if (enactor) delete enactor; enactor = NULL; if (problem) delete problem; problem = NULL; return elapsed; } /** * @brief Dispatch function to handle configurations * * @param[out] grapho Pointer to output graph structure of the 
problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Primitive-specific configurations * @param[in] data_t Data type configurations * @param[in] context ModernGPU context * @param[in] streams CUDA stream * * \return Elapsed run time in milliseconds */ float dispatch_bfs(GRGraph *grapho, const GRGraph *graphi, const GRSetup *config, const GRTypes data_t, ContextPtr *context, cudaStream_t *streams) { BFS_Parameter *parameter = new BFS_Parameter; parameter->iterations = config->num_iters; parameter->src = (long long *)malloc(sizeof(long long) * config->num_iters); parameter->context = context; parameter->streams = streams; parameter->g_quiet = config->quiet; parameter->num_gpus = config->num_devices; parameter->gpu_idx = config->device_list; parameter->mark_predecessors = config->mark_predecessors; parameter->enable_idempotence = config->enable_idempotence; float elapsed_time; switch (data_t.VTXID_TYPE) { case VTXID_INT: { switch (data_t.SIZET_TYPE) { case SIZET_INT: { switch (data_t.VALUE_TYPE) { case VALUE_INT: // template type = <int, int, int> { // build input CSR format graph Csr<int, int, int> csr(false); csr.nodes = graphi->num_nodes; csr.edges = graphi->num_edges; csr.row_offsets = (int *)graphi->row_offsets; csr.column_indices = (int *)graphi->col_indices; parameter->graph = &csr; // determine source vertex to start switch (config->source_mode) { case randomize: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = graphio::RandomNode(csr.nodes); } break; } case largest_degree: { int max_deg = 0; int node_id = csr.GetNodeWithHighestDegree(max_deg); for (int i = 0; i < config->num_iters; ++i) { parameter->src[i] = node_id; } break; } case manually: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = config->source_vertex[i]; } break; } default: { for (int i = 0; i < parameter->iterations; ++i) { parameter->src[i] = 0; } break; } } if (!parameter->g_quiet) { printf(" source: %lld", 
(long long)parameter->src[0]); for (int i = 1; i < config->num_iters; ++i) { printf(",%lld", (long long)parameter->src[i]); } printf("\n"); } elapsed_time = RunTests_mark_predecessors<int, int, int>(grapho, parameter); // reset for free memory csr.row_offsets = NULL; csr.column_indices = NULL; break; } case VALUE_UINT: // template type = <int, uint, int> { // not yet support printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_FLOAT: // template type = <int, float, int> { // not yet support printf("Not Yet Support This DataType Combination.\n"); break; } } break; } } break; } } free(parameter->src); return elapsed_time; } /* * @brief Entry of gunrock_bfs function * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Gunrock primitive specific configurations * @param[in] data_t Gunrock data type structure */ float gunrock_bfs(GRGraph *grapho, const GRGraph *graphi, const GRSetup *config, const GRTypes data_t) { // GPU-related configurations int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; cudaStream_t *streams = NULL; num_gpus = config->num_devices; gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; ++i) { gpu_idx[i] = config->device_list[i]; } // Create streams and MordernGPU context for each GPU streams = new cudaStream_t[num_gpus * num_gpus * 2]; context = new ContextPtr[num_gpus * num_gpus]; if (!config->quiet) { printf(" using %d GPUs:", num_gpus); } for (int gpu = 0; gpu < num_gpus; ++gpu) { if (!config->quiet) { printf(" %d ", gpu_idx[gpu]); } util::SetDevice(gpu_idx[gpu]); for (int i = 0; i < num_gpus * 2; ++i) { int _i = gpu * num_gpus * 2 + i; util::GRError(cudaStreamCreate(&streams[_i]), "cudaStreamCreate fialed.", __FILE__, __LINE__); if (i < num_gpus) { context[gpu * num_gpus + i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu], streams[_i]); } } } if (!config->quiet) { printf("\n"); } return 
dispatch_bfs(grapho, graphi, config, data_t, context, streams); } /* * @brief Simple interface take in CSR arrays as input * * @param[out] bfs_label Return BFS label (depth) per nodes or the * predecessor per nodes * @param[in] num_nodes Number of nodes of the input graph * @param[in] num_edges Number of edges of the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] num_iters Number of BFS runs. Note if num_iters > 1, * the bfs_lbel will only store the results from the last run * @param[in] source Sources to begin traverse * @param[in] source_mode Enumerator of source mode: manually, * randomize, largest_degree * @param[in] mark_predecessors If the flag is set, mark predecessors * instead of bfs label * @param[in] enable_idempotence If the flag is set, use optimizations that * allow idempotence operation (will usually bring better performance) */ float bfs(int *bfs_label, int *bfs_preds, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int num_iters, int *source, enum SrcMode source_mode, const bool mark_predecessors, const bool enable_idempotence) { struct GRTypes data_t; // primitive-specific data types data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier data_t.SIZET_TYPE = SIZET_INT; // integer graph size type data_t.VALUE_TYPE = VALUE_INT; // integer attributes type struct GRSetup *config = InitSetup(num_iters, source); // primitive-specific configures config->mark_predecessors = mark_predecessors; // do not mark predecessors config->enable_idempotence = enable_idempotence; // wether enable idempotence config->source_mode = source_mode; struct GRGraph *grapho = (struct GRGraph *)malloc(sizeof(struct GRGraph)); struct GRGraph *graphi = (struct GRGraph *)malloc(sizeof(struct GRGraph)); graphi->num_nodes = num_nodes; // setting graph nodes graphi->num_edges = num_edges; // setting graph edges 
graphi->row_offsets = (void *)&row_offsets[0]; // setting row_offsets graphi->col_indices = (void *)&col_indices[0]; // setting col_indices float elapsed_time = gunrock_bfs(grapho, graphi, config, data_t); memcpy(bfs_label, (int *)grapho->node_value1, num_nodes * sizeof(int)); if (mark_predecessors) memcpy(bfs_preds, (int *)grapho->node_value2, num_nodes * sizeof(int)); if (graphi) free(graphi); if (grapho) free(grapho); if (config) free(config); return elapsed_time; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
f74ac39bec8c08ba02cfb7f92867f78249ede1fd.hip
// !!! This is a file automatically generated by hipify!!! #include "funset.hpp" #include <iostream> #include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __device__: , __device____global____device__ __device____device__; __device__ */ __device__ static int min_(int a, int b) { return a > b ? b : a; } __device__ static int max_(int a, int b) { return a > b ? a : b; } /* __global__: ;;,3.2 ;void;, ;, gridblock,(<<< >>>); a kernel,(GPUCUDAkernel( ),__global__);*/ __global__ static void layer_reverse(const float* src, float* dst, int length, int vec0, int vec1) { /* gridDim: ,,, ,,. grid,dim3 blockDim: ,block.dim3, block;,, ; blockIdx: ,; threadblockgrid,blockIdx.x [0,gridDim.x-1],blockIdx.y[0, gridDim.y-1].uint3, blockgrid; threadIdx: ,; threadblock;threadIdx.x, threadIdx.y,threadIdx.z;uint3 ,threadblock */ auto index = threadIdx.x + blockIdx.x * blockDim.x; while (index < length) { auto index1 = (index / vec0) % vec1; auto index2 = vec0 * (vec1 - 2 * index1 - 1) + index; index2 = max_(0, min_(length - 1, index2)); dst[index2] = src[index]; index += blockDim.x * gridDim.x; } } int layer_reverse_gpu(const float* src, float* dst, int length, const std::vector<int>& vec, float* elapsed_time) { /* hipEvent_t: CUDA event types,, CUDA,GPU ,CUDAGPU,CUDA GPU,*/ hipEvent_t start, stop; // hipEventCreate: , hipEventCreate(&start); hipEventCreate(&stop); // hipEventRecord: ,,start hipEventRecord(start, 0); float *d_src{ nullptr }, *d_dst{ nullptr }; // hipMalloc: hipMalloc(&d_src, length * sizeof(float)); hipMalloc(&d_dst, length * sizeof(float)); /* hipMemcpy: ,: (1). hipMemcpyHostToHost: (2). hipMemcpyHostToDevice: (3). hipMemcpyDeviceToHost: (4). hipMemcpyDeviceToDevice: (5). 
hipMemcpyDefault: , (CUDA6.0) cudaMemcpy */ hipMemcpy(d_src, src, length * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_dst, dst, length * sizeof(float), hipMemcpyHostToDevice); /* <<< >>>: CUDA,, CUDA,, ;, ,, ;; kernel,kernel, GPU,; API,<<<Dg,Db,Ns,S>>> ,Dgdim3,grid .Dg,gridDg.x*Dg.y*Dg.zblock;Db dim3,block.Db, blockDb.x*Db.y*Db.zthread;Nssize_t, , (extern __shared__);Ns,0;S cudaStream_t,.S,0. */ layer_reverse << <512, 512 >> >(d_src, d_dst, length, vec[0], vec[1]); hipMemcpy(dst, d_dst, length * sizeof(float), hipMemcpyDeviceToHost); // hipEventRecord: ,,stop hipEventRecord(stop, 0); // hipEventSynchronize: ,, hipEventSynchronize(stop); // cudaEventElapseTime: ,, hipEventElapsedTime(elapsed_time, start, stop); // hipEventDestroy: , hipEventDestroy(start); hipEventDestroy(stop); return 0; }
f74ac39bec8c08ba02cfb7f92867f78249ede1fd.cu
#include "funset.hpp" #include <iostream> #include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __device__: 函数类型限定符,表明被修饰的函数在设备上执行,只能从设备上调用, 但只能在其它__device__函数或者__global__函数中调用;__device__函数不支持递归; __device__函数的函数体内不能声明静态变量;__device__函数的参数数目是不可变化的; 不能对__device__函数取指针 */ __device__ static int min_(int a, int b) { return a > b ? b : a; } __device__ static int max_(int a, int b) { return a > b ? a : b; } /* __global__: 函数类型限定符;在设备上运行;在主机端调用,计算能力3.2及以上可以在 设备端调用;声明的函数的返回值必须是void类型;对此类型函数的调用是异步的,即在 设备完全完成它的运行之前就返回了;对此类型函数的调用必须指定执行配置,即用于在 设备上执行函数时的grid和block的维度,以及相关的流(即插入<<< >>>运算符); a kernel,表示此函数为内核函数(运行在GPU上的CUDA并行计算函数称为kernel(内核函 数),内核函数必须通过__global__函数类型限定符定义);*/ __global__ static void layer_reverse(const float* src, float* dst, int length, int vec0, int vec1) { /* gridDim: 内置变量,用于描述线程网格的维度,对于所有线程块来说,这个 变量是一个常数,用来保存线程格每一维的大小,即每个线程格中线程块的数量. 一个grid为三维,为dim3类型; blockDim: 内置变量,用于说明每个block的维度与尺寸.为dim3类型,包含 了block在三个维度上的尺寸信息;对于所有线程块来说,这个变量是一个常数, 保存的是线程块中每一维的线程数量; blockIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程块的索引;用 于说明当前thread所在的block在整个grid中的位置,blockIdx.x取值范围是 [0,gridDim.x-1],blockIdx.y取值范围是[0, gridDim.y-1].为uint3类型, 包含了一个block在grid中各个维度上的索引信息; threadIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程索引;用于 说明当前thread在block中的位置;如果线程是一维的可获取threadIdx.x,如果 是二维的还可获取threadIdx.y,如果是三维的还可获取threadIdx.z;为uint3类 型,包含了一个thread在block中各个维度的索引信息 */ auto index = threadIdx.x + blockIdx.x * blockDim.x; while (index < length) { auto index1 = (index / vec0) % vec1; auto index2 = vec0 * (vec1 - 2 * index1 - 1) + index; index2 = max_(0, min_(length - 1, index2)); dst[index2] = src[index]; index += blockDim.x * gridDim.x; } } int layer_reverse_gpu(const float* src, float* dst, int length, const std::vector<int>& vec, float* elapsed_time) { /* cudaEvent_t: CUDA event types,结构体类型, CUDA事件,用于测量GPU在某 个任务上花费的时间,CUDA中的事件本质上是一个GPU时间戳,由于CUDA事件是在 GPU上实现的,因此它们不适于对同时包含设备代码和主机代码的混合代码计时*/ cudaEvent_t start, stop; // cudaEventCreate: 创建一个事件对象,异步启动 
cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord: 记录一个事件,异步启动,start记录起始时间 cudaEventRecord(start, 0); float *d_src{ nullptr }, *d_dst{ nullptr }; // cudaMalloc: 在设备端分配内存 cudaMalloc(&d_src, length * sizeof(float)); cudaMalloc(&d_dst, length * sizeof(float)); /* cudaMemcpy: 在主机端和设备端拷贝数据,此函数第四个参数仅能是下面之一: (1). cudaMemcpyHostToHost: 拷贝数据从主机端到主机端 (2). cudaMemcpyHostToDevice: 拷贝数据从主机端到设备端 (3). cudaMemcpyDeviceToHost: 拷贝数据从设备端到主机端 (4). cudaMemcpyDeviceToDevice: 拷贝数据从设备端到设备端 (5). cudaMemcpyDefault: 从指针值自动推断拷贝数据方向,需要支持 统一虚拟寻址(CUDA6.0及以上版本) cudaMemcpy函数对于主机是同步的 */ cudaMemcpy(d_src, src, length * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_dst, dst, length * sizeof(float), cudaMemcpyHostToDevice); /* <<< >>>: 为CUDA引入的运算符,指定线程网格和线程块维度等,传递执行参 数给CUDA编译器和运行时系统,用于说明内核函数中的线程数量,以及线程是如何 组织的;尖括号中这些参数并不是传递给设备代码的参数,而是告诉运行时如何 启动设备代码,传递给设备代码本身的参数是放在圆括号中传递的,就像标准的函 数调用一样;不同计算能力的设备对线程的总数和组织方式有不同的约束;必须 先为kernel中用到的数组或变量分配好足够的空间,再调用kernel函数,否则在 GPU计算时会发生错误,例如越界等; 使用运行时API时,需要在调用的内核函数名与参数列表直接以<<<Dg,Db,Ns,S>>> 的形式设置执行配置,其中:Dg是一个dim3型变量,用于设置grid的维度和各个 维度上的尺寸.设置好Dg后,grid中将有Dg.x*Dg.y*Dg.z个block;Db是 一个dim3型变量,用于设置block的维度和各个维度上的尺寸.设置好Db后,每个 block中将有Db.x*Db.y*Db.z个thread;Ns是一个size_t型变量,指定各块为此调 用动态分配的共享存储器大小,这些动态分配的存储器可供声明为外部数组 (extern __shared__)的其他任何变量使用;Ns是一个可选参数,默认值为0;S为 cudaStream_t类型,用于设置与内核函数关联的流.S是一个可选参数,默认值0. */ layer_reverse << <512, 512 >> >(d_src, d_dst, length, vec[0], vec[1]); cudaMemcpy(dst, d_dst, length * sizeof(float), cudaMemcpyDeviceToHost); // cudaEventRecord: 记录一个事件,异步启动,stop记录结束时间 cudaEventRecord(stop, 0); // cudaEventSynchronize: 事件同步,等待一个事件完成,异步启动 cudaEventSynchronize(stop); // cudaEventElapseTime: 计算两个事件之间经历的时间,单位为毫秒,异步启动 cudaEventElapsedTime(elapsed_time, start, stop); // cudaEventDestroy: 销毁事件对象,异步启动 cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
de0a15df9a3e532bea2af0848c1fdce60e1b9281.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } } template <typename Dtype> void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); CUDA_POST_KERNEL_CHECK; } template void adam_update_gpu<float>(int, float*, float*, float*, float, float, float, float); template void adam_update_gpu<double>(int, double*, double*, double*, double, double, double, double); } // namespace caffe
de0a15df9a3e532bea2af0848c1fdce60e1b9281.cu
#include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } } template <typename Dtype> void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); CUDA_POST_KERNEL_CHECK; } template void adam_update_gpu<float>(int, float*, float*, float*, float, float, float, float); template void adam_update_gpu<double>(int, double*, double*, double*, double, double, double, double); } // namespace caffe
f91abda91bb84fb2f8af5395971bddd589af19f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <stdio.h> #include "float.h" //#include "helper_cuda.h" #include "volumehelper.h" //#include "volumehelper.cuh" #include <math.h> #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; //hipArray *d_volumeArray = 0; hipArray *d_volumeArray; hipArray *d_transferFuncArray; int size = 0; int3 dim = { 0,0,0 }; texture<uchar, hipTextureType3D, hipReadModeNormalizedFloat> volumeTex; texture<float4, hipTextureType3D, hipReadModeElementType> transferTex; surface<void, cudaSurfaceType3D> volumeSurf; //hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } #include "raycaster.cuh" #include "logscale.cuh" #include "blurdata.cuh" #include "circlevertex.cuh" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } extern "C" void initCuda( std::vector<unsigned char> h_volume, int3 pDim) { size = pDim.x *pDim.y*pDim.z; dim = pDim; //create 3D global texture hipChannelFormatDesc channelDescVolume = hipCreateChannelDesc<unsigned char>(); hipExtent vol_dim = { pDim.x, pDim.y, pDim.z }; hipMalloc3DArray(&d_volumeArray, &channelDescVolume, vol_dim, hipArraySurfaceLoadStore); // copy data to 3D array hipMemcpy3DParms copyParamsVol = { 0 }; copyParamsVol.srcPtr = make_hipPitchedPtr(h_volume.data(), vol_dim.width * sizeof(unsigned char), vol_dim.width, vol_dim.height); copyParamsVol.dstArray = d_volumeArray; copyParamsVol.extent = vol_dim; copyParamsVol.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParamsVol); // set texture parameters volumeTex.normalized = true; // access with normalized texture coordinates volumeTex.filterMode = hipFilterModeLinear; // linear interpolation volumeTex.addressMode[0] = hipAddressModeClamp; // 
clamp texture coordinates volumeTex.addressMode[1] = hipAddressModeClamp; //volumeTex.addressMode[2] = hipAddressModeClamp; // Bind the array to the texture hipBindTextureToArray(volumeTex, d_volumeArray, channelDescVolume); checkCudaErrors(hipBindSurfaceToArray(volumeSurf, d_volumeArray)); //---------------------transfer tex--------------------------------------------------------- // create transfer function texture float4 transferFunc[] = { //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.5, 0.0, 0.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.5, 0.0, 0.0, }, { 1.0, 1.0, 0.0, 0.0, }, { 0.0, 1.0, 0.0, 0.0, }, { 0.0, 1.0, 1.0, 0.5, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, }; //hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); //checkCudaErrors(hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc) / sizeof(float4), 1)); //checkCudaErrors(hipMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); //transferTex.filterMode = hipFilterModeLinear; //transferTex.normalized = true; // access with normalized texture coordinates //transferTex.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // // Bind the array to the texture //checkCudaErrors(hipBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2)); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>(); hipExtent tf_dim = { 9, 3, 1 }; hipMalloc3DArray(&d_transferFuncArray, &channelDesc, tf_dim); // copy data to 3D array 
hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr(transferFunc, tf_dim.width * sizeof(float4), tf_dim.width, tf_dim.height); copyParams.dstArray = d_transferFuncArray; copyParams.extent = tf_dim; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); // set texture parameters transferTex.normalized = true; // access with normalized texture coordinates transferTex.filterMode = hipFilterModeLinear; // linear interpolation transferTex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates transferTex.addressMode[1] = hipAddressModeClamp; //Bind the array to the texture hipBindTextureToArray(transferTex, d_transferFuncArray, channelDesc); } // Helper function for using CUDA to add vectors in parallel. extern "C" void addWithCuda(std::vector<int> &c, const std::vector<int> a, const std::vector<int> b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a.data(), size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b.data(), size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(size) , 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c.data(), dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); //return cudaStatus; } extern "C" void freeCudaBuffers() { checkCudaErrors(hipFreeArray(d_volumeArray)); checkCudaErrors(hipFreeArray(d_transferFuncArray)); } extern "C" void copyInvViewMatrix(std::vector<float> pInvViewMatrix) { checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, pInvViewMatrix.data(), sizeof(float)*pInvViewMatrix.size())); }
f91abda91bb84fb2f8af5395971bddd589af19f9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <stdio.h> #include "float.h" //#include "helper_cuda.h" #include "volumehelper.h" //#include "volumehelper.cuh" #include <math.h> #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; //cudaArray *d_volumeArray = 0; cudaArray *d_volumeArray; cudaArray *d_transferFuncArray; int size = 0; int3 dim = { 0,0,0 }; texture<uchar, cudaTextureType3D, cudaReadModeNormalizedFloat> volumeTex; texture<float4, cudaTextureType3D, cudaReadModeElementType> transferTex; surface<void, cudaSurfaceType3D> volumeSurf; //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } #include "raycaster.cuh" #include "logscale.cuh" #include "blurdata.cuh" #include "circlevertex.cuh" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } extern "C" void initCuda( std::vector<unsigned char> h_volume, int3 pDim) { size = pDim.x *pDim.y*pDim.z; dim = pDim; //create 3D global texture cudaChannelFormatDesc channelDescVolume = cudaCreateChannelDesc<unsigned char>(); cudaExtent vol_dim = { pDim.x, pDim.y, pDim.z }; cudaMalloc3DArray(&d_volumeArray, &channelDescVolume, vol_dim, cudaArraySurfaceLoadStore); // copy data to 3D array cudaMemcpy3DParms copyParamsVol = { 0 }; copyParamsVol.srcPtr = make_cudaPitchedPtr(h_volume.data(), vol_dim.width * sizeof(unsigned char), vol_dim.width, vol_dim.height); copyParamsVol.dstArray = d_volumeArray; copyParamsVol.extent = vol_dim; copyParamsVol.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParamsVol); // set texture parameters volumeTex.normalized = true; // access with normalized texture coordinates volumeTex.filterMode = cudaFilterModeLinear; // linear interpolation volumeTex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates volumeTex.addressMode[1] = 
cudaAddressModeClamp; //volumeTex.addressMode[2] = cudaAddressModeClamp; // Bind the array to the texture cudaBindTextureToArray(volumeTex, d_volumeArray, channelDescVolume); checkCudaErrors(cudaBindSurfaceToArray(volumeSurf, d_volumeArray)); //---------------------transfer tex--------------------------------------------------------- // create transfer function texture float4 transferFunc[] = { //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.5, 0.0, 0.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, //---------- { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 0.0, }, { 1.0, 0.5, 0.0, 0.0, }, { 1.0, 1.0, 0.0, 0.0, }, { 0.0, 1.0, 0.0, 0.0, }, { 0.0, 1.0, 1.0, 0.5, }, { 0.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, }; //cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); //checkCudaErrors(cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc) / sizeof(float4), 1)); //checkCudaErrors(cudaMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); //transferTex.filterMode = cudaFilterModeLinear; //transferTex.normalized = true; // access with normalized texture coordinates //transferTex.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // // Bind the array to the texture //checkCudaErrors(cudaBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>(); cudaExtent tf_dim = { 9, 3, 1 }; cudaMalloc3DArray(&d_transferFuncArray, &channelDesc, tf_dim); // copy data to 3D array cudaMemcpy3DParms copyParams = { 0 }; 
copyParams.srcPtr = make_cudaPitchedPtr(transferFunc, tf_dim.width * sizeof(float4), tf_dim.width, tf_dim.height); copyParams.dstArray = d_transferFuncArray; copyParams.extent = tf_dim; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); // set texture parameters transferTex.normalized = true; // access with normalized texture coordinates transferTex.filterMode = cudaFilterModeLinear; // linear interpolation transferTex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates transferTex.addressMode[1] = cudaAddressModeClamp; //Bind the array to the texture cudaBindTextureToArray(transferTex, d_transferFuncArray, channelDesc); } // Helper function for using CUDA to add vectors in parallel. extern "C" void addWithCuda(std::vector<int> &c, const std::vector<int> a, const std::vector<int> b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a.data(), size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b.data(), size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel <<<1, size >>> (dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c.data(), dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); //return cudaStatus; } extern "C" void freeCudaBuffers() { checkCudaErrors(cudaFreeArray(d_volumeArray)); checkCudaErrors(cudaFreeArray(d_transferFuncArray)); } extern "C" void copyInvViewMatrix(std::vector<float> pInvViewMatrix) { checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, pInvViewMatrix.data(), sizeof(float)*pInvViewMatrix.size())); }
9b3933f0d7be3e7e493d5158c7ee20e51016ae91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // CUDA Kernel function to add the elemnts of two arrays on the GPU __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } int main(void) { int N = 1<<20; // 1M elements float *x, *y; // Allocate Unified Memory - accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = fmax(maxError, fabs(y[i] - 3.0f)); } std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
9b3933f0d7be3e7e493d5158c7ee20e51016ae91.cu
#include <iostream> #include <math.h> // CUDA Kernel function to add the elemnts of two arrays on the GPU __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } int main(void) { int N = 1<<20; // 1M elements float *x, *y; // Allocate Unified Memory - accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks, blockSize>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = fmax(maxError, fabs(y[i] - 3.0f)); } std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
7a13e463331a9a02ae4886b9a7ada78219b83134.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include "kernels.h" #include "randgen.h" // some CPU variables extern float Htau; extern int HI; extern int HK_star; extern int HK; extern int V_extended; const float RHO_UPPER = 2.00f; const float RHO_LOWER = 0.35f; const double VOXEL_VOL = 8.0; // Finds the matrix-vector products void FIND_EIGEN_PROD (float *rho , float2 *DFT_big , float *dist , hipfftHandle plan_big , float *eigen , float *gamma , float2 *DFT , hipfftHandle plan , float *Cgamma , float *CSgamma) { // 2 nested block circulant matrices appear in the calculations. The first kernel finds the bases. hipLaunchKernelGGL(( FindBaseBig), dim3(K*V/NT),dim3(NT), 0, 0, rho,DFT_big,dist); // Load the \gamma vector in the small DFT hipLaunchKernelGGL(( LoadVec), dim3(K*V/NT),dim3(NT), 0, 0, gamma,DFT); hipDeviceSynchronize(); // Execute the forward DFT hipfftExecC2C(plan_big,DFT_big,DFT_big,HIPFFT_FORWARD); hipDeviceSynchronize(); // Execute the IDFT of the vector hipfftExecC2C(plan,DFT,DFT,HIPFFT_BACKWARD); // Save the eigenvalues of the correlation matrix. Be careful. These values are multiplied by sqrt(n) already and the square root is taken // At the same time prepare the big FFT object for the IDFT(gamma) that is coming. See kernel carefully. 
hipLaunchKernelGGL(( DragEigenBig), dim3(K*V/NT),dim3(NT), 0, 0, DFT_big,eigen); hipDeviceSynchronize(); // Throw IDFT(\gamma) into the big object hipLaunchKernelGGL(( LoadAddVec), dim3(K*V/NT),dim3(NT), 0, 0, DFT,DFT_big); hipDeviceSynchronize(); // Perform the final DFT hipfftExecC2C(plan_big,DFT_big,DFT_big,HIPFFT_FORWARD); hipDeviceSynchronize(); // And finally save the products hipLaunchKernelGGL(( DragRealBig), dim3(K*V/NT),dim3(NT), 0, 0, DFT_big,Cgamma,CSgamma); hipDeviceSynchronize(); } // Finds the design matrix times coefficients product void FIND_ZB(float *Z , float * B , float * ZB , float *COVARIATES) { // Calculate the product hipblasSgemv('n',HI,HK_star,1.0f,Z,HI,B,1,0.0f,ZB,1); hipDeviceSynchronize(); // Then save to a vector hipLaunchKernelGGL(( SendToCovariates), dim3(CUBLAS_TMP),dim3(512), 0, 0, ZB,COVARIATES); hipDeviceSynchronize(); } // The following function calculates the gradient. // For grad_gamma the minus sign is take care of when multiplying by sigma // For grad_beta and grad_sigma the minus sign is taken care of when the partial sums are summed in the CPU void FIND_GRAD(float *gamma , float *grad_gamma , float *beta , float *grad_beta , float *sigma , float *grad_sigma , float *voxel_tmp , float *ZB , float *Cgamma , float *vol , double *sigma_part , double *beta_part , double *Hsigma_part , double *Hbeta_part , float * COVARIATES , float *TERM_GAMMA , float2 *DFT , float *eigen , hipfftHandle plan , float *Hsigma , float *Hbeta , float *HZN , float *Hrho , float *rho , float *CSgamma , double *rho_part , double *Hrho_part , float *grad_rho) { // The first kernel calculates common term for all the variables per voxel. It stores it in voxel_tmp. 
hipLaunchKernelGGL(( GradFirst), dim3(V/NT),dim3(NT), 0, 0, voxel_tmp ,sigma ,Cgamma ,vol , DFT , COVARIATES,TERM_GAMMA); hipDeviceSynchronize(); // The lines below calculate the FFT product that appears in gradient calculations // The vector is already loaded in the FFT object so we just find its IDFT hipfftExecC2C(plan, DFT, DFT, HIPFFT_BACKWARD); hipDeviceSynchronize(); // Now load the eigenvalues of the square root correlation matrix hipLaunchKernelGGL(( LoadAddVecSecond), dim3(K*V/NT),dim3(NT), 0, 0, eigen,DFT); hipDeviceSynchronize(); // And finally find the matrix vector product hipfftExecC2C(plan, DFT, DFT, HIPFFT_FORWARD); hipDeviceSynchronize(); // To finish, send the values to the gradient of \gamma hipLaunchKernelGGL(( DragRealGrad), dim3(K*V/NT),dim3(NT), 0, 0, DFT,grad_gamma,gamma); hipDeviceSynchronize(); // This series of kernels adds up the partial sums for the gradients of scalar parameters hipLaunchKernelGGL(( GradSigma), dim3(NB),dim3(NT), 0, 0, voxel_tmp , sigma_part , Cgamma , TERM_GAMMA ); hipLaunchKernelGGL(( GradBeta), dim3(NB),dim3(NT), 0, 0, voxel_tmp , beta_part); hipLaunchKernelGGL(( GradRho), dim3(NB),dim3(NT), 0, 0, voxel_tmp,rho_part,CSgamma,TERM_GAMMA); hipDeviceSynchronize(); // Move the partial sums and the parameters to the CPU for the adding up int s,ss; hipMemcpy(Hsigma,sigma, HK*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Hrho, rho, HK*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Hbeta, beta, HK_star*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Hsigma_part, sigma_part, NB*HK*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(Hrho_part, rho_part, NB*HK*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(Hbeta_part, beta_part, NB*HK_star*sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Dont get confused. I will find the derivatives inside the parameter vectors. 
for (s=0 ; s<HK ; s++) { Hrho[s] = Hsigma[s]; Hsigma[s] *= -Htau; } for (s=0 ; s<HK_star ; s++) { Hbeta[s] *= -Htau; Hbeta[s] += HZN[s]; } // First add the partial sums found earlier in the GPU double tmp_sigma[HK]; double tmp_rho[HK]; double tmp_beta[HK_star]; for (ss=0 ; ss<NB ; ss++){ for (s=0 ; s<HK ; s++) { tmp_sigma[s] += Hsigma_part[s+HK*ss]; tmp_rho[s] += Hrho_part[s+HK*ss]; } for (s=0 ; s<HK_star ; s++) { tmp_beta[s] += Hbeta_part[s+HK_star*ss]; } } // And then add the sums to the gradients of the parameters for (s=0 ; s<HK ; s++) { Hsigma[s] += -(float)tmp_sigma[s]; Hrho[s] *= (float)tmp_rho[s]/200; // The 200 appears due to the parametrisation } for (s=0 ; s<HK_star ; s++) { Hbeta[s] += -(float)tmp_beta[s]; } // And finally send everything back to the GPU hipMemcpy(grad_sigma, Hsigma, HK*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(grad_rho, Hrho, HK*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(grad_beta, Hbeta, HK_star*sizeof(float),hipMemcpyHostToDevice); hipDeviceSynchronize(); } // Finds the log-likelihood void FIND_LIK(float *lik_tmp , float *gamma , float *COVARIATES , float * sigma , float *Cgamma , float *TERM_GAMMA , float *vol , double *lik_part , double *Hlik_part , float *ZB , float *HZB , float *Hsigma , float *beta , float *Hbeta , double *likelihood , int *Hcounts ) { // First do the two kernels required in the GPU hipLaunchKernelGGL(( LikFirst), dim3(V/NT),dim3(NT), 0, 0, lik_tmp,gamma,COVARIATES,sigma,Cgamma,TERM_GAMMA,vol); hipDeviceSynchronize(); hipLaunchKernelGGL(( LikSecond), dim3(NB),dim3(NT), 0, 0, lik_tmp,lik_part); hipDeviceSynchronize(); // Define two variables that will be used int s; double sum=0; // Transfer everything you need to the CPU hipMemcpy(Hlik_part, lik_part, NB*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(HZB, ZB, HI*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Hsigma, sigma, HK*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Hbeta, beta, HK_star*sizeof(float), hipMemcpyDeviceToHost); 
hipDeviceSynchronize(); // Add the likelihood partial sums together for (s=0 ; s<NB ; s++) { sum += Hlik_part[s]; } // Add the annoying term that appears in the likelihood equation. for (s=0 ; s<HI ; s++) { sum += (double) HZB[s]*Hcounts[s]; } // Finally add the prior contributions for (s=0 ; s<HK ; s++){ sum += -0.5*Htau*Hsigma[s]*Hsigma[s]; } for (s=0 ; s<HK_star ; s++){ sum += -0.5*Htau*Hbeta[s]*Hbeta[s]; } // Now just save the value that you obtained. likelihood[0] = sum; } // If a move of the HMC is accepted then save the new values void SAVE_ORIGINALS(double *likelihood , float *beta , float *ori_beta , float *sigma , float *ori_sigma , float *gamma , float *ori_gamma , float *Cgamma , float *ori_Cgamma , float *grad_beta , float * ori_grad_beta , float *grad_sigma , float *ori_grad_sigma , float *grad_gamma , float *ori_grad_gamma , float *rho, float *ori_rho, float *grad_rho, float *ori_grad_rho, float *CSgamma, float *ori_CSgamma) { // Replace the proposed values with the original likelihood[1] = likelihood[0]; hipLaunchKernelGGL(( Replace), dim3(1),dim3(K_star), 0, 0, beta,ori_beta); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, rho,ori_rho); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, sigma,ori_sigma); hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, gamma,ori_gamma); hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, Cgamma,ori_Cgamma); hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, CSgamma,ori_CSgamma); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K_star), 0, 0, grad_beta,ori_grad_beta); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, grad_sigma,ori_grad_sigma); hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, grad_gamma,ori_grad_gamma); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, grad_rho,ori_grad_rho); hipDeviceSynchronize(); } // This function generates the momentum. CUDA won't generate odd number of variables hence the if statement. 
void GENERATE_MOMENTUM(float *mom_beta , float *mom_sigma , float *mom_gamma,hiprandGenerator_t gen , float *mom_beta_even , float *mom_sigma_even , float *mom_gamma_even , float *Mass_sigma , float *Mass_beta, float *mom_rho, float *mom_rho_even, float *Mass_rho) { // Generate the N(0,1) momentum for the variables of interest // Overall means if ( (HK_star%2)==0 ) { hiprandGenerateNormal(gen , mom_beta , K_star , 0.0f , 1.0f); } else { hiprandGenerateNormal(gen, mom_beta_even, K_star+1 , 0.0f, 1.0f); hipLaunchKernelGGL(( Replace), dim3(1),dim3(HK_star), 0, 0, mom_beta_even,mom_beta); } // Marginal standard deviations if ( (HK%2)==0 ) { hiprandGenerateNormal(gen, mom_sigma, HK , 0.0f, 1.0f); hiprandGenerateNormal(gen, mom_rho, HK , 0.0f, 1.0f); } else { hiprandGenerateNormal(gen, mom_sigma_even, HK+1 , 0.0f, 1.0f); hipLaunchKernelGGL(( Replace), dim3(1),dim3(HK), 0, 0, mom_sigma_even,mom_sigma); hiprandGenerateNormal(gen, mom_rho_even, HK+1 , 0.0f, 1.0f); hipLaunchKernelGGL(( Replace), dim3(1),dim3(HK), 0, 0, mom_rho_even,mom_rho); } // gamma vactors hiprandGenerateNormal(gen, mom_gamma, HK*V , 0.0f, 1.0f); hipDeviceSynchronize(); // Now scale the scalars by their masses hipLaunchKernelGGL(( CrossVector), dim3(1),dim3(HK_star), 0, 0, mom_beta,Mass_beta); hipLaunchKernelGGL(( CrossVector), dim3(1),dim3(HK), 0, 0, mom_sigma,Mass_sigma); hipLaunchKernelGGL(( CrossVector), dim3(1),dim3(HK), 0, 0, mom_rho, Mass_rho); hipDeviceSynchronize(); } // Finds the kinetic energy void FIND_KINETIC(float *mom_beta , float *mom_sigma , float *mom_gamma , double *Kinetic , double *kin_part , double *Hkin_part , float *Hmom_beta , float *Hmom_sigma , float *HMass_sigma , float *HMass_beta , float *mom_rho , float *Hmom_rho , float *HMass_rho) { // Execute the kernels and transfer things to the CPU hipLaunchKernelGGL(( KineticFirst), dim3(NB),dim3(NT), 0, 0, mom_gamma,kin_part); hipDeviceSynchronize(); hipMemcpy(Hkin_part,kin_part,NB*sizeof(double),hipMemcpyDeviceToHost); 
hipMemcpy(Hmom_beta,mom_beta,HK_star*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(Hmom_sigma, mom_sigma, HK*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(Hmom_rho , mom_rho , HK*sizeof(float),hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Define the variables that will be used int s; double sum = 0; // Add the Kinetic parts together for (s=0 ; s<NB ; s++) { sum += Hkin_part[s]; } for (s=0 ; s<HK_star ; s++){ sum += (Hmom_beta[s]*Hmom_beta[s])/HMass_beta[s]; } for (s=0 ; s<HK ; s++){ sum += (Hmom_sigma[s]*Hmom_sigma[s])/HMass_sigma[s]; } for (s=0 ; s<HK ; s++){ sum += (Hmom_rho[s]*Hmom_rho[s])/HMass_rho[s]; } // Multiply by half Kinetic[0] = 0.5*sum; } // Updates the momentum vector according to HMC void UPDATE_MOMENTUM(float size , float *mom_beta , float *grad_beta , float *mom_sigma , float *grad_sigma , float *mom_gamma , float * grad_gamma , float *mom_rho , float *grad_rho , float *Sign_rho) { hipLaunchKernelGGL(( Update), dim3(1),dim3(HK_star), 0, 0, mom_beta,grad_beta,size); hipLaunchKernelGGL(( Update), dim3(1),dim3(HK), 0, 0, mom_sigma, grad_sigma, size); hipLaunchKernelGGL(( UpdateSecond), dim3(1),dim3(HK), 0, 0, mom_rho,grad_rho,size,Sign_rho); hipLaunchKernelGGL(( Update), dim3(HK*V/NT),dim3(NT), 0, 0, mom_gamma,grad_gamma,size); hipDeviceSynchronize(); } // Updates the parameter vector according to HMC void UPDATE_PARAMS(float size , float *beta , float *mom_beta , float *sigma , float *mom_sigma , float *gamma , float *mom_gamma , float *Mass_sigma , float *Mass_beta , float *rho, float *Hrho , float *mom_rho , float *Mass_rho , float *Sign_rho) { // Update the parameters according to the Leapfrog scheme. 
Remember the Mass vectors contain standard deviations hipLaunchKernelGGL(( UpdateScalars), dim3(1),dim3(HK_star), 0, 0, beta,mom_beta,size,Mass_beta); hipLaunchKernelGGL(( UpdateScalars), dim3(1),dim3(HK), 0, 0, sigma,mom_sigma,size,Mass_sigma); hipLaunchKernelGGL(( UpdateScalars), dim3(1),dim3(HK), 0, 0, rho,mom_rho,size,Mass_rho); hipLaunchKernelGGL(( Update), dim3(HK*V/NT),dim3(NT), 0, 0, gamma,mom_gamma,size); hipDeviceSynchronize(); // Bring the correlation parameters back to see if the boundary conditions are satisfied hipMemcpy(Hrho , rho , HK*sizeof(float) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); int s; float tmp; for (s=0 ; s<HK ; s++) { // Upper bounds if (Hrho[s] > RHO_UPPER) { tmp = RHO_UPPER - (Hrho[s]-RHO_UPPER); hipLaunchKernelGGL(( SetElement), dim3(1),dim3(1), 0, 0, rho,s,tmp); hipLaunchKernelGGL(( SetElement), dim3(1),dim3(1), 0, 0, Sign_rho , s , -1.0f); } // Lower bounds if (Hrho[s] < RHO_LOWER) { tmp = RHO_LOWER + (RHO_LOWER-Hrho[s]); hipLaunchKernelGGL(( SetElement), dim3(1),dim3(1), 0, 0, rho,s,tmp); hipLaunchKernelGGL(( SetElement), dim3(1),dim3(1), 0, 0, Sign_rho,s,-1.0f); } } hipDeviceSynchronize(); } // For when a move is rejected void REVERT_STATE(float *beta , float *ori_beta , float *sigma , float *ori_sigma , float *gamma , float *ori_gamma , float *Cgamma , float *ori_Cgamma , float *grad_beta , float * ori_grad_beta , float *grad_sigma , float *ori_grad_sigma , float *grad_gamma , float *ori_grad_gamma, float *rho, float *ori_rho, float *grad_rho, float *ori_grad_rho, float *CSgamma, float *ori_CSgamma) { hipLaunchKernelGGL(( Replace), dim3(1),dim3(K_star), 0, 0, ori_beta,beta);hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, ori_sigma,sigma);hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, ori_rho,rho); hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, ori_gamma,gamma);hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, ori_Cgamma,Cgamma);hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 
0, ori_CSgamma,CSgamma); hipLaunchKernelGGL(( Replace), dim3(1),dim3(K_star), 0, 0, ori_grad_beta,grad_beta);hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, ori_grad_sigma,grad_sigma);hipLaunchKernelGGL(( Replace), dim3(1),dim3(K), 0, 0, ori_grad_rho,grad_rho);hipLaunchKernelGGL(( Replace), dim3(K*V/NT),dim3(NT), 0, 0, ori_grad_gamma,grad_gamma); hipDeviceSynchronize(); } // Find the kinetic energy for the first part where only the gamma vectors are updated void FIND_KINETIC_GAMMA(float *mom_gamma , double *Kinetic , double *kin_part , double *Hkin_part) { // Execute the kernels and transfer to the CPU hipLaunchKernelGGL(( KineticFirst), dim3(NB),dim3(NT), 0, 0, mom_gamma,kin_part); hipDeviceSynchronize(); hipMemcpy(Hkin_part,kin_part,NB*sizeof(double),hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Define the variables that will be used int s=0; double sum=0; // Add things up for (s=0 ; s<NB ; s++) { sum += Hkin_part[s]; } // Multiply by half Kinetic[0] = 0.5*sum; } // Saves a snapshot of the parameters void SNAPSHOT(float *Hsigma , float *ori_sigma , float *Hrho , float *ori_rho , float *Hbeta , float *ori_beta , float *big , float *ori_gamma ) { FILE *STARTING; int kk; /* Transfer the parameters to the CPU */ hipMemcpy(Hsigma,ori_sigma,HK*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(Hrho ,ori_rho,HK*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(Hbeta,ori_beta,HK_star*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(big,ori_gamma,HK*V_extended*sizeof(float),hipMemcpyDeviceToHost); hipDeviceSynchronize(); /* Write to the file */ STARTING = fopen("./outputs/starting.txt","w"); for (kk=0 ; kk<HK_star; kk++) { fprintf(STARTING,"%.10f\n",Hbeta[kk]); } for (kk=0 ; kk<HK ; kk++) { fprintf(STARTING,"%.10f\n",Hsigma[kk]); } for (kk=0 ; kk<HK ; kk++) { fprintf(STARTING,"%.10f\n",Hrho[kk]); } for (kk=0 ; kk<HK*V_extended ; kk++) { fprintf(STARTING,"%.10f\n",big[kk]); } fclose(STARTING); } /* Updates the study random effects */ void UPDATE_RFX(double *Hrfx , 
double *rfx, float *HZB, float *ZB, int *Hcounts, float *sigma, float *Cgamma, float *vol, float *COVARIATES, unsigned long *RNG, int* author) { /* Find the sum over voxels and move to host */ hipLaunchKernelGGL(( rfxSum), dim3(CUBLAS_TMP),dim3(512), 0, 0, sigma , Cgamma , vol , COVARIATES , rfx ); hipDeviceSynchronize(); hipMemcpy(Hrfx,rfx,HI*sizeof(double),hipMemcpyDeviceToHost); hipDeviceSynchronize(); /* Add the constant terms */ int i; hipMemcpy(HZB, ZB, HI*sizeof(float), hipMemcpyDeviceToHost); for (i=0 ; i<HI ; i++) { Hrfx[i] *= VOXEL_VOL*exp((double)HZB[i]); } /* Sample the random effect terms from their Gamma full conditionals */ int n_authors = author[HI-1] + 1 ; int author_first, author_last, flag, j; double shape, rate, tmp; for (i=0 ; i<n_authors ; i++) { /* Find the first study from the i-th author */ flag=0; j=-1; while (flag==0){ j += 1; flag = ( i == (author[j]) ); } author_first = j; /* Find the last study from the i-th author */ for (j=author_first ; j<HI ; j++) { if (author[j] == i){ author_last = j; } } /* Find the shape and the rate of the Gamma full conditional */ shape = rfx_phi; rate = rfx_phi; for (j=author_first ; j<=author_last ; j++) { shape += (double)Hcounts[j]; rate += Hrfx[j]; } /* Draw the new random effect */ tmp = rgamma(shape,rate,RNG); /* Save the draw */ for (j=author_first ; j<=author_last ; j++) { Hrfx[j] = tmp; } /* Print something to make sure */ if (i==100) { printf("\nStudy %d First %d Last %d Shape %.5f Rate %.5f",i,author_first,author_last,shape,rate); } } /* Copy the random effects to the GPU */ hipMemcpy(rfx,Hrfx,HI*sizeof(double),hipMemcpyHostToDevice); hipDeviceSynchronize(); /* Copy the random effects in the COVARIATES array */ hipLaunchKernelGGL(( SaveRFX), dim3(CUBLAS_TMP),dim3(512), 0, 0, rfx,COVARIATES); hipDeviceSynchronize(); }
7a13e463331a9a02ae4886b9a7ada78219b83134.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <cufft.h> #include <cublas.h> #include <curand.h> #include "kernels.h" #include "randgen.h" // some CPU variables extern float Htau; extern int HI; extern int HK_star; extern int HK; extern int V_extended; const float RHO_UPPER = 2.00f; const float RHO_LOWER = 0.35f; const double VOXEL_VOL = 8.0; // Finds the matrix-vector products void FIND_EIGEN_PROD (float *rho , float2 *DFT_big , float *dist , cufftHandle plan_big , float *eigen , float *gamma , float2 *DFT , cufftHandle plan , float *Cgamma , float *CSgamma) { // 2 nested block circulant matrices appear in the calculations. The first kernel finds the bases. FindBaseBig<<<K*V/NT,NT>>>(rho,DFT_big,dist); // Load the \gamma vector in the small DFT LoadVec<<<K*V/NT,NT>>>(gamma,DFT); cudaDeviceSynchronize(); // Execute the forward DFT cufftExecC2C(plan_big,DFT_big,DFT_big,CUFFT_FORWARD); cudaDeviceSynchronize(); // Execute the IDFT of the vector cufftExecC2C(plan,DFT,DFT,CUFFT_INVERSE); // Save the eigenvalues of the correlation matrix. Be careful. These values are multiplied by sqrt(n) already and the square root is taken // At the same time prepare the big FFT object for the IDFT(gamma) that is coming. See kernel carefully. 
DragEigenBig<<<K*V/NT,NT>>>(DFT_big,eigen); cudaDeviceSynchronize(); // Throw IDFT(\gamma) into the big object LoadAddVec<<<K*V/NT,NT>>>(DFT,DFT_big); cudaDeviceSynchronize(); // Perform the final DFT cufftExecC2C(plan_big,DFT_big,DFT_big,CUFFT_FORWARD); cudaDeviceSynchronize(); // And finally save the products DragRealBig<<<K*V/NT,NT>>>(DFT_big,Cgamma,CSgamma); cudaDeviceSynchronize(); } // Finds the design matrix times coefficients product void FIND_ZB(float *Z , float * B , float * ZB , float *COVARIATES) { // Calculate the product cublasSgemv('n',HI,HK_star,1.0f,Z,HI,B,1,0.0f,ZB,1); cudaDeviceSynchronize(); // Then save to a vector SendToCovariates<<<CUBLAS_TMP,512>>>(ZB,COVARIATES); cudaDeviceSynchronize(); } // The following function calculates the gradient. // For grad_gamma the minus sign is take care of when multiplying by sigma // For grad_beta and grad_sigma the minus sign is taken care of when the partial sums are summed in the CPU void FIND_GRAD(float *gamma , float *grad_gamma , float *beta , float *grad_beta , float *sigma , float *grad_sigma , float *voxel_tmp , float *ZB , float *Cgamma , float *vol , double *sigma_part , double *beta_part , double *Hsigma_part , double *Hbeta_part , float * COVARIATES , float *TERM_GAMMA , float2 *DFT , float *eigen , cufftHandle plan , float *Hsigma , float *Hbeta , float *HZN , float *Hrho , float *rho , float *CSgamma , double *rho_part , double *Hrho_part , float *grad_rho) { // The first kernel calculates common term for all the variables per voxel. It stores it in voxel_tmp. 
GradFirst<<<V/NT,NT>>>(voxel_tmp ,sigma ,Cgamma ,vol , DFT , COVARIATES,TERM_GAMMA); cudaDeviceSynchronize(); // The lines below calculate the FFT product that appears in gradient calculations // The vector is already loaded in the FFT object so we just find its IDFT cufftExecC2C(plan, DFT, DFT, CUFFT_INVERSE); cudaDeviceSynchronize(); // Now load the eigenvalues of the square root correlation matrix LoadAddVecSecond<<<K*V/NT,NT>>>(eigen,DFT); cudaDeviceSynchronize(); // And finally find the matrix vector product cufftExecC2C(plan, DFT, DFT, CUFFT_FORWARD); cudaDeviceSynchronize(); // To finish, send the values to the gradient of \gamma DragRealGrad<<<K*V/NT,NT>>>(DFT,grad_gamma,gamma); cudaDeviceSynchronize(); // This series of kernels adds up the partial sums for the gradients of scalar parameters GradSigma<<<NB,NT>>>(voxel_tmp , sigma_part , Cgamma , TERM_GAMMA ); GradBeta<<<NB,NT>>>(voxel_tmp , beta_part); GradRho<<<NB,NT>>>(voxel_tmp,rho_part,CSgamma,TERM_GAMMA); cudaDeviceSynchronize(); // Move the partial sums and the parameters to the CPU for the adding up int s,ss; cudaMemcpy(Hsigma,sigma, HK*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Hrho, rho, HK*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Hbeta, beta, HK_star*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Hsigma_part, sigma_part, NB*HK*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(Hrho_part, rho_part, NB*HK*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(Hbeta_part, beta_part, NB*HK_star*sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Dont get confused. I will find the derivatives inside the parameter vectors. 
for (s=0 ; s<HK ; s++) { Hrho[s] = Hsigma[s]; Hsigma[s] *= -Htau; } for (s=0 ; s<HK_star ; s++) { Hbeta[s] *= -Htau; Hbeta[s] += HZN[s]; } // First add the partial sums found earlier in the GPU double tmp_sigma[HK]; double tmp_rho[HK]; double tmp_beta[HK_star]; for (ss=0 ; ss<NB ; ss++){ for (s=0 ; s<HK ; s++) { tmp_sigma[s] += Hsigma_part[s+HK*ss]; tmp_rho[s] += Hrho_part[s+HK*ss]; } for (s=0 ; s<HK_star ; s++) { tmp_beta[s] += Hbeta_part[s+HK_star*ss]; } } // And then add the sums to the gradients of the parameters for (s=0 ; s<HK ; s++) { Hsigma[s] += -(float)tmp_sigma[s]; Hrho[s] *= (float)tmp_rho[s]/200; // The 200 appears due to the parametrisation } for (s=0 ; s<HK_star ; s++) { Hbeta[s] += -(float)tmp_beta[s]; } // And finally send everything back to the GPU cudaMemcpy(grad_sigma, Hsigma, HK*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(grad_rho, Hrho, HK*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(grad_beta, Hbeta, HK_star*sizeof(float),cudaMemcpyHostToDevice); cudaDeviceSynchronize(); } // Finds the log-likelihood void FIND_LIK(float *lik_tmp , float *gamma , float *COVARIATES , float * sigma , float *Cgamma , float *TERM_GAMMA , float *vol , double *lik_part , double *Hlik_part , float *ZB , float *HZB , float *Hsigma , float *beta , float *Hbeta , double *likelihood , int *Hcounts ) { // First do the two kernels required in the GPU LikFirst<<<V/NT,NT>>>(lik_tmp,gamma,COVARIATES,sigma,Cgamma,TERM_GAMMA,vol); cudaDeviceSynchronize(); LikSecond<<<NB,NT>>>(lik_tmp,lik_part); cudaDeviceSynchronize(); // Define two variables that will be used int s; double sum=0; // Transfer everything you need to the CPU cudaMemcpy(Hlik_part, lik_part, NB*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(HZB, ZB, HI*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Hsigma, sigma, HK*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Hbeta, beta, HK_star*sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Add the likelihood partial sums together 
for (s=0 ; s<NB ; s++) { sum += Hlik_part[s]; } // Add the annoying term that appears in the likelihood equation. for (s=0 ; s<HI ; s++) { sum += (double) HZB[s]*Hcounts[s]; } // Finally add the prior contributions for (s=0 ; s<HK ; s++){ sum += -0.5*Htau*Hsigma[s]*Hsigma[s]; } for (s=0 ; s<HK_star ; s++){ sum += -0.5*Htau*Hbeta[s]*Hbeta[s]; } // Now just save the value that you obtained. likelihood[0] = sum; } // If a move of the HMC is accepted then save the new values void SAVE_ORIGINALS(double *likelihood , float *beta , float *ori_beta , float *sigma , float *ori_sigma , float *gamma , float *ori_gamma , float *Cgamma , float *ori_Cgamma , float *grad_beta , float * ori_grad_beta , float *grad_sigma , float *ori_grad_sigma , float *grad_gamma , float *ori_grad_gamma , float *rho, float *ori_rho, float *grad_rho, float *ori_grad_rho, float *CSgamma, float *ori_CSgamma) { // Replace the proposed values with the original likelihood[1] = likelihood[0]; Replace<<<1,K_star>>>(beta,ori_beta); Replace<<<1,K>>>(rho,ori_rho); Replace<<<1,K>>>(sigma,ori_sigma); Replace<<<K*V/NT,NT>>>(gamma,ori_gamma); Replace<<<K*V/NT,NT>>>(Cgamma,ori_Cgamma); Replace<<<K*V/NT,NT>>>(CSgamma,ori_CSgamma); Replace<<<1,K_star>>>(grad_beta,ori_grad_beta); Replace<<<1,K>>>(grad_sigma,ori_grad_sigma); Replace<<<K*V/NT,NT>>>(grad_gamma,ori_grad_gamma); Replace<<<1,K>>>(grad_rho,ori_grad_rho); cudaDeviceSynchronize(); } // This function generates the momentum. CUDA won't generate odd number of variables hence the if statement. 
void GENERATE_MOMENTUM(float *mom_beta , float *mom_sigma , float *mom_gamma,curandGenerator_t gen , float *mom_beta_even , float *mom_sigma_even , float *mom_gamma_even , float *Mass_sigma , float *Mass_beta, float *mom_rho, float *mom_rho_even, float *Mass_rho) { // Generate the N(0,1) momentum for the variables of interest // Overall means if ( (HK_star%2)==0 ) { curandGenerateNormal(gen , mom_beta , K_star , 0.0f , 1.0f); } else { curandGenerateNormal(gen, mom_beta_even, K_star+1 , 0.0f, 1.0f); Replace<<<1,HK_star>>>(mom_beta_even,mom_beta); } // Marginal standard deviations if ( (HK%2)==0 ) { curandGenerateNormal(gen, mom_sigma, HK , 0.0f, 1.0f); curandGenerateNormal(gen, mom_rho, HK , 0.0f, 1.0f); } else { curandGenerateNormal(gen, mom_sigma_even, HK+1 , 0.0f, 1.0f); Replace<<<1,HK>>>(mom_sigma_even,mom_sigma); curandGenerateNormal(gen, mom_rho_even, HK+1 , 0.0f, 1.0f); Replace<<<1,HK>>>(mom_rho_even,mom_rho); } // gamma vactors curandGenerateNormal(gen, mom_gamma, HK*V , 0.0f, 1.0f); cudaDeviceSynchronize(); // Now scale the scalars by their masses CrossVector<<<1,HK_star>>>(mom_beta,Mass_beta); CrossVector<<<1,HK>>>(mom_sigma,Mass_sigma); CrossVector<<<1,HK>>>(mom_rho, Mass_rho); cudaDeviceSynchronize(); } // Finds the kinetic energy void FIND_KINETIC(float *mom_beta , float *mom_sigma , float *mom_gamma , double *Kinetic , double *kin_part , double *Hkin_part , float *Hmom_beta , float *Hmom_sigma , float *HMass_sigma , float *HMass_beta , float *mom_rho , float *Hmom_rho , float *HMass_rho) { // Execute the kernels and transfer things to the CPU KineticFirst<<<NB,NT>>>(mom_gamma,kin_part); cudaDeviceSynchronize(); cudaMemcpy(Hkin_part,kin_part,NB*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(Hmom_beta,mom_beta,HK_star*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(Hmom_sigma, mom_sigma, HK*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(Hmom_rho , mom_rho , HK*sizeof(float),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Define the 
variables that will be used int s; double sum = 0; // Add the Kinetic parts together for (s=0 ; s<NB ; s++) { sum += Hkin_part[s]; } for (s=0 ; s<HK_star ; s++){ sum += (Hmom_beta[s]*Hmom_beta[s])/HMass_beta[s]; } for (s=0 ; s<HK ; s++){ sum += (Hmom_sigma[s]*Hmom_sigma[s])/HMass_sigma[s]; } for (s=0 ; s<HK ; s++){ sum += (Hmom_rho[s]*Hmom_rho[s])/HMass_rho[s]; } // Multiply by half Kinetic[0] = 0.5*sum; } // Updates the momentum vector according to HMC void UPDATE_MOMENTUM(float size , float *mom_beta , float *grad_beta , float *mom_sigma , float *grad_sigma , float *mom_gamma , float * grad_gamma , float *mom_rho , float *grad_rho , float *Sign_rho) { Update<<<1,HK_star>>>(mom_beta,grad_beta,size); Update<<<1,HK>>>(mom_sigma, grad_sigma, size); UpdateSecond<<<1,HK>>>(mom_rho,grad_rho,size,Sign_rho); Update<<<HK*V/NT,NT>>>(mom_gamma,grad_gamma,size); cudaDeviceSynchronize(); } // Updates the parameter vector according to HMC void UPDATE_PARAMS(float size , float *beta , float *mom_beta , float *sigma , float *mom_sigma , float *gamma , float *mom_gamma , float *Mass_sigma , float *Mass_beta , float *rho, float *Hrho , float *mom_rho , float *Mass_rho , float *Sign_rho) { // Update the parameters according to the Leapfrog scheme. 
Remember the Mass vectors contain standard deviations UpdateScalars<<<1,HK_star>>>(beta,mom_beta,size,Mass_beta); UpdateScalars<<<1,HK>>>(sigma,mom_sigma,size,Mass_sigma); UpdateScalars<<<1,HK>>>(rho,mom_rho,size,Mass_rho); Update<<<HK*V/NT,NT>>>(gamma,mom_gamma,size); cudaDeviceSynchronize(); // Bring the correlation parameters back to see if the boundary conditions are satisfied cudaMemcpy(Hrho , rho , HK*sizeof(float) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); int s; float tmp; for (s=0 ; s<HK ; s++) { // Upper bounds if (Hrho[s] > RHO_UPPER) { tmp = RHO_UPPER - (Hrho[s]-RHO_UPPER); SetElement<<<1,1>>>(rho,s,tmp); SetElement<<<1,1>>>(Sign_rho , s , -1.0f); } // Lower bounds if (Hrho[s] < RHO_LOWER) { tmp = RHO_LOWER + (RHO_LOWER-Hrho[s]); SetElement<<<1,1>>>(rho,s,tmp); SetElement<<<1,1>>>(Sign_rho,s,-1.0f); } } cudaDeviceSynchronize(); } // For when a move is rejected void REVERT_STATE(float *beta , float *ori_beta , float *sigma , float *ori_sigma , float *gamma , float *ori_gamma , float *Cgamma , float *ori_Cgamma , float *grad_beta , float * ori_grad_beta , float *grad_sigma , float *ori_grad_sigma , float *grad_gamma , float *ori_grad_gamma, float *rho, float *ori_rho, float *grad_rho, float *ori_grad_rho, float *CSgamma, float *ori_CSgamma) { Replace<<<1,K_star>>>(ori_beta,beta); Replace<<<1,K>>>(ori_sigma,sigma); Replace<<<1,K>>>(ori_rho,rho); Replace<<<K*V/NT,NT>>>(ori_gamma,gamma); Replace<<<K*V/NT,NT>>>(ori_Cgamma,Cgamma); Replace<<<K*V/NT,NT>>>(ori_CSgamma,CSgamma); Replace<<<1,K_star>>>(ori_grad_beta,grad_beta); Replace<<<1,K>>>(ori_grad_sigma,grad_sigma); Replace<<<1,K>>>(ori_grad_rho,grad_rho); Replace<<<K*V/NT,NT>>>(ori_grad_gamma,grad_gamma); cudaDeviceSynchronize(); } // Find the kinetic energy for the first part where only the gamma vectors are updated void FIND_KINETIC_GAMMA(float *mom_gamma , double *Kinetic , double *kin_part , double *Hkin_part) { // Execute the kernels and transfer to the CPU 
KineticFirst<<<NB,NT>>>(mom_gamma,kin_part); cudaDeviceSynchronize(); cudaMemcpy(Hkin_part,kin_part,NB*sizeof(double),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Define the variables that will be used int s=0; double sum=0; // Add things up for (s=0 ; s<NB ; s++) { sum += Hkin_part[s]; } // Multiply by half Kinetic[0] = 0.5*sum; } // Saves a snapshot of the parameters void SNAPSHOT(float *Hsigma , float *ori_sigma , float *Hrho , float *ori_rho , float *Hbeta , float *ori_beta , float *big , float *ori_gamma ) { FILE *STARTING; int kk; /* Transfer the parameters to the CPU */ cudaMemcpy(Hsigma,ori_sigma,HK*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(Hrho ,ori_rho,HK*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(Hbeta,ori_beta,HK_star*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(big,ori_gamma,HK*V_extended*sizeof(float),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); /* Write to the file */ STARTING = fopen("./outputs/starting.txt","w"); for (kk=0 ; kk<HK_star; kk++) { fprintf(STARTING,"%.10f\n",Hbeta[kk]); } for (kk=0 ; kk<HK ; kk++) { fprintf(STARTING,"%.10f\n",Hsigma[kk]); } for (kk=0 ; kk<HK ; kk++) { fprintf(STARTING,"%.10f\n",Hrho[kk]); } for (kk=0 ; kk<HK*V_extended ; kk++) { fprintf(STARTING,"%.10f\n",big[kk]); } fclose(STARTING); } /* Updates the study random effects */ void UPDATE_RFX(double *Hrfx , double *rfx, float *HZB, float *ZB, int *Hcounts, float *sigma, float *Cgamma, float *vol, float *COVARIATES, unsigned long *RNG, int* author) { /* Find the sum over voxels and move to host */ rfxSum<<<CUBLAS_TMP,512>>>(sigma , Cgamma , vol , COVARIATES , rfx ); cudaDeviceSynchronize(); cudaMemcpy(Hrfx,rfx,HI*sizeof(double),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); /* Add the constant terms */ int i; cudaMemcpy(HZB, ZB, HI*sizeof(float), cudaMemcpyDeviceToHost); for (i=0 ; i<HI ; i++) { Hrfx[i] *= VOXEL_VOL*exp((double)HZB[i]); } /* Sample the random effect terms from their Gamma full conditionals */ int n_authors = 
author[HI-1] + 1 ; int author_first, author_last, flag, j; double shape, rate, tmp; for (i=0 ; i<n_authors ; i++) { /* Find the first study from the i-th author */ flag=0; j=-1; while (flag==0){ j += 1; flag = ( i == (author[j]) ); } author_first = j; /* Find the last study from the i-th author */ for (j=author_first ; j<HI ; j++) { if (author[j] == i){ author_last = j; } } /* Find the shape and the rate of the Gamma full conditional */ shape = rfx_phi; rate = rfx_phi; for (j=author_first ; j<=author_last ; j++) { shape += (double)Hcounts[j]; rate += Hrfx[j]; } /* Draw the new random effect */ tmp = rgamma(shape,rate,RNG); /* Save the draw */ for (j=author_first ; j<=author_last ; j++) { Hrfx[j] = tmp; } /* Print something to make sure */ if (i==100) { printf("\nStudy %d First %d Last %d Shape %.5f Rate %.5f",i,author_first,author_last,shape,rate); } } /* Copy the random effects to the GPU */ cudaMemcpy(rfx,Hrfx,HI*sizeof(double),cudaMemcpyHostToDevice); cudaDeviceSynchronize(); /* Copy the random effects in the COVARIATES array */ SaveRFX<<<CUBLAS_TMP,512>>>(rfx,COVARIATES); cudaDeviceSynchronize(); }
ac22ceed9785800307e831347bb5db84e6ad9e80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaGather.h" #include "data_gpu.h" #include "bvh.h" #include "SceneProbe.h" #include "CudaResources/cudaTimer.hpp" #include "CudaResources/cudaUtil.hpp" #include "Utils/stream.h" #include "CConfigManager.h" #include "CCamera.h" #define THREADS_X 16 #define THREADS_Y 16 #define MAX_NUM_MATERIALS 20 #define STACK_SIZE 32 #define NUM_THREADS_GATHER_CLUSTER_LIGHTS 32 using namespace cuda; template<typename T> void printDeviceVector(std::string const& text, thrust::device_vector<T> const& vector) { std::cout << text << " "; for (int i = 0; i < vector.size(); i++) { std::cout << vector[i] << ", "; } std::cout << std::endl; } struct MAT { __device__ __host__ MAT(float3 const& d, float3 const& s, float e) : diffuse(d), specular(s), exponent(e) { } float3 diffuse; float3 specular; float exponent; }; inline __device__ float G(float3 const& p1, float3 const& n1, float3 const& p2, float3 const& n2) { float3 n_1 = normalize(n1); float3 n_2 = normalize(n2); float3 w = normalize(p2 - p1); float cos_theta_1 = clamp(dot(n_1, w), 0.f, 1.f); float cos_theta_2 = clamp(dot(n_2, -w), 0.f, 1.f); float dist = length(p2 - p1); return (cos_theta_1 * cos_theta_2) / (dist * dist); } inline __device__ float3 f_r(float3 const& w_i, float3 const& w_o, float3 const& n, MAT const& mat) { const float3 d = CUDA_ONE_OVER_PI * mat.diffuse; float3 s = make_float3(0.f); if (length(mat.specular) > 0.f) { const float cos_theta = max(0.f, dot(reflect(-w_i, n), w_o)); s = 0.5f * CUDA_ONE_OVER_PI * (mat.exponent+2.f) * pow(cos_theta, mat.exponent) * mat.specular; } return d+s; } inline __device__ float3 f_r(float3 const& from, float3 const& over, float3 const& to, float3 const& n, MAT const& mat) { const float3 w_i = normalize(from - over); const float3 w_o = normalize(to - over); return f_r(w_i, w_o, n, mat); } inline __device__ float3 getRadiance(float3 const& pos, float3 const& norm, MAT const& mat, float3 
const& avpl_pos, float3 const& avpl_norm, float3 const& avpl_w, float3 const& avpl_L, MAT const& avpl_mat, float3 const& camPos) { const float3 direction = normalize(pos - avpl_pos); float3 brdf_light = f_r(-avpl_w, direction, avpl_norm, avpl_mat); // check for light source Avpl if(length(avpl_w) == 0.f) brdf_light = make_float3(1.f); const float3 brdf = f_r(avpl_pos, pos, camPos, norm, mat); return avpl_L * brdf_light * G(pos, norm, avpl_pos, avpl_norm) * brdf; } inline __device__ bool intersectLinePlane(float3 const& dir_line, float3 const& point_line, float3 const& normal_plane, float3 const& point_plane, float &t) { const float denom = dot(dir_line, normal_plane); if (denom == 0.f) return false; t = dot(point_plane - point_line, normal_plane) / denom; return true; } inline __device__ float3 getAntiradiance(float3 const& pos, float3 const& norm, MAT const& mat, float3 const& avpl_pos, float3 const& avpl_norm, float3 const& avpl_w, float3 const& avpl_A, MAT const& avpl_mat, float3 const& camPos, float radius) { float3 antirad = make_float3(0.f); float d = 0.f; if (!intersectLinePlane(avpl_w, avpl_pos, norm, pos, d) || d <= 0.f) { return antirad; } const float3 hitPoint = avpl_pos + d * normalize(avpl_w); if (length(hitPoint - pos) >= radius) { return antirad; } if (dot(normalize(hitPoint - avpl_pos), avpl_w) <= 0.f) { return antirad; } if (dot(norm, avpl_w) >= 0.f) { return antirad; } const float3 direction = normalize(pos - avpl_pos); if (dot(direction, avpl_norm) >= 0.f) { return antirad; } const float cos_theta = max(0.f, dot(norm, -direction)); const float3 brdf = f_r(avpl_pos, pos, camPos, norm, mat); return avpl_A * /*cos_theta **/ brdf / (CUDA_PI * radius * radius); } inline __device__ float3 getRadiance(float3 const& pos, float3 const& norm, MAT const& mat, MAT const& avpl_mat, int idx, AvplBvhNodeDataParam* dataParam, float3 const& camPos) { const float3 avpl_pos = dataParam->position[idx]; const float3 avpl_norm = dataParam->normal[idx]; const float3 
avpl_incRad = dataParam->incRadiance[idx]; const float3 avpl_incDir = dataParam->incDirection[idx]; return getRadiance(pos, norm, mat, avpl_pos, avpl_norm, avpl_incDir, avpl_incRad, avpl_mat, camPos); } __global__ void kernel_radiance( hipSurfaceObject_t outRadiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const bool calcPixelValue = (x < width && y < height); float3 pos = make_float3(1.f); float3 norm = make_float3(1.f); int materialIndex = 0; if (calcPixelValue) { // fetch gbuffer float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); norm= make_float3(data); materialIndex = int(data.w); } float3 outRad = make_float3(0.f); float3 outAntirad = make_float3(0.f); const int numAvpls = avpls->numAvpls; const int threadId = threadIdx.x + threadIdx.y * blockDim.x; const int chunkSize = THREADS_X * THREADS_Y; const int numChunks = max(numAvpls / chunkSize, 0); __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_normal[chunkSize]; __shared__ float3 avpl_incRadiance[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; __shared__ int avpl_materialIndex[chunkSize]; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float material_exponent[MAX_NUM_MATERIALS]; if (threadId < materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = materials->diffuse[threadId]; material_specular[threadId] = materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); for (int chunk = 0; chunk < numChunks; 
++chunk) { // load chunk into shared memory const int index = chunkSize * chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_incRadiance[threadId] = avpls->incRadiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; syncthreads(); if (!calcPixelValue) continue; // process avpls for(int i = 0; i < chunkSize; ++i) { int matIndex = avpl_materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outRad += getRadiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_incRadiance[i], avpl_mat, camPos); } } // remainig avpls const int index = chunkSize * numChunks + threadId; if (index < numAvpls) { avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_incRadiance[threadId] = avpls->incRadiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; } syncthreads(); const int remaining = numAvpls - numChunks * chunkSize; for (int i = 0; i < remaining; ++i) { MAT avpl_mat(material_diffuse[avpl_materialIndex[i]], material_specular[avpl_materialIndex[i]], material_exponent[avpl_materialIndex[i]]); outRad += getRadiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_incRadiance[i], avpl_mat, camPos); } if (!calcPixelValue) return; float4 out = make_float4(outRad, 1.f); surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_radiance_simple( hipSurfaceObject_t outRadiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; 
surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); float3 outRad = make_float3(0.f); const int numAvpls = avpls->numAvpls; MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); for (int i = 0; i < numAvpls; ++i) { const int avpl_mat_index = avpls->materialIndex[i]; MAT avpl_mat(materials->diffuse[avpl_mat_index], materials->specular[avpl_mat_index], materials->exponent[avpl_mat_index]); outRad += getRadiance(pos, norm, mat, avpls->position[i], avpls->normal[i], avpls->incDirection[i], avpls->incRadiance[i], avpl_mat, camPos); } float4 out = make_float4(outRad, 1.f); surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_bvh( hipSurfaceObject_t outRadiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, BvhParam* bvhParam, AvplBvhNodeDataParam* dataParam, MaterialsGpuParam* materials, float3 camPos, int bvhLevel, float refThresh, bool genDebugInfo, uint2 debugPixel, int* usedAvpls, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } bool generateDebugInfo = (x == debugPixel.x) && (y == debugPixel.y) && genDebugInfo; float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos= make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm= make_float3(data); const int materialIndex = int(data.w); const int threadId = threadIdx.x + threadIdx.y * blockDim.x; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float material_exponent[MAX_NUM_MATERIALS]; if (threadId < materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = 
materials->diffuse[threadId]; material_specular[threadId] = materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); float3 outRad = make_float3(0.f); /* for (int i = 0; i < bvhParam->numLeafs + bvhParam->numNodes; ++i) { int matIndex = dataParam->materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outRad += getRadiance(pos, norm, mat, dataParam->position[i], dataParam->normal[i], dataParam->incDirection[i], dataParam->incRadiance[i], avpl_mat, camPos); } */ int stack[STACK_SIZE]; int stack_depth[STACK_SIZE]; stack[0] = -1; stack_depth[0] = 0; int stack_ptr = 1; bool error = false; while (stack_ptr > 0) { stack_ptr--; const int nodeIndex = stack[stack_ptr]; const int nodeDepth = stack_depth[stack_ptr]; if (nodeIndex >= 0) { // leaf const int matIdx = dataParam->materialIndex[nodeIndex]; MAT avpl_mat(material_diffuse[matIdx], material_specular[matIdx], material_exponent[matIdx]); outRad += getRadiance(pos, norm, mat, avpl_mat, nodeIndex, dataParam, camPos); if (generateDebugInfo) { usedAvpls[nodeIndex] = 1; } } else { // inner node const int idx = bvhParam->numLeafs-(nodeIndex+1); const BvhNode bvhNode = bvhParam->nodes[-(nodeIndex+1)]; if (-(nodeIndex+1) < 0 || -(nodeIndex+1) >= bvhParam->numNodes) { error = true; break; } const bool clusterVisible = (dot(norm, normalize(bvhNode.bbMax - pos)) > 0) || (dot(norm, normalize(bvhNode.bbMin - pos)) > 0); if (!clusterVisible) continue; const float3 clusterToPoint = normalize(dataParam->position[idx] - pos); const float radius = 0.5f * length(bvhNode.bbMax - bvhNode.bbMin); const float dist = length(dataParam->position[idx] - pos); const float solidAngle = 2.f * CUDA_PI * (1.f - dist / (sqrt(radius * radius + dist * dist))); const bool useNode = (solidAngle < refThresh); if (useNode) { const int 
matIdx = dataParam->materialIndex[idx]; MAT avpl_mat(material_diffuse[matIdx], material_specular[matIdx], material_exponent[matIdx]); outRad += getRadiance(pos, norm, mat, avpl_mat, idx, dataParam, camPos); if (generateDebugInfo) { usedAvpls[idx] = 1; } } else { stack[stack_ptr] = bvhNode.left; stack_depth[stack_ptr] = nodeDepth + 1; stack_ptr++; stack[stack_ptr] = bvhNode.right; stack_depth[stack_ptr] = nodeDepth + 1; stack_ptr++; if (stack_ptr >= STACK_SIZE) { error = true; break; } } } } float4 out = make_float4(outRad, 1.f); if (error) { out = make_float4(100000.f, 0.f, 100000.f, 1.f); } surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance( hipSurfaceObject_t outAntiradiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const bool calcPixelValue = (x < width && y < height); float3 pos = make_float3(1.f); float3 norm = make_float3(1.f); int materialIndex = 0; if (calcPixelValue) { // fetch gbuffer float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); norm= make_float3(data); materialIndex = int(data.w); } float3 outAntirad = make_float3(0.f); const int numAvpls = avpls->numAvpls; const int threadId = threadIdx.x + threadIdx.y * blockDim.x; const int chunkSize = THREADS_X * THREADS_Y; const int numChunks = max(numAvpls / chunkSize, 0); __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_normal[chunkSize]; __shared__ float3 avpl_antiradiance[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; __shared__ int avpl_materialIndex[chunkSize]; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float 
material_exponent[MAX_NUM_MATERIALS]; if (threadId < materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = materials->diffuse[threadId]; material_specular[threadId] = materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); for (int chunk = 0; chunk < numChunks; ++chunk) { // load chunk into shared memory const int index = chunkSize * chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_antiradiance[threadId] = avpls->antiradiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; syncthreads(); if (!calcPixelValue) continue; // process avpls for(int i = 0; i < chunkSize; ++i) { int matIndex = avpl_materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outAntirad += getAntiradiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_antiradiance[i], avpl_mat, camPos, photonRadius); } } // remainig avpls const int index = chunkSize * numChunks + threadId; if (index < numAvpls) { avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_antiradiance[threadId] = avpls->antiradiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; } syncthreads(); const int remaining = numAvpls - numChunks * chunkSize; for (int i = 0; i < remaining; ++i) { MAT avpl_mat(material_diffuse[avpl_materialIndex[i]], material_specular[avpl_materialIndex[i]], material_exponent[avpl_materialIndex[i]]); outAntirad += getAntiradiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_antiradiance[i], avpl_mat, camPos, photonRadius); } if 
(!calcPixelValue) return; float4 out = make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance_simple( hipSurfaceObject_t outAntiradiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); float3 outAntirad = make_float3(0.f); MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); for (int i = 0; i < avpls->numAvpls; ++i) { const int avpl_mat_index = avpls->materialIndex[i]; MAT avpl_mat(materials->diffuse[avpl_mat_index], materials->specular[avpl_mat_index], materials->exponent[avpl_mat_index]); outAntirad += getAntiradiance(pos, norm, mat, avpls->position[i], avpls->normal[i], avpls->incDirection[i], avpls->antiradiance[i], avpl_mat, camPos, photonRadius); } float4 out = make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance_clusterred( dim3 dimensions, hipSurfaceObject_t outAntiradiance, hipSurfaceObject_t inPositions, hipSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int* numLightsPerCluster, int* lightsPerCluster, int* clusterIds, int maxNumLightsPerCluster, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); 
const float3 pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); float3 outAntirad = make_float3(0.f); const int clusterId = clusterIds[y * dimensions.x + x]; const int numLights = numLightsPerCluster[clusterId]; const int clusterOffset = clusterId * maxNumLightsPerCluster; for (int i = 0; i < numLights; ++i) { int lightIdx = lightsPerCluster[clusterOffset + i]; int matIndex = avpls->materialIndex[lightIdx]; MAT avpl_mat(materials->diffuse[matIndex], materials->specular[matIndex], materials->exponent[matIndex]); outAntirad += getAntiradiance(pos, norm, mat, avpls->position[lightIdx], avpls->normal[lightIdx], avpls->incDirection[lightIdx], avpls->antiradiance[lightIdx], avpl_mat, camPos, photonRadius); } float4 out = make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } // http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms __device__ bool intersectBB(float3 const& bbMin, float3 const& bbMax, float3 const& o, float3 const& d) { const float dirfracx = 1.0f / d.x; const float dirfracy = 1.0f / d.y; const float dirfracz = 1.0f / d.z; const float t1 = (bbMin.x - o.x) * dirfracx; const float t2 = (bbMax.x - o.x) * dirfracx; const float t3 = (bbMin.y - o.y) * dirfracy; const float t4 = (bbMax.y - o.y) * dirfracy; const float t5 = (bbMin.z - o.z) * dirfracz; const float t6 = (bbMax.z - o.z) * dirfracz; const float tmin = max(max(min(t1, t2), min(t3, t4)), min(t5, t6)); const float tmax = min(min(max(t1, t2), max(t3, t4)), max(t5, t6)); if (tmax < 0 || tmin > tmax) { return false; } return true; } __global__ void kernel_gather_cluster_lights( AvplsGpuParam* avpls, int* numLightsPerCluster, int* lightsPerCluster, float photonRadius, int maxNumLightsPerCluster, float3* 
clusterBBMin, float3* clusterBBMax, int numClusters) { const int clusterId = blockIdx.x * blockDim.x + threadIdx.x; const bool validClusterId = clusterId < numClusters; const float3 bbMin = clusterBBMin[clusterId] - 2.f * make_float3(photonRadius); const float3 bbMax = clusterBBMax[clusterId] + 2.f * make_float3(photonRadius); const int clusterOffset = maxNumLightsPerCluster * clusterId; int numClusterLights = 0; const int numLights = avpls->numAvpls; const int threadId = threadIdx.x; const int chunkSize = NUM_THREADS_GATHER_CLUSTER_LIGHTS; const int numChunks = numLights / chunkSize; __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; for (int chunk = 0; chunk < numChunks; ++chunk) { // load chunk into shared memory const int index = chunkSize * chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; syncthreads(); if (!validClusterId) continue; for(int i = 0; i < chunkSize; ++i) { // check for hit with cluster if (intersectBB(bbMin, bbMax, avpl_position[i], avpl_incDirection[i])) { const int idx = chunkSize * chunk + i; lightsPerCluster[clusterOffset + numClusterLights] = idx; numClusterLights++; } } } syncthreads(); if (!validClusterId) return; // remainig avpls for (int i = numChunks * chunkSize; i < numLights; ++i) { if (intersectBB(bbMin, bbMax, avpls->position[i], avpls->incDirection[i])) { lightsPerCluster[clusterOffset + numClusterLights] = i; numClusterLights++; } } numLightsPerCluster[clusterId] = numClusterLights; } __global__ void kernel_gather_cluster_lights_simple( AvplsGpuParam* avpls, int* numLightsPerCluster, int* lightsPerCluster, float photonRadius, int maxNumLightsPerCluster, float3* clusterBBMin, float3* clusterBBMax, int numClusters) { const int clusterId = blockIdx.x * blockDim.x + threadIdx.x; if (clusterId >= numClusters) { return; } const float3 bbMin = clusterBBMin[clusterId] - 2.f * make_float3(photonRadius); const float3 bbMax 
= clusterBBMax[clusterId] + 2.f * make_float3(photonRadius); const int clusterOffset = maxNumLightsPerCluster * clusterId; int numClusterLights = 0; for (int i = 0; i < avpls->numAvpls; ++i) { if (intersectBB(bbMin, bbMax, avpls->position[i], avpls->incDirection[i])) { lightsPerCluster[clusterOffset + numClusterLights] = i; numClusterLights++; } } numLightsPerCluster[clusterId] = numClusterLights; } __global__ void kernel_combine( hipSurfaceObject_t outResult, hipSurfaceObject_t inRadiance, hipSurfaceObject_t inAntiradiance, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 radiance; surf2Dread(&radiance, inRadiance, x * sizeof(float4), y); float4 antiradiance; surf2Dread(&antiradiance, inAntiradiance, x * sizeof(float4), y); //float4 out = max(make_float4(0.f), radiance - antiradiance); float4 out = radiance - antiradiance; surf2Dwrite(out, outResult, x * sizeof(float4), y); } CudaGather::CudaGather(CCamera* camera, GLuint glPositonTexture, GLuint glNormalTexture, GLuint glResultOutputTexture, GLuint glRadianceOutputTexture, GLuint glAntiradianceOutputTexture, std::vector<MATERIAL> const& materials, COGLUniformBuffer* ubTransform, CConfigManager* confManager) : m_camera(camera), m_width(camera->GetWidth()), m_height(camera->GetHeight()), m_ubTransform(ubTransform), m_confManager(confManager) { m_positionResource.reset(new CudaGraphicsResource(glPositonTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsNone)); m_normalResource.reset(new CudaGraphicsResource(glNormalTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsNone)); m_antiradianceOutputResource.reset(new CudaGraphicsResource(glAntiradianceOutputTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore)); m_radianceOutputResource.reset(new CudaGraphicsResource(glRadianceOutputTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore)); m_resultOutputResource.reset(new 
CudaGraphicsResource(glResultOutputTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore)); m_materialsGpu.reset(new MaterialsGpu(materials)); } CudaGather::~CudaGather() { m_materialsGpu.reset(nullptr); } void CudaGather::run(std::vector<Avpl> const& avpls, glm::vec3 const& cameraPosition, SceneProbe* sceneProbe, float sceneExtent, bool profile) { CudaGraphicsResourceMappedArray positionsMapped(m_positionResource.get()); CudaGraphicsResourceMappedArray normalsMapped(m_normalResource.get()); CudaGraphicsResourceMappedArray radianceOutMapped(m_radianceOutputResource.get()); CudaGraphicsResourceMappedArray antiradianceOutMapped(m_antiradianceOutputResource.get()); CudaGraphicsResourceMappedArray resultOutMapped(m_resultOutputResource.get()); CudaTimer timer; timer.start(); AvplsGpu avplsGpu(avpls); timer.stop(); if(profile) std::cout << "transfer avpl data to gpu took: " << timer.getTime() << "ms" << std::endl; //////////////////////////////////////////////////////////////////////// // gather radiance //////////////////////////////////////////////////////////////////////// if (m_confManager->GetConfVars()->useLightCuts) { bool generateDebugInfo = m_confManager->GetConfVars()->UseDebugMode && sceneProbe; cuda::CudaTimer buildBvh; buildBvh.start(); std::unique_ptr<AvplBvh> avplBvh = std::unique_ptr<AvplBvh>(new AvplBvh(avpls, false)); buildBvh.stop(); cuda::CudaTimer gather; gather.start(); glm::uvec2 pixel(0, 0); if (sceneProbe) pixel = sceneProbe->getPixel(); thrust::device_vector<int> usedAvplsGpu(avplBvh->getBvhData()->numLeafs + avplBvh->getBvhData()->numNodes); thrust::host_vector<int> usedAvplsCpu(avplBvh->getBvhData()->numLeafs + avplBvh->getBvhData()->numNodes); thrust::fill(usedAvplsGpu.begin(), usedAvplsGpu.end(), 0); // Invoke kernel dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( kernel_bvh), dim3(dimGrid), dim3(dimBlock), 0, 0, 
radianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplBvh->getBvhParam(), avplBvh->getAvplBvhNodeDataParam(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_confManager->GetConfVars()->bvhLevel, m_confManager->GetConfVars()->ClusterRefinementThreshold, generateDebugInfo, make_uint2(pixel), thrust::raw_pointer_cast(&usedAvplsGpu[0]), m_width, m_height); if (generateDebugInfo) { std::vector<glm::vec3> positions; std::vector<glm::vec3> colors; std::vector<glm::vec3> bbMins; std::vector<glm::vec3> bbMaxs; int numLeafs = avplBvh->getBvhData()->numLeafs; thrust::copy(usedAvplsGpu.begin(), usedAvplsGpu.end(), usedAvplsCpu.begin()); for (int i = 0; i < usedAvplsCpu.size(); ++i) { if (usedAvplsCpu[i] == 1) { glm::vec3 pos(make_vec3(avplBvh->getAvplBvhNodeData()->position[i])); positions.push_back(pos); if (i < numLeafs) { bbMins.push_back(pos); bbMaxs.push_back(pos); colors.push_back(glm::vec3(1.f, 0.f, 0.f)); } else { colors.push_back(glm::vec3(0.f, 1.f, 0.f)); int idx = i - numLeafs; glm::vec3 bbmin(make_vec3(avplBvh->getNode(idx).bbMin)); glm::vec3 bbmax(make_vec3(avplBvh->getNode(idx).bbMax)); bbMins.push_back(bbmin); bbMaxs.push_back(bbmax); } } } m_pointCloud.reset(new PointCloud(positions, colors, m_ubTransform, m_confManager->GetConfVars()->lightRadiusScale * sceneExtent / 100.f)); m_aabbCloud.reset(new AABBCloud(bbMins, bbMaxs, m_ubTransform)); } gather.stop(); if(profile) std::cout << "gather build bvh took: " << buildBvh.getTime() << "ms" << std::endl; if(profile) std::cout << "gather radiance with lightcuts took: " << gather.getTime() << "ms" << std::endl; } else { CudaTimer gather; gather.start(); // Invoke kernel dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( kernel_radiance_simple), dim3(dimGrid), dim3(dimBlock), 0, 0, radianceOutMapped.getCudaSurfaceObject(), 
positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_width, m_height); gather.stop(); if(profile) std::cout << "gather radiance without lightcuts took: " << gather.getTime() << "ms" << std::endl; } //////////////////////////////////////////////////////////////////////// // gather antiradiance //////////////////////////////////////////////////////////////////////// { const float photonRadius = m_confManager->GetConfVars()->photonRadiusScale * sceneExtent / 100.f; if (m_confManager->GetConfVars()->useClusteredDeferred) { const int maxNumLightsPerCluster = avpls.size(); thrust::device_vector<int> numLightsPerCluster(m_visiblePointsBvh->m_numClusters, 0); thrust::device_vector<int> lightsPerCluster(maxNumLightsPerCluster * m_visiblePointsBvh->m_numClusters, 0); { // gather lights per cluster CudaTimer timer; timer.start(); if (m_confManager->GetConfVars()->gatherClusterLightsSimple) { dim3 dimBlock(128); dim3 dimGrid((m_visiblePointsBvh->m_numClusters + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( kernel_gather_cluster_lights_simple), dim3(dimGrid), dim3(dimBlock), 0, 0, avplsGpu.param->getDevicePtr(), thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), photonRadius, maxNumLightsPerCluster, thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMin[0])), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMax[0])), m_visiblePointsBvh->m_numClusters); } else { dim3 dimBlock(NUM_THREADS_GATHER_CLUSTER_LIGHTS); dim3 dimGrid((m_visiblePointsBvh->m_numClusters + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( kernel_gather_cluster_lights), dim3(dimGrid), dim3(dimBlock), 0, 0, avplsGpu.param->getDevicePtr(), thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), photonRadius, maxNumLightsPerCluster, 
thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMin[0])), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMax[0])), m_visiblePointsBvh->m_numClusters); } timer.stop(); if(profile) std::cout << "gather lights per cluster: " << timer.getTime() << "ms" << std::endl; } // gather antiradiance CudaTimer gather; gather.start(); dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); dim3 dimensions(m_width, m_height); hipLaunchKernelGGL(( kernel_antiradiance_clusterred), dim3(dimGrid), dim3(dimBlock), 0, 0, dimensions, antiradianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), photonRadius, thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterIdBuffer[0])), maxNumLightsPerCluster, m_width, m_height); gather.stop(); if(profile) std::cout << "gather antiradiance took: " << gather.getTime() << "ms" << std::endl; } else{ CudaTimer gather; gather.start(); dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( kernel_antiradiance), dim3(dimGrid), dim3(dimBlock), 0, 0, antiradianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_confManager->GetConfVars()->photonRadiusScale * sceneExtent / 100.f, m_width, m_height); gather.stop(); if(profile) std::cout << "gather antiradiance took: " << gather.getTime() << "ms" << std::endl; } } //////////////////////////////////////////////////////////////////////// // combine radiance and antiradiance 
//////////////////////////////////////////////////////////////////////// { CudaTimer timer; timer.start(); dim3 dimBlock(32, 32); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( kernel_combine), dim3(dimGrid), dim3(dimBlock), 0, 0, resultOutMapped.getCudaSurfaceObject(), radianceOutMapped.getCudaSurfaceObject(), antiradianceOutMapped.getCudaSurfaceObject(), m_width, m_height); timer.stop(); if(profile) std::cout << "combine radiance and antiradiance took: " << timer.getTime() << "ms" << std::endl; } } void CudaGather::rebuildVisiblePointsBvh() { CudaGraphicsResourceMappedArray positionsMapped(m_positionResource.get()); CudaGraphicsResourceMappedArray normalsMapped(m_normalResource.get()); m_visiblePointsBvh.reset(new VisiblePointsBvh( positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), m_width, m_height, false, make_float3(m_camera->GetPosition()), m_camera->getZNear(), m_camera->getTheta())); }
ac22ceed9785800307e831347bb5db84e6ad9e80.cu
#include "cudaGather.h" #include "data_gpu.h" #include "bvh.h" #include "SceneProbe.h" #include "CudaResources/cudaTimer.hpp" #include "CudaResources/cudaUtil.hpp" #include "Utils/stream.h" #include "CConfigManager.h" #include "CCamera.h" #define THREADS_X 16 #define THREADS_Y 16 #define MAX_NUM_MATERIALS 20 #define STACK_SIZE 32 #define NUM_THREADS_GATHER_CLUSTER_LIGHTS 32 using namespace cuda; template<typename T> void printDeviceVector(std::string const& text, thrust::device_vector<T> const& vector) { std::cout << text << " "; for (int i = 0; i < vector.size(); i++) { std::cout << vector[i] << ", "; } std::cout << std::endl; } struct MAT { __device__ __host__ MAT(float3 const& d, float3 const& s, float e) : diffuse(d), specular(s), exponent(e) { } float3 diffuse; float3 specular; float exponent; }; inline __device__ float G(float3 const& p1, float3 const& n1, float3 const& p2, float3 const& n2) { float3 n_1 = normalize(n1); float3 n_2 = normalize(n2); float3 w = normalize(p2 - p1); float cos_theta_1 = clamp(dot(n_1, w), 0.f, 1.f); float cos_theta_2 = clamp(dot(n_2, -w), 0.f, 1.f); float dist = length(p2 - p1); return (cos_theta_1 * cos_theta_2) / (dist * dist); } inline __device__ float3 f_r(float3 const& w_i, float3 const& w_o, float3 const& n, MAT const& mat) { const float3 d = CUDA_ONE_OVER_PI * mat.diffuse; float3 s = make_float3(0.f); if (length(mat.specular) > 0.f) { const float cos_theta = max(0.f, dot(reflect(-w_i, n), w_o)); s = 0.5f * CUDA_ONE_OVER_PI * (mat.exponent+2.f) * pow(cos_theta, mat.exponent) * mat.specular; } return d+s; } inline __device__ float3 f_r(float3 const& from, float3 const& over, float3 const& to, float3 const& n, MAT const& mat) { const float3 w_i = normalize(from - over); const float3 w_o = normalize(to - over); return f_r(w_i, w_o, n, mat); } inline __device__ float3 getRadiance(float3 const& pos, float3 const& norm, MAT const& mat, float3 const& avpl_pos, float3 const& avpl_norm, float3 const& avpl_w, float3 const& avpl_L, MAT 
const& avpl_mat, float3 const& camPos) { const float3 direction = normalize(pos - avpl_pos); float3 brdf_light = f_r(-avpl_w, direction, avpl_norm, avpl_mat); // check for light source Avpl if(length(avpl_w) == 0.f) brdf_light = make_float3(1.f); const float3 brdf = f_r(avpl_pos, pos, camPos, norm, mat); return avpl_L * brdf_light * G(pos, norm, avpl_pos, avpl_norm) * brdf; } inline __device__ bool intersectLinePlane(float3 const& dir_line, float3 const& point_line, float3 const& normal_plane, float3 const& point_plane, float &t) { const float denom = dot(dir_line, normal_plane); if (denom == 0.f) return false; t = dot(point_plane - point_line, normal_plane) / denom; return true; } inline __device__ float3 getAntiradiance(float3 const& pos, float3 const& norm, MAT const& mat, float3 const& avpl_pos, float3 const& avpl_norm, float3 const& avpl_w, float3 const& avpl_A, MAT const& avpl_mat, float3 const& camPos, float radius) { float3 antirad = make_float3(0.f); float d = 0.f; if (!intersectLinePlane(avpl_w, avpl_pos, norm, pos, d) || d <= 0.f) { return antirad; } const float3 hitPoint = avpl_pos + d * normalize(avpl_w); if (length(hitPoint - pos) >= radius) { return antirad; } if (dot(normalize(hitPoint - avpl_pos), avpl_w) <= 0.f) { return antirad; } if (dot(norm, avpl_w) >= 0.f) { return antirad; } const float3 direction = normalize(pos - avpl_pos); if (dot(direction, avpl_norm) >= 0.f) { return antirad; } const float cos_theta = max(0.f, dot(norm, -direction)); const float3 brdf = f_r(avpl_pos, pos, camPos, norm, mat); return avpl_A * /*cos_theta **/ brdf / (CUDA_PI * radius * radius); } inline __device__ float3 getRadiance(float3 const& pos, float3 const& norm, MAT const& mat, MAT const& avpl_mat, int idx, AvplBvhNodeDataParam* dataParam, float3 const& camPos) { const float3 avpl_pos = dataParam->position[idx]; const float3 avpl_norm = dataParam->normal[idx]; const float3 avpl_incRad = dataParam->incRadiance[idx]; const float3 avpl_incDir = 
dataParam->incDirection[idx]; return getRadiance(pos, norm, mat, avpl_pos, avpl_norm, avpl_incDir, avpl_incRad, avpl_mat, camPos); } __global__ void kernel_radiance( cudaSurfaceObject_t outRadiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const bool calcPixelValue = (x < width && y < height); float3 pos = make_float3(1.f); float3 norm = make_float3(1.f); int materialIndex = 0; if (calcPixelValue) { // fetch gbuffer float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); norm= make_float3(data); materialIndex = int(data.w); } float3 outRad = make_float3(0.f); float3 outAntirad = make_float3(0.f); const int numAvpls = avpls->numAvpls; const int threadId = threadIdx.x + threadIdx.y * blockDim.x; const int chunkSize = THREADS_X * THREADS_Y; const int numChunks = max(numAvpls / chunkSize, 0); __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_normal[chunkSize]; __shared__ float3 avpl_incRadiance[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; __shared__ int avpl_materialIndex[chunkSize]; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float material_exponent[MAX_NUM_MATERIALS]; if (threadId < materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = materials->diffuse[threadId]; material_specular[threadId] = materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); for (int chunk = 0; chunk < numChunks; ++chunk) { // load chunk into shared memory const int index = chunkSize * 
chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_incRadiance[threadId] = avpls->incRadiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; syncthreads(); if (!calcPixelValue) continue; // process avpls for(int i = 0; i < chunkSize; ++i) { int matIndex = avpl_materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outRad += getRadiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_incRadiance[i], avpl_mat, camPos); } } // remainig avpls const int index = chunkSize * numChunks + threadId; if (index < numAvpls) { avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_incRadiance[threadId] = avpls->incRadiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; } syncthreads(); const int remaining = numAvpls - numChunks * chunkSize; for (int i = 0; i < remaining; ++i) { MAT avpl_mat(material_diffuse[avpl_materialIndex[i]], material_specular[avpl_materialIndex[i]], material_exponent[avpl_materialIndex[i]]); outRad += getRadiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_incRadiance[i], avpl_mat, camPos); } if (!calcPixelValue) return; float4 out = make_float4(outRad, 1.f); surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_radiance_simple( cudaSurfaceObject_t outRadiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 
pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); float3 outRad = make_float3(0.f); const int numAvpls = avpls->numAvpls; MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); for (int i = 0; i < numAvpls; ++i) { const int avpl_mat_index = avpls->materialIndex[i]; MAT avpl_mat(materials->diffuse[avpl_mat_index], materials->specular[avpl_mat_index], materials->exponent[avpl_mat_index]); outRad += getRadiance(pos, norm, mat, avpls->position[i], avpls->normal[i], avpls->incDirection[i], avpls->incRadiance[i], avpl_mat, camPos); } float4 out = make_float4(outRad, 1.f); surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_bvh( cudaSurfaceObject_t outRadiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, BvhParam* bvhParam, AvplBvhNodeDataParam* dataParam, MaterialsGpuParam* materials, float3 camPos, int bvhLevel, float refThresh, bool genDebugInfo, uint2 debugPixel, int* usedAvpls, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } bool generateDebugInfo = (x == debugPixel.x) && (y == debugPixel.y) && genDebugInfo; float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos= make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm= make_float3(data); const int materialIndex = int(data.w); const int threadId = threadIdx.x + threadIdx.y * blockDim.x; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float material_exponent[MAX_NUM_MATERIALS]; if (threadId < materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = materials->diffuse[threadId]; material_specular[threadId] = 
materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); float3 outRad = make_float3(0.f); /* for (int i = 0; i < bvhParam->numLeafs + bvhParam->numNodes; ++i) { int matIndex = dataParam->materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outRad += getRadiance(pos, norm, mat, dataParam->position[i], dataParam->normal[i], dataParam->incDirection[i], dataParam->incRadiance[i], avpl_mat, camPos); } */ int stack[STACK_SIZE]; int stack_depth[STACK_SIZE]; stack[0] = -1; stack_depth[0] = 0; int stack_ptr = 1; bool error = false; while (stack_ptr > 0) { stack_ptr--; const int nodeIndex = stack[stack_ptr]; const int nodeDepth = stack_depth[stack_ptr]; if (nodeIndex >= 0) { // leaf const int matIdx = dataParam->materialIndex[nodeIndex]; MAT avpl_mat(material_diffuse[matIdx], material_specular[matIdx], material_exponent[matIdx]); outRad += getRadiance(pos, norm, mat, avpl_mat, nodeIndex, dataParam, camPos); if (generateDebugInfo) { usedAvpls[nodeIndex] = 1; } } else { // inner node const int idx = bvhParam->numLeafs-(nodeIndex+1); const BvhNode bvhNode = bvhParam->nodes[-(nodeIndex+1)]; if (-(nodeIndex+1) < 0 || -(nodeIndex+1) >= bvhParam->numNodes) { error = true; break; } const bool clusterVisible = (dot(norm, normalize(bvhNode.bbMax - pos)) > 0) || (dot(norm, normalize(bvhNode.bbMin - pos)) > 0); if (!clusterVisible) continue; const float3 clusterToPoint = normalize(dataParam->position[idx] - pos); const float radius = 0.5f * length(bvhNode.bbMax - bvhNode.bbMin); const float dist = length(dataParam->position[idx] - pos); const float solidAngle = 2.f * CUDA_PI * (1.f - dist / (sqrt(radius * radius + dist * dist))); const bool useNode = (solidAngle < refThresh); if (useNode) { const int matIdx = dataParam->materialIndex[idx]; MAT 
avpl_mat(material_diffuse[matIdx], material_specular[matIdx], material_exponent[matIdx]); outRad += getRadiance(pos, norm, mat, avpl_mat, idx, dataParam, camPos); if (generateDebugInfo) { usedAvpls[idx] = 1; } } else { stack[stack_ptr] = bvhNode.left; stack_depth[stack_ptr] = nodeDepth + 1; stack_ptr++; stack[stack_ptr] = bvhNode.right; stack_depth[stack_ptr] = nodeDepth + 1; stack_ptr++; if (stack_ptr >= STACK_SIZE) { error = true; break; } } } } float4 out = make_float4(outRad, 1.f); if (error) { out = make_float4(100000.f, 0.f, 100000.f, 1.f); } surf2Dwrite(out, outRadiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance( cudaSurfaceObject_t outAntiradiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const bool calcPixelValue = (x < width && y < height); float3 pos = make_float3(1.f); float3 norm = make_float3(1.f); int materialIndex = 0; if (calcPixelValue) { // fetch gbuffer float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); norm= make_float3(data); materialIndex = int(data.w); } float3 outAntirad = make_float3(0.f); const int numAvpls = avpls->numAvpls; const int threadId = threadIdx.x + threadIdx.y * blockDim.x; const int chunkSize = THREADS_X * THREADS_Y; const int numChunks = max(numAvpls / chunkSize, 0); __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_normal[chunkSize]; __shared__ float3 avpl_antiradiance[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; __shared__ int avpl_materialIndex[chunkSize]; __shared__ float3 material_diffuse[MAX_NUM_MATERIALS]; __shared__ float3 material_specular[MAX_NUM_MATERIALS]; __shared__ float material_exponent[MAX_NUM_MATERIALS]; if (threadId < 
materials->numMaterials && threadId < MAX_NUM_MATERIALS) { material_diffuse[threadId] = materials->diffuse[threadId]; material_specular[threadId] = materials->specular[threadId]; material_exponent[threadId] = materials->exponent[threadId]; } syncthreads(); MAT mat(material_diffuse[materialIndex], material_specular[materialIndex], material_exponent[materialIndex]); for (int chunk = 0; chunk < numChunks; ++chunk) { // load chunk into shared memory const int index = chunkSize * chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_antiradiance[threadId] = avpls->antiradiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; syncthreads(); if (!calcPixelValue) continue; // process avpls for(int i = 0; i < chunkSize; ++i) { int matIndex = avpl_materialIndex[i]; MAT avpl_mat(material_diffuse[matIndex], material_specular[matIndex], material_exponent[matIndex]); outAntirad += getAntiradiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_antiradiance[i], avpl_mat, camPos, photonRadius); } } // remainig avpls const int index = chunkSize * numChunks + threadId; if (index < numAvpls) { avpl_position[threadId] = avpls->position[index]; avpl_normal[threadId] = avpls->normal[index]; avpl_antiradiance[threadId] = avpls->antiradiance[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; avpl_materialIndex[threadId] = avpls->materialIndex[index]; } syncthreads(); const int remaining = numAvpls - numChunks * chunkSize; for (int i = 0; i < remaining; ++i) { MAT avpl_mat(material_diffuse[avpl_materialIndex[i]], material_specular[avpl_materialIndex[i]], material_exponent[avpl_materialIndex[i]]); outAntirad += getAntiradiance(pos, norm, mat, avpl_position[i], avpl_normal[i], avpl_incDirection[i], avpl_antiradiance[i], avpl_mat, camPos, photonRadius); } if (!calcPixelValue) return; float4 out = 
make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance_simple( cudaSurfaceObject_t outAntiradiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos = make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); float3 outAntirad = make_float3(0.f); MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); for (int i = 0; i < avpls->numAvpls; ++i) { const int avpl_mat_index = avpls->materialIndex[i]; MAT avpl_mat(materials->diffuse[avpl_mat_index], materials->specular[avpl_mat_index], materials->exponent[avpl_mat_index]); outAntirad += getAntiradiance(pos, norm, mat, avpls->position[i], avpls->normal[i], avpls->incDirection[i], avpls->antiradiance[i], avpl_mat, camPos, photonRadius); } float4 out = make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } __global__ void kernel_antiradiance_clusterred( dim3 dimensions, cudaSurfaceObject_t outAntiradiance, cudaSurfaceObject_t inPositions, cudaSurfaceObject_t inNormals, AvplsGpuParam* avpls, MaterialsGpuParam* materials, float3 camPos, float photonRadius, int* numLightsPerCluster, int* lightsPerCluster, int* clusterIds, int maxNumLightsPerCluster, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 data; surf2Dread(&data, inPositions, x * sizeof(float4), y); const float3 pos = 
make_float3(data); surf2Dread(&data, inNormals, x * sizeof(float4), y); const float3 norm = make_float3(data); const int materialIndex = int(data.w); MAT mat(materials->diffuse[materialIndex], materials->specular[materialIndex], materials->exponent[materialIndex]); float3 outAntirad = make_float3(0.f); const int clusterId = clusterIds[y * dimensions.x + x]; const int numLights = numLightsPerCluster[clusterId]; const int clusterOffset = clusterId * maxNumLightsPerCluster; for (int i = 0; i < numLights; ++i) { int lightIdx = lightsPerCluster[clusterOffset + i]; int matIndex = avpls->materialIndex[lightIdx]; MAT avpl_mat(materials->diffuse[matIndex], materials->specular[matIndex], materials->exponent[matIndex]); outAntirad += getAntiradiance(pos, norm, mat, avpls->position[lightIdx], avpls->normal[lightIdx], avpls->incDirection[lightIdx], avpls->antiradiance[lightIdx], avpl_mat, camPos, photonRadius); } float4 out = make_float4(outAntirad, 1.f); surf2Dwrite(out, outAntiradiance, x * sizeof(float4), y); } // http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms __device__ bool intersectBB(float3 const& bbMin, float3 const& bbMax, float3 const& o, float3 const& d) { const float dirfracx = 1.0f / d.x; const float dirfracy = 1.0f / d.y; const float dirfracz = 1.0f / d.z; const float t1 = (bbMin.x - o.x) * dirfracx; const float t2 = (bbMax.x - o.x) * dirfracx; const float t3 = (bbMin.y - o.y) * dirfracy; const float t4 = (bbMax.y - o.y) * dirfracy; const float t5 = (bbMin.z - o.z) * dirfracz; const float t6 = (bbMax.z - o.z) * dirfracz; const float tmin = max(max(min(t1, t2), min(t3, t4)), min(t5, t6)); const float tmax = min(min(max(t1, t2), max(t3, t4)), max(t5, t6)); if (tmax < 0 || tmin > tmax) { return false; } return true; } __global__ void kernel_gather_cluster_lights( AvplsGpuParam* avpls, int* numLightsPerCluster, int* lightsPerCluster, float photonRadius, int maxNumLightsPerCluster, float3* clusterBBMin, float3* 
clusterBBMax, int numClusters) { const int clusterId = blockIdx.x * blockDim.x + threadIdx.x; const bool validClusterId = clusterId < numClusters; const float3 bbMin = clusterBBMin[clusterId] - 2.f * make_float3(photonRadius); const float3 bbMax = clusterBBMax[clusterId] + 2.f * make_float3(photonRadius); const int clusterOffset = maxNumLightsPerCluster * clusterId; int numClusterLights = 0; const int numLights = avpls->numAvpls; const int threadId = threadIdx.x; const int chunkSize = NUM_THREADS_GATHER_CLUSTER_LIGHTS; const int numChunks = numLights / chunkSize; __shared__ float3 avpl_position[chunkSize]; __shared__ float3 avpl_incDirection[chunkSize]; for (int chunk = 0; chunk < numChunks; ++chunk) { // load chunk into shared memory const int index = chunkSize * chunk + threadId; avpl_position[threadId] = avpls->position[index]; avpl_incDirection[threadId] = avpls->incDirection[index]; syncthreads(); if (!validClusterId) continue; for(int i = 0; i < chunkSize; ++i) { // check for hit with cluster if (intersectBB(bbMin, bbMax, avpl_position[i], avpl_incDirection[i])) { const int idx = chunkSize * chunk + i; lightsPerCluster[clusterOffset + numClusterLights] = idx; numClusterLights++; } } } syncthreads(); if (!validClusterId) return; // remainig avpls for (int i = numChunks * chunkSize; i < numLights; ++i) { if (intersectBB(bbMin, bbMax, avpls->position[i], avpls->incDirection[i])) { lightsPerCluster[clusterOffset + numClusterLights] = i; numClusterLights++; } } numLightsPerCluster[clusterId] = numClusterLights; } __global__ void kernel_gather_cluster_lights_simple( AvplsGpuParam* avpls, int* numLightsPerCluster, int* lightsPerCluster, float photonRadius, int maxNumLightsPerCluster, float3* clusterBBMin, float3* clusterBBMax, int numClusters) { const int clusterId = blockIdx.x * blockDim.x + threadIdx.x; if (clusterId >= numClusters) { return; } const float3 bbMin = clusterBBMin[clusterId] - 2.f * make_float3(photonRadius); const float3 bbMax = 
clusterBBMax[clusterId] + 2.f * make_float3(photonRadius); const int clusterOffset = maxNumLightsPerCluster * clusterId; int numClusterLights = 0; for (int i = 0; i < avpls->numAvpls; ++i) { if (intersectBB(bbMin, bbMax, avpls->position[i], avpls->incDirection[i])) { lightsPerCluster[clusterOffset + numClusterLights] = i; numClusterLights++; } } numLightsPerCluster[clusterId] = numClusterLights; } __global__ void kernel_combine( cudaSurfaceObject_t outResult, cudaSurfaceObject_t inRadiance, cudaSurfaceObject_t inAntiradiance, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 radiance; surf2Dread(&radiance, inRadiance, x * sizeof(float4), y); float4 antiradiance; surf2Dread(&antiradiance, inAntiradiance, x * sizeof(float4), y); //float4 out = max(make_float4(0.f), radiance - antiradiance); float4 out = radiance - antiradiance; surf2Dwrite(out, outResult, x * sizeof(float4), y); } CudaGather::CudaGather(CCamera* camera, GLuint glPositonTexture, GLuint glNormalTexture, GLuint glResultOutputTexture, GLuint glRadianceOutputTexture, GLuint glAntiradianceOutputTexture, std::vector<MATERIAL> const& materials, COGLUniformBuffer* ubTransform, CConfigManager* confManager) : m_camera(camera), m_width(camera->GetWidth()), m_height(camera->GetHeight()), m_ubTransform(ubTransform), m_confManager(confManager) { m_positionResource.reset(new CudaGraphicsResource(glPositonTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsNone)); m_normalResource.reset(new CudaGraphicsResource(glNormalTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsNone)); m_antiradianceOutputResource.reset(new CudaGraphicsResource(glAntiradianceOutputTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore)); m_radianceOutputResource.reset(new CudaGraphicsResource(glRadianceOutputTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore)); m_resultOutputResource.reset(new 
CudaGraphicsResource(glResultOutputTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore)); m_materialsGpu.reset(new MaterialsGpu(materials)); } CudaGather::~CudaGather() { m_materialsGpu.reset(nullptr); } void CudaGather::run(std::vector<Avpl> const& avpls, glm::vec3 const& cameraPosition, SceneProbe* sceneProbe, float sceneExtent, bool profile) { CudaGraphicsResourceMappedArray positionsMapped(m_positionResource.get()); CudaGraphicsResourceMappedArray normalsMapped(m_normalResource.get()); CudaGraphicsResourceMappedArray radianceOutMapped(m_radianceOutputResource.get()); CudaGraphicsResourceMappedArray antiradianceOutMapped(m_antiradianceOutputResource.get()); CudaGraphicsResourceMappedArray resultOutMapped(m_resultOutputResource.get()); CudaTimer timer; timer.start(); AvplsGpu avplsGpu(avpls); timer.stop(); if(profile) std::cout << "transfer avpl data to gpu took: " << timer.getTime() << "ms" << std::endl; //////////////////////////////////////////////////////////////////////// // gather radiance //////////////////////////////////////////////////////////////////////// if (m_confManager->GetConfVars()->useLightCuts) { bool generateDebugInfo = m_confManager->GetConfVars()->UseDebugMode && sceneProbe; cuda::CudaTimer buildBvh; buildBvh.start(); std::unique_ptr<AvplBvh> avplBvh = std::unique_ptr<AvplBvh>(new AvplBvh(avpls, false)); buildBvh.stop(); cuda::CudaTimer gather; gather.start(); glm::uvec2 pixel(0, 0); if (sceneProbe) pixel = sceneProbe->getPixel(); thrust::device_vector<int> usedAvplsGpu(avplBvh->getBvhData()->numLeafs + avplBvh->getBvhData()->numNodes); thrust::host_vector<int> usedAvplsCpu(avplBvh->getBvhData()->numLeafs + avplBvh->getBvhData()->numNodes); thrust::fill(usedAvplsGpu.begin(), usedAvplsGpu.end(), 0); // Invoke kernel dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); kernel_bvh<<<dimGrid, dimBlock>>>( radianceOutMapped.getCudaSurfaceObject(), 
positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplBvh->getBvhParam(), avplBvh->getAvplBvhNodeDataParam(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_confManager->GetConfVars()->bvhLevel, m_confManager->GetConfVars()->ClusterRefinementThreshold, generateDebugInfo, make_uint2(pixel), thrust::raw_pointer_cast(&usedAvplsGpu[0]), m_width, m_height); if (generateDebugInfo) { std::vector<glm::vec3> positions; std::vector<glm::vec3> colors; std::vector<glm::vec3> bbMins; std::vector<glm::vec3> bbMaxs; int numLeafs = avplBvh->getBvhData()->numLeafs; thrust::copy(usedAvplsGpu.begin(), usedAvplsGpu.end(), usedAvplsCpu.begin()); for (int i = 0; i < usedAvplsCpu.size(); ++i) { if (usedAvplsCpu[i] == 1) { glm::vec3 pos(make_vec3(avplBvh->getAvplBvhNodeData()->position[i])); positions.push_back(pos); if (i < numLeafs) { bbMins.push_back(pos); bbMaxs.push_back(pos); colors.push_back(glm::vec3(1.f, 0.f, 0.f)); } else { colors.push_back(glm::vec3(0.f, 1.f, 0.f)); int idx = i - numLeafs; glm::vec3 bbmin(make_vec3(avplBvh->getNode(idx).bbMin)); glm::vec3 bbmax(make_vec3(avplBvh->getNode(idx).bbMax)); bbMins.push_back(bbmin); bbMaxs.push_back(bbmax); } } } m_pointCloud.reset(new PointCloud(positions, colors, m_ubTransform, m_confManager->GetConfVars()->lightRadiusScale * sceneExtent / 100.f)); m_aabbCloud.reset(new AABBCloud(bbMins, bbMaxs, m_ubTransform)); } gather.stop(); if(profile) std::cout << "gather build bvh took: " << buildBvh.getTime() << "ms" << std::endl; if(profile) std::cout << "gather radiance with lightcuts took: " << gather.getTime() << "ms" << std::endl; } else { CudaTimer gather; gather.start(); // Invoke kernel dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); kernel_radiance_simple<<<dimGrid, dimBlock>>>( radianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), 
avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_width, m_height); gather.stop(); if(profile) std::cout << "gather radiance without lightcuts took: " << gather.getTime() << "ms" << std::endl; } //////////////////////////////////////////////////////////////////////// // gather antiradiance //////////////////////////////////////////////////////////////////////// { const float photonRadius = m_confManager->GetConfVars()->photonRadiusScale * sceneExtent / 100.f; if (m_confManager->GetConfVars()->useClusteredDeferred) { const int maxNumLightsPerCluster = avpls.size(); thrust::device_vector<int> numLightsPerCluster(m_visiblePointsBvh->m_numClusters, 0); thrust::device_vector<int> lightsPerCluster(maxNumLightsPerCluster * m_visiblePointsBvh->m_numClusters, 0); { // gather lights per cluster CudaTimer timer; timer.start(); if (m_confManager->GetConfVars()->gatherClusterLightsSimple) { dim3 dimBlock(128); dim3 dimGrid((m_visiblePointsBvh->m_numClusters + dimBlock.x - 1) / dimBlock.x); kernel_gather_cluster_lights_simple<<<dimGrid, dimBlock>>>( avplsGpu.param->getDevicePtr(), thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), photonRadius, maxNumLightsPerCluster, thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMin[0])), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMax[0])), m_visiblePointsBvh->m_numClusters); } else { dim3 dimBlock(NUM_THREADS_GATHER_CLUSTER_LIGHTS); dim3 dimGrid((m_visiblePointsBvh->m_numClusters + dimBlock.x - 1) / dimBlock.x); kernel_gather_cluster_lights<<<dimGrid, dimBlock>>>( avplsGpu.param->getDevicePtr(), thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), photonRadius, maxNumLightsPerCluster, thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMin[0])), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterBBMax[0])), m_visiblePointsBvh->m_numClusters); } timer.stop(); 
if(profile) std::cout << "gather lights per cluster: " << timer.getTime() << "ms" << std::endl; } // gather antiradiance CudaTimer gather; gather.start(); dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); dim3 dimensions(m_width, m_height); kernel_antiradiance_clusterred<<<dimGrid, dimBlock>>>( dimensions, antiradianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), photonRadius, thrust::raw_pointer_cast(&numLightsPerCluster[0]), thrust::raw_pointer_cast(&lightsPerCluster[0]), thrust::raw_pointer_cast(&(m_visiblePointsBvh->m_clusterIdBuffer[0])), maxNumLightsPerCluster, m_width, m_height); gather.stop(); if(profile) std::cout << "gather antiradiance took: " << gather.getTime() << "ms" << std::endl; } else{ CudaTimer gather; gather.start(); dim3 dimBlock(THREADS_X, THREADS_Y); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); kernel_antiradiance<<<dimGrid, dimBlock>>>( antiradianceOutMapped.getCudaSurfaceObject(), positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), avplsGpu.param->getDevicePtr(), m_materialsGpu->param->getDevicePtr(), make_float3(cameraPosition), m_confManager->GetConfVars()->photonRadiusScale * sceneExtent / 100.f, m_width, m_height); gather.stop(); if(profile) std::cout << "gather antiradiance took: " << gather.getTime() << "ms" << std::endl; } } //////////////////////////////////////////////////////////////////////// // combine radiance and antiradiance //////////////////////////////////////////////////////////////////////// { CudaTimer timer; timer.start(); dim3 dimBlock(32, 32); dim3 dimGrid((m_width + dimBlock.x - 1) / dimBlock.x, (m_height + dimBlock.y - 1) / dimBlock.y); kernel_combine<<<dimGrid, dimBlock>>>( 
resultOutMapped.getCudaSurfaceObject(), radianceOutMapped.getCudaSurfaceObject(), antiradianceOutMapped.getCudaSurfaceObject(), m_width, m_height); timer.stop(); if(profile) std::cout << "combine radiance and antiradiance took: " << timer.getTime() << "ms" << std::endl; } } void CudaGather::rebuildVisiblePointsBvh() { CudaGraphicsResourceMappedArray positionsMapped(m_positionResource.get()); CudaGraphicsResourceMappedArray normalsMapped(m_normalResource.get()); m_visiblePointsBvh.reset(new VisiblePointsBvh( positionsMapped.getCudaSurfaceObject(), normalsMapped.getCudaSurfaceObject(), m_width, m_height, false, make_float3(m_camera->GetPosition()), m_camera->getZNear(), m_camera->getTheta())); }
569354633d77b99b12393a8b7c4f99b0cd769d38.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudarray/common.hpp" #include "cudarray/image/img2win.hpp" namespace cudarray { inline static int ceil_div(int x, int y) { return (x + y - 1) / y; } template <typename T, int group_size> __global__ void kernel_img2win(const T *imgs, int n_threads, int n_imgs, int img_h, int img_w, int wins_h, int wins_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *wins) { int win_size = win_h*win_w; CUDA_GRID_STRIDE_LOOP(idx, n_threads) { int wins_x = idx % wins_w; int wins_y = (idx / wins_w) % wins_h; // window offset int k = (idx / wins_w / wins_h) % win_size; // image idx int n = idx / wins_w / wins_h / win_size * group_size; int img_x = wins_x * stride_x - pad_x + (k % win_w); int img_y = wins_y * stride_y - pad_y + k / win_w; imgs += (n*img_h + img_y)*img_w + img_x; wins += ((n*win_size + k)*wins_h + wins_y)*wins_w + wins_x; bool valid = img_x >= 0 && img_x < img_w && img_y >= 0 && img_y < img_h; for (int i = 0; i < group_size; ++i) { if (i+n < n_imgs) { if (valid) { *wins = *imgs; } else { *wins = 0.0; } } wins += win_size * wins_h * wins_w; imgs += img_h * img_w; } } } template <typename T> void img2win(const T *imgs, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *wins) { int wins_h = (img_h + 2*pad_y - win_h) / stride_y + 1; int wins_w = (img_w + 2*pad_x - win_w) / stride_x + 1; const int group_size = 32; int n_threads = ceil_div(n_imgs, group_size)*win_h*win_w*wins_h*wins_w; hipLaunchKernelGGL(( kernel_img2win<T, group_size>) , dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0, imgs, n_threads, n_imgs, img_h, img_w, wins_h, wins_w, win_h, win_w, pad_y, pad_x, stride_y, stride_x, wins ); CUDA_KERNEL_CHECK; } template void img2win(const float *imgs, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, float *wins); template 
<typename T> __global__ void kernel_win2img(const T* wins, int n_threads, int n_imgs, int img_h, int img_w, int wins_h, int wins_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *imgs) { CUDA_GRID_STRIDE_LOOP(idx, n_threads) { int img_x = idx % img_w + pad_x; int img_y = (idx / img_w) % img_h + pad_y; int n = idx / img_w / img_h; int wins_x_start = (img_x < win_w) ? 0 : (img_x - win_w) / stride_x + 1; int wins_x_end = min(img_x / stride_x + 1, wins_w); int wins_y_start = (img_y < win_h) ? 0 : (img_y - win_h) / stride_y + 1; int wins_y_end = min(img_y / stride_y + 1, wins_h); int wins_y_offset = (1 - stride_y * win_w * wins_h) * wins_w; int wins_x_offset = (1 - stride_x * wins_h * wins_w); wins += (n * win_h * win_w + img_y * win_w + img_x) * wins_h * wins_w; T sum = 0; for (int wins_y = wins_y_start; wins_y < wins_y_end; ++wins_y) { for (int wins_x = wins_x_start; wins_x < wins_x_end; ++wins_x) { sum += wins[wins_y * wins_y_offset + wins_x * wins_x_offset]; } } imgs[idx] = sum; } } template <typename T> void win2img(const T *wins, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *imgs) { int wins_h = (img_h + 2*pad_y - win_h) / stride_y + 1; int wins_w = (img_w + 2*pad_x - win_w) / stride_x + 1; int n_threads = n_imgs * img_h * img_w; hipLaunchKernelGGL(( kernel_win2img), dim3(cuda_blocks(n_threads)), dim3(kNumBlockThreads), 0, 0, wins, n_threads, n_imgs, img_h, img_w, wins_h, wins_w, win_h, win_w, pad_y, pad_x, stride_y, stride_x, imgs); CUDA_KERNEL_CHECK; } template void win2img(const float *wins, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, float *imgs); }
569354633d77b99b12393a8b7c4f99b0cd769d38.cu
#include "cudarray/common.hpp" #include "cudarray/image/img2win.hpp" namespace cudarray { inline static int ceil_div(int x, int y) { return (x + y - 1) / y; } template <typename T, int group_size> __global__ void kernel_img2win(const T *imgs, int n_threads, int n_imgs, int img_h, int img_w, int wins_h, int wins_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *wins) { int win_size = win_h*win_w; CUDA_GRID_STRIDE_LOOP(idx, n_threads) { int wins_x = idx % wins_w; int wins_y = (idx / wins_w) % wins_h; // window offset int k = (idx / wins_w / wins_h) % win_size; // image idx int n = idx / wins_w / wins_h / win_size * group_size; int img_x = wins_x * stride_x - pad_x + (k % win_w); int img_y = wins_y * stride_y - pad_y + k / win_w; imgs += (n*img_h + img_y)*img_w + img_x; wins += ((n*win_size + k)*wins_h + wins_y)*wins_w + wins_x; bool valid = img_x >= 0 && img_x < img_w && img_y >= 0 && img_y < img_h; for (int i = 0; i < group_size; ++i) { if (i+n < n_imgs) { if (valid) { *wins = *imgs; } else { *wins = 0.0; } } wins += win_size * wins_h * wins_w; imgs += img_h * img_w; } } } template <typename T> void img2win(const T *imgs, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *wins) { int wins_h = (img_h + 2*pad_y - win_h) / stride_y + 1; int wins_w = (img_w + 2*pad_x - win_w) / stride_x + 1; const int group_size = 32; int n_threads = ceil_div(n_imgs, group_size)*win_h*win_w*wins_h*wins_w; kernel_img2win<T, group_size> <<<cuda_blocks(n_threads), kNumBlockThreads>>>( imgs, n_threads, n_imgs, img_h, img_w, wins_h, wins_w, win_h, win_w, pad_y, pad_x, stride_y, stride_x, wins ); CUDA_KERNEL_CHECK; } template void img2win(const float *imgs, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, float *wins); template <typename T> __global__ void kernel_win2img(const T* wins, int n_threads, int n_imgs, int img_h, int img_w, int wins_h, int 
wins_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *imgs) { CUDA_GRID_STRIDE_LOOP(idx, n_threads) { int img_x = idx % img_w + pad_x; int img_y = (idx / img_w) % img_h + pad_y; int n = idx / img_w / img_h; int wins_x_start = (img_x < win_w) ? 0 : (img_x - win_w) / stride_x + 1; int wins_x_end = min(img_x / stride_x + 1, wins_w); int wins_y_start = (img_y < win_h) ? 0 : (img_y - win_h) / stride_y + 1; int wins_y_end = min(img_y / stride_y + 1, wins_h); int wins_y_offset = (1 - stride_y * win_w * wins_h) * wins_w; int wins_x_offset = (1 - stride_x * wins_h * wins_w); wins += (n * win_h * win_w + img_y * win_w + img_x) * wins_h * wins_w; T sum = 0; for (int wins_y = wins_y_start; wins_y < wins_y_end; ++wins_y) { for (int wins_x = wins_x_start; wins_x < wins_x_end; ++wins_x) { sum += wins[wins_y * wins_y_offset + wins_x * wins_x_offset]; } } imgs[idx] = sum; } } template <typename T> void win2img(const T *wins, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, T *imgs) { int wins_h = (img_h + 2*pad_y - win_h) / stride_y + 1; int wins_w = (img_w + 2*pad_x - win_w) / stride_x + 1; int n_threads = n_imgs * img_h * img_w; kernel_win2img<<<cuda_blocks(n_threads), kNumBlockThreads>>>( wins, n_threads, n_imgs, img_h, img_w, wins_h, wins_w, win_h, win_w, pad_y, pad_x, stride_y, stride_x, imgs); CUDA_KERNEL_CHECK; } template void win2img(const float *wins, int n_imgs, int img_h, int img_w, int win_h, int win_w, int pad_y, int pad_x, int stride_y, int stride_x, float *imgs); }
e3d07101f6e60778b63d47daa851b153ba37524f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include "hardswish.h" #include "utils.h" namespace nvinfer1 { HardSwishPlugin::HardSwishPlugin() { } HardSwishPlugin::~HardSwishPlugin() { } // create the plugin at runtime from a byte stream HardSwishPlugin::HardSwishPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; Tn::read(d, mInputSize); assert(d == a + length); } void HardSwishPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; Tn::write(d, mInputSize); assert(d == a + getSerializationSize()); } size_t HardSwishPlugin::getSerializationSize() const { return sizeof(mInputSize); } int HardSwishPlugin::initialize() { return 0; } Dims HardSwishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(nbInputDims == 1); assert(index == 0); return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void HardSwishPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* HardSwishPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType HardSwishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool HardSwishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
bool HardSwishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void HardSwishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { mInputSize = in[0].dims.d[0] * in[0].dims.d[1] * in[0].dims.d[2]; } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void HardSwishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void HardSwishPlugin::detachFromContext() {} const char* HardSwishPlugin::getPluginType() const { return "HardSwishLayer_TRT"; } const char* HardSwishPlugin::getPluginVersion() const { return "1"; } void HardSwishPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* HardSwishPlugin::clone() const { HardSwishPlugin *p = new HardSwishPlugin(); p->setPluginNamespace(mPluginNamespace); p->setInputSize(mInputSize); return p; } __global__ void HardSwishKer(const float *in, float *out, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) return; if (in[idx] >= 3.0f) out[idx] = in[idx]; else if (in[idx] < -3.0f) out[idx] = 0.0f; else out[idx] = in[idx] * (in[idx] + 3.0f) / 6.0f; } void HardSwishPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) { int numElem = batchSize * mInputSize; hipLaunchKernelGGL(( HardSwishKer), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, inputs[0], output, numElem); } int HardSwishPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream) { forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection HardSwishPluginCreator::mFC{}; std::vector<PluginField> HardSwishPluginCreator::mPluginAttributes; HardSwishPluginCreator::HardSwishPluginCreator() { 
mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* HardSwishPluginCreator::getPluginName() const { return "HardSwishLayer_TRT"; } const char* HardSwishPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* HardSwishPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* HardSwishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { HardSwishPlugin* obj = new HardSwishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* HardSwishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() HardSwishPlugin* obj = new HardSwishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
e3d07101f6e60778b63d47daa851b153ba37524f.cu
#include <assert.h> #include "hardswish.h" #include "utils.h" namespace nvinfer1 { HardSwishPlugin::HardSwishPlugin() { } HardSwishPlugin::~HardSwishPlugin() { } // create the plugin at runtime from a byte stream HardSwishPlugin::HardSwishPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; Tn::read(d, mInputSize); assert(d == a + length); } void HardSwishPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; Tn::write(d, mInputSize); assert(d == a + getSerializationSize()); } size_t HardSwishPlugin::getSerializationSize() const { return sizeof(mInputSize); } int HardSwishPlugin::initialize() { return 0; } Dims HardSwishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(nbInputDims == 1); assert(index == 0); return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void HardSwishPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* HardSwishPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType HardSwishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool HardSwishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. 
bool HardSwishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void HardSwishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { mInputSize = in[0].dims.d[0] * in[0].dims.d[1] * in[0].dims.d[2]; } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void HardSwishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void HardSwishPlugin::detachFromContext() {} const char* HardSwishPlugin::getPluginType() const { return "HardSwishLayer_TRT"; } const char* HardSwishPlugin::getPluginVersion() const { return "1"; } void HardSwishPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* HardSwishPlugin::clone() const { HardSwishPlugin *p = new HardSwishPlugin(); p->setPluginNamespace(mPluginNamespace); p->setInputSize(mInputSize); return p; } __global__ void HardSwishKer(const float *in, float *out, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) return; if (in[idx] >= 3.0f) out[idx] = in[idx]; else if (in[idx] < -3.0f) out[idx] = 0.0f; else out[idx] = in[idx] * (in[idx] + 3.0f) / 6.0f; } void HardSwishPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) { int numElem = batchSize * mInputSize; HardSwishKer<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>> (inputs[0], output, numElem); } int HardSwishPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) { forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection HardSwishPluginCreator::mFC{}; std::vector<PluginField> HardSwishPluginCreator::mPluginAttributes; HardSwishPluginCreator::HardSwishPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = 
mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* HardSwishPluginCreator::getPluginName() const { return "HardSwishLayer_TRT"; } const char* HardSwishPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* HardSwishPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* HardSwishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { HardSwishPlugin* obj = new HardSwishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* HardSwishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() HardSwishPlugin* obj = new HardSwishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
8e401d023a4b5b2e48be87c2e4742d1d8bf1d73b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*--------------------------------------------------------------------------*\ Copyright (c) 2008-2010, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. 
\*--------------------------------------------------------------------------*/ #ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_ #define _3D_CUBIC_BSPLINE_PREFILTER_H_ #include <stdio.h> #include <cutil.h> #include "internal/cubicPrefilter_kernel.cu" //-------------------------------------------------------------------------- // Global CUDA procedures //-------------------------------------------------------------------------- template<class floatN> __global__ void SamplesToCoefficients3DX( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in x-direction const uint y = blockIdx.x * blockDim.x + threadIdx.x; const uint z = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = (z * height + y) * pitch; floatN* ptr = (floatN*)((uchar*)volume + startIdx); ConvertToInterpolationCoefficients(ptr, width, sizeof(floatN)); } template<class floatN> __global__ void SamplesToCoefficients3DY( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in y-direction const uint x = blockIdx.x * blockDim.x + threadIdx.x; const uint z = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = z * height * pitch; floatN* ptr = (floatN*)((uchar*)volume + startIdx); ConvertToInterpolationCoefficients(ptr + x, height, pitch); } template<class floatN> __global__ void SamplesToCoefficients3DZ( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in z-direction const uint x = blockIdx.x * blockDim.x + threadIdx.x; const uint y = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = y * pitch; const uint slice = height * pitch; floatN* ptr = (floatN*)((uchar*)volume + 
startIdx); ConvertToInterpolationCoefficients(ptr + x, depth, slice); } //-------------------------------------------------------------------------- // Exported functions //-------------------------------------------------------------------------- //! Convert the voxel values into cubic b-spline coefficients //! @param volume pointer to the voxel volume in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! @param depth volume depth in number of voxels template<class floatN> extern void CubicBSplinePrefilter3D(floatN* volume, uint pitch, uint width, uint height, uint depth) { // Try to determine the optimal block dimensions uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64); uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX); dim3 dimBlock(dimX, dimY); // Replace the voxel values by the b-spline coefficients dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DX<floatN>), dim3(dimGridX), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed"); dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DY<floatN>), dim3(dimGridY), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed"); dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DZ<floatN>), dim3(dimGridZ), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed"); } //! Convert the voxel values into cubic b-spline coefficients //! @param volume pointer to the voxel volume in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width volume width in number of voxels //! 
@param height volume height in number of voxels //! @param depth volume depth in number of voxels //! @note Prints stopwatch feedback template<class floatN> extern void CubicBSplinePrefilter3DTimer(floatN* volume, uint pitch, uint width, uint height, uint depth) { printf("\nCubic B-Spline Prefilter timer:\n"); uint hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); // Try to determine the optimal block dimensions uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64); uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX); dim3 dimBlock(dimX, dimY); // Replace the voxel values by the b-spline coefficients dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DX<floatN>), dim3(dimGridX), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueX = cutGetTimerValue(hTimer); printf("x-direction : %f msec\n", timerValueX); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DY<floatN>), dim3(dimGridY), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueY = cutGetTimerValue(hTimer); printf("y-direction : %f msec\n", timerValueY); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y); hipLaunchKernelGGL(( SamplesToCoefficients3DZ<floatN>), dim3(dimGridZ), dim3(dimBlock), 0, 0, volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueZ = cutGetTimerValue(hTimer); printf("z-direction : %f msec\n", 
timerValueZ); printf("total : %f msec\n\n", timerValueX+timerValueY+timerValueZ); } #endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
8e401d023a4b5b2e48be87c2e4742d1d8bf1d73b.cu
/*--------------------------------------------------------------------------*\ Copyright (c) 2008-2010, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. 
\*--------------------------------------------------------------------------*/ #ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_ #define _3D_CUBIC_BSPLINE_PREFILTER_H_ #include <stdio.h> #include <cutil.h> #include "internal/cubicPrefilter_kernel.cu" //-------------------------------------------------------------------------- // Global CUDA procedures //-------------------------------------------------------------------------- template<class floatN> __global__ void SamplesToCoefficients3DX( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in x-direction const uint y = blockIdx.x * blockDim.x + threadIdx.x; const uint z = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = (z * height + y) * pitch; floatN* ptr = (floatN*)((uchar*)volume + startIdx); ConvertToInterpolationCoefficients(ptr, width, sizeof(floatN)); } template<class floatN> __global__ void SamplesToCoefficients3DY( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in y-direction const uint x = blockIdx.x * blockDim.x + threadIdx.x; const uint z = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = z * height * pitch; floatN* ptr = (floatN*)((uchar*)volume + startIdx); ConvertToInterpolationCoefficients(ptr + x, height, pitch); } template<class floatN> __global__ void SamplesToCoefficients3DZ( floatN* volume, // in-place processing uint pitch, // width in bytes uint width, // width of the volume uint height, // height of the volume uint depth) // depth of the volume { // process lines in z-direction const uint x = blockIdx.x * blockDim.x + threadIdx.x; const uint y = blockIdx.y * blockDim.y + threadIdx.y; const uint startIdx = y * pitch; const uint slice = height * pitch; floatN* ptr = (floatN*)((uchar*)volume + 
startIdx); ConvertToInterpolationCoefficients(ptr + x, depth, slice); } //-------------------------------------------------------------------------- // Exported functions //-------------------------------------------------------------------------- //! Convert the voxel values into cubic b-spline coefficients //! @param volume pointer to the voxel volume in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! @param depth volume depth in number of voxels template<class floatN> extern void CubicBSplinePrefilter3D(floatN* volume, uint pitch, uint width, uint height, uint depth) { // Try to determine the optimal block dimensions uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64); uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX); dim3 dimBlock(dimX, dimY); // Replace the voxel values by the b-spline coefficients dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y); SamplesToCoefficients3DX<floatN><<<dimGridX, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed"); dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y); SamplesToCoefficients3DY<floatN><<<dimGridY, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed"); dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y); SamplesToCoefficients3DZ<floatN><<<dimGridZ, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed"); } //! Convert the voxel values into cubic b-spline coefficients //! @param volume pointer to the voxel volume in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! @param depth volume depth in number of voxels //! 
@note Prints stopwatch feedback template<class floatN> extern void CubicBSplinePrefilter3DTimer(floatN* volume, uint pitch, uint width, uint height, uint depth) { printf("\nCubic B-Spline Prefilter timer:\n"); uint hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); // Try to determine the optimal block dimensions uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64); uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX); dim3 dimBlock(dimX, dimY); // Replace the voxel values by the b-spline coefficients dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y); SamplesToCoefficients3DX<floatN><<<dimGridX, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueX = cutGetTimerValue(hTimer); printf("x-direction : %f msec\n", timerValueX); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y); SamplesToCoefficients3DY<floatN><<<dimGridY, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueY = cutGetTimerValue(hTimer); printf("y-direction : %f msec\n", timerValueY); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y); SamplesToCoefficients3DZ<floatN><<<dimGridZ, dimBlock>>>(volume, pitch, width, height, depth); CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueZ = cutGetTimerValue(hTimer); printf("z-direction : %f msec\n", timerValueZ); printf("total : %f msec\n\n", timerValueX+timerValueY+timerValueZ); } #endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
f9da03a40bbc08daeb5711786cd660e4c9d9c888.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <GL/glew.h> #include <GL/glut.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include "tables.h" // OGL vertex buffer object GLuint vbo; struct cudaGraphicsResource *vbo_resource; // Size of voxel grid const int dim_x = 64; const int dim_y = 64; const int dim_z = 64; float sim_time = 0.0; // CUDA buffers float* volume; float4* vertices; uint* edge_table; uint* tri_table; uint* num_verts_table; //Block and grid size dim3 threadsPerBlock = dim3(8, 8, 8); dim3 numBlocks = dim3(8, 8, 8); //helper function on device for indexing 3d array __device__ int arrayIndex(int x, int y, int z) { return x + (y*dim_x) + (z*dim_y*dim_x); } // Fill_volume kernel __global__ void fill_volume(float* volume, float t, int dim_x, int dim_y, int dim_z){ uint x = blockDim.x * blockIdx.x + threadIdx.x; uint y = blockDim.y * blockIdx.y + threadIdx.y; uint z = blockDim.z * blockIdx.z + threadIdx.z; uint threadId = x + dim_x*y + dim_x*dim_y*z; float dx = (float)x/dim_x; float dy = (float)y/dim_y; float dz = (float)z/dim_z; float f = abs(cos(0.01*t)); volume[threadId] = f*(dx + dy + dz)/3; //printf("volume[threadId] = %f\n",f*(dx + dy + dz)/3); } // Get triangles kernel __global__ void get_triangles(float* volume, float4* vertices, uint* tri_table, int dim_x, int dim_y, int dim_z){ uint x = blockDim.x * blockIdx.x + threadIdx.x; uint y = blockDim.y * blockIdx.y + threadIdx.y; uint z = blockDim.z * blockIdx.z + threadIdx.z; uint threadId = x + dim_x*y + dim_x*dim_y*z; if (x < (dim_x-1) && y < (dim_y-1) && z < (dim_z-1)) { uint tableIndex = 0; tableIndex = (uint)(volume[arrayIndex(x,y,z)]<0.5); tableIndex += (uint)(volume[arrayIndex(x+1,y,z)]<0.5)*2; tableIndex += (uint)(volume[arrayIndex(x+1,y+1,z)]<0.5)*4; tableIndex += (uint)(volume[arrayIndex(x,y+1,z)]<0.5)*8; tableIndex += (uint)(volume[arrayIndex(x,y,z+1)]<0.5)*16; tableIndex += 
(uint)(volume[arrayIndex(x+1,y,z+1)]<0.5)*32; tableIndex += (uint)(volume[arrayIndex(x+1,y+1,z+1)]<0.5)*64; tableIndex += (uint)(volume[arrayIndex(x,y+1,z+1)]<0.5)*128; //printf("%d\n", tableIndex); for (int i = 0; i<15; i++) { float4 temp; //Check for the different cases in the tri_table array if (tri_table[16*tableIndex + i] == 255) { temp.x = 0.0; temp.y = 0.0; temp.z = 0.0; temp.w = 1.0; //printf("entered 255\n"); } else if (tri_table[16*tableIndex + i] == 0) { temp.x = (((float)(x))/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = (((float)y)/dim_y)+0.5/dim_y; temp.z = (((float)z)/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 0\n"); } else if (tri_table[16*tableIndex + i] == 1) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 1\n"); } else if (tri_table[16*tableIndex + i] == 2) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 2\n"); } else if (tri_table[16*tableIndex + i] == 3) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 3\n"); } else if (tri_table[16*tableIndex + i] == 4) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 4\n"); } else if (tri_table[16*tableIndex + i] == 5) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 5\n"); } else if (tri_table[16*tableIndex + i] == 6) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; 
//printf("entered 6\n"); } else if (tri_table[16*tableIndex + i] == 7) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 7\n"); } else if (tri_table[16*tableIndex + i] == 8) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 8\n"); } else if (tri_table[16*tableIndex + i] == 9) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 9\n"); } else if (tri_table[16*tableIndex + i] == 10) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 10\n"); } else if (tri_table[16*tableIndex + i] == 11) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 11\n"); } //printf("vertices[%i].x = %f\n", i, temp.x); //printf("vertices[%i].y = %f\n", i, temp.y); //printf("vertices[%i].z = %f\n", i, temp.z); //printf("vertices[%i].w = %f\n", i, temp.w); vertices[threadId*15+i] = temp; } } } // Set up and call get_triangles kernel void call_get_triangles(){ // CUDA taking over vertices buffer from OGL size_t num_bytes; hipGraphicsMapResources(1, &vbo_resource, 0); hipGraphicsResourceGetMappedPointer((void **)&vertices, &num_bytes, vbo_resource); // Insert call to get_triangles kernel here hipLaunchKernelGGL(( get_triangles), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, volume, vertices, tri_table, dim_x, dim_y, dim_z); hipDeviceSynchronize(); printf("%s\n", hipGetErrorString(hipGetLastError())); // CUDA giving back vertices buffer to OGL hipGraphicsUnmapResources(1, &vbo_resource, 0); } // Set 
up and call fill_volume kernel void call_fill_volume(float t){ hipLaunchKernelGGL(( fill_volume), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, volume, t, dim_x, dim_y, dim_z); hipDeviceSynchronize(); } // Creating vertex buffer in OpenGL void init_vertex_buffer(){ glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); glBufferData(GL_ARRAY_BUFFER, dim_x*dim_y*dim_z*15*4*sizeof(float), 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); hipGraphicsGLRegisterBuffer(&vbo_resource, vbo, hipGraphicsMapFlagsWriteDiscard); } // The display function is called at each iteration of the // OGL main loop. It calls the kernels, and draws the result void display(){ sim_time+= 0.1; // Call kernels call_fill_volume(sim_time); call_get_triangles(); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); //Rotate camera glTranslatef(0.5,0.5,0.5); glRotatef(2*sim_time, 0.0, 0.0, 1.0); glTranslatef(-0.5,-0.5,-0.5); //Draw wireframe glTranslatef(0.5,0.5,0.5); glColor3f(0.0, 0.0, 0.0); glutWireCube(1); glTranslatef(-0.5,-0.5,-0.5); // Render vbo as buffer of points glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(0.7, 0.1, 0.3); glDrawArrays(GL_TRIANGLES, 0, dim_x*dim_y*dim_z*15); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); glutPostRedisplay(); } void init_GL(int *argc, char **argv){ glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(512, 512); glutCreateWindow("CUDA Marching Cubes"); glutDisplayFunc(display); glewInit(); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); GLfloat diffuse[] = {1.0,1.0,1.0,1.0}; GLfloat ambient[] = {0.0,0.0,0.0,1.0}; GLfloat specular[] = {1.0,1.0,1.0,1.0}; GLfloat pos[] = {1.0,1.0,0.0,1.0}; glLightfv(GL_LIGHT0, GL_POSITION, pos); glLightfv(GL_LIGHT0, GL_AMBIENT, ambient); glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse); glLightfv(GL_LIGHT0, GL_SPECULAR, specular); glColorMaterial ( GL_FRONT_AND_BACK, 
GL_AMBIENT_AND_DIFFUSE ) ; glEnable ( GL_COLOR_MATERIAL ) ; glClearColor(1.0, 1.0, 1.0, 1.0); glDisable(GL_DEPTH_TEST); glViewport(0, 0, 512, 512); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, 1, 0.1, 10.0); gluLookAt(1.5,1.5,1.5,0.5,0.5,0.5,0,0,1); } int main(int argc, char **argv) { // Setting up OpenGL init_GL(&argc, argv); // Setting up OpenGL on CUDA device 0 hipGLSetGLDevice(0); // Creating vertices buffer in OGL/CUDA init_vertex_buffer(); // Allocate memory for volume hipMalloc((float**)&volume, sizeof(float)*dim_x*dim_y*dim_z); // Allocate memory and transfer tables hipMalloc((uint**)&edge_table, sizeof(uint)*256); hipMalloc((uint**)&tri_table, sizeof(uint)*256*16); hipMalloc((uint**)&num_verts_table, sizeof(uint)*256); hipMemcpy(edge_table, edgeTable, sizeof(uint)*256, hipMemcpyHostToDevice); hipMemcpy(tri_table, triTable, sizeof(uint)*256*16, hipMemcpyHostToDevice); hipMemcpy(num_verts_table, numVertsTable, sizeof(uint)*256, hipMemcpyHostToDevice); glutMainLoop(); //sim_time = 0.1; //call_fill_volume(sim_time); //call_get_triangles(); }
f9da03a40bbc08daeb5711786cd660e4c9d9c888.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #include "tables.h" // OGL vertex buffer object GLuint vbo; struct cudaGraphicsResource *vbo_resource; // Size of voxel grid const int dim_x = 64; const int dim_y = 64; const int dim_z = 64; float sim_time = 0.0; // CUDA buffers float* volume; float4* vertices; uint* edge_table; uint* tri_table; uint* num_verts_table; //Block and grid size dim3 threadsPerBlock = dim3(8, 8, 8); dim3 numBlocks = dim3(8, 8, 8); //helper function on device for indexing 3d array __device__ int arrayIndex(int x, int y, int z) { return x + (y*dim_x) + (z*dim_y*dim_x); } // Fill_volume kernel __global__ void fill_volume(float* volume, float t, int dim_x, int dim_y, int dim_z){ uint x = blockDim.x * blockIdx.x + threadIdx.x; uint y = blockDim.y * blockIdx.y + threadIdx.y; uint z = blockDim.z * blockIdx.z + threadIdx.z; uint threadId = x + dim_x*y + dim_x*dim_y*z; float dx = (float)x/dim_x; float dy = (float)y/dim_y; float dz = (float)z/dim_z; float f = abs(cos(0.01*t)); volume[threadId] = f*(dx + dy + dz)/3; //printf("volume[threadId] = %f\n",f*(dx + dy + dz)/3); } // Get triangles kernel __global__ void get_triangles(float* volume, float4* vertices, uint* tri_table, int dim_x, int dim_y, int dim_z){ uint x = blockDim.x * blockIdx.x + threadIdx.x; uint y = blockDim.y * blockIdx.y + threadIdx.y; uint z = blockDim.z * blockIdx.z + threadIdx.z; uint threadId = x + dim_x*y + dim_x*dim_y*z; if (x < (dim_x-1) && y < (dim_y-1) && z < (dim_z-1)) { uint tableIndex = 0; tableIndex = (uint)(volume[arrayIndex(x,y,z)]<0.5); tableIndex += (uint)(volume[arrayIndex(x+1,y,z)]<0.5)*2; tableIndex += (uint)(volume[arrayIndex(x+1,y+1,z)]<0.5)*4; tableIndex += (uint)(volume[arrayIndex(x,y+1,z)]<0.5)*8; tableIndex += (uint)(volume[arrayIndex(x,y,z+1)]<0.5)*16; tableIndex += (uint)(volume[arrayIndex(x+1,y,z+1)]<0.5)*32; tableIndex += 
(uint)(volume[arrayIndex(x+1,y+1,z+1)]<0.5)*64; tableIndex += (uint)(volume[arrayIndex(x,y+1,z+1)]<0.5)*128; //printf("%d\n", tableIndex); for (int i = 0; i<15; i++) { float4 temp; //Check for the different cases in the tri_table array if (tri_table[16*tableIndex + i] == 255) { temp.x = 0.0; temp.y = 0.0; temp.z = 0.0; temp.w = 1.0; //printf("entered 255\n"); } else if (tri_table[16*tableIndex + i] == 0) { temp.x = (((float)(x))/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = (((float)y)/dim_y)+0.5/dim_y; temp.z = (((float)z)/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 0\n"); } else if (tri_table[16*tableIndex + i] == 1) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 1\n"); } else if (tri_table[16*tableIndex + i] == 2) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 2\n"); } else if (tri_table[16*tableIndex + i] == 3) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 3\n"); } else if (tri_table[16*tableIndex + i] == 4) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 4\n"); } else if (tri_table[16*tableIndex + i] == 5) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 5\n"); } else if (tri_table[16*tableIndex + i] == 6) { temp.x = ((float)x/dim_x)+(1.0/(2*dim_x))+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 6\n"); } else if (tri_table[16*tableIndex + i] == 7) { 
temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/(2*dim_y))+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/dim_z)+0.5/dim_z; temp.w = 1.0; //printf("entered 7\n"); } else if (tri_table[16*tableIndex + i] == 8) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 8\n"); } else if (tri_table[16*tableIndex + i] == 9) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 9\n"); } else if (tri_table[16*tableIndex + i] == 10) { temp.x = ((float)x/dim_x)+(1.0/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 10\n"); } else if (tri_table[16*tableIndex + i] == 11) { temp.x = ((float)x/dim_x)+0.5/dim_x; temp.y = ((float)y/dim_y)+(1.0/dim_y)+0.5/dim_y; temp.z = ((float)z/dim_z)+(1.0/(2*dim_z))+0.5/dim_z; temp.w = 1.0; //printf("entered 11\n"); } //printf("vertices[%i].x = %f\n", i, temp.x); //printf("vertices[%i].y = %f\n", i, temp.y); //printf("vertices[%i].z = %f\n", i, temp.z); //printf("vertices[%i].w = %f\n", i, temp.w); vertices[threadId*15+i] = temp; } } } // Set up and call get_triangles kernel void call_get_triangles(){ // CUDA taking over vertices buffer from OGL size_t num_bytes; cudaGraphicsMapResources(1, &vbo_resource, 0); cudaGraphicsResourceGetMappedPointer((void **)&vertices, &num_bytes, vbo_resource); // Insert call to get_triangles kernel here get_triangles<<<numBlocks, threadsPerBlock>>>(volume, vertices, tri_table, dim_x, dim_y, dim_z); cudaDeviceSynchronize(); printf("%s\n", cudaGetErrorString(cudaGetLastError())); // CUDA giving back vertices buffer to OGL cudaGraphicsUnmapResources(1, &vbo_resource, 0); } // Set up and call fill_volume kernel void call_fill_volume(float t){ fill_volume<<<numBlocks, 
threadsPerBlock>>>(volume, t, dim_x, dim_y, dim_z); cudaDeviceSynchronize(); } // Creating vertex buffer in OpenGL void init_vertex_buffer(){ glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); glBufferData(GL_ARRAY_BUFFER, dim_x*dim_y*dim_z*15*4*sizeof(float), 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaGraphicsGLRegisterBuffer(&vbo_resource, vbo, cudaGraphicsMapFlagsWriteDiscard); } // The display function is called at each iteration of the // OGL main loop. It calls the kernels, and draws the result void display(){ sim_time+= 0.1; // Call kernels call_fill_volume(sim_time); call_get_triangles(); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); //Rotate camera glTranslatef(0.5,0.5,0.5); glRotatef(2*sim_time, 0.0, 0.0, 1.0); glTranslatef(-0.5,-0.5,-0.5); //Draw wireframe glTranslatef(0.5,0.5,0.5); glColor3f(0.0, 0.0, 0.0); glutWireCube(1); glTranslatef(-0.5,-0.5,-0.5); // Render vbo as buffer of points glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(0.7, 0.1, 0.3); glDrawArrays(GL_TRIANGLES, 0, dim_x*dim_y*dim_z*15); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); glutPostRedisplay(); } void init_GL(int *argc, char **argv){ glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(512, 512); glutCreateWindow("CUDA Marching Cubes"); glutDisplayFunc(display); glewInit(); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); GLfloat diffuse[] = {1.0,1.0,1.0,1.0}; GLfloat ambient[] = {0.0,0.0,0.0,1.0}; GLfloat specular[] = {1.0,1.0,1.0,1.0}; GLfloat pos[] = {1.0,1.0,0.0,1.0}; glLightfv(GL_LIGHT0, GL_POSITION, pos); glLightfv(GL_LIGHT0, GL_AMBIENT, ambient); glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse); glLightfv(GL_LIGHT0, GL_SPECULAR, specular); glColorMaterial ( GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE ) ; glEnable ( GL_COLOR_MATERIAL ) ; glClearColor(1.0, 1.0, 1.0, 1.0); glDisable(GL_DEPTH_TEST); 
glViewport(0, 0, 512, 512); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, 1, 0.1, 10.0); gluLookAt(1.5,1.5,1.5,0.5,0.5,0.5,0,0,1); } int main(int argc, char **argv) { // Setting up OpenGL init_GL(&argc, argv); // Setting up OpenGL on CUDA device 0 cudaGLSetGLDevice(0); // Creating vertices buffer in OGL/CUDA init_vertex_buffer(); // Allocate memory for volume cudaMalloc((float**)&volume, sizeof(float)*dim_x*dim_y*dim_z); // Allocate memory and transfer tables cudaMalloc((uint**)&edge_table, sizeof(uint)*256); cudaMalloc((uint**)&tri_table, sizeof(uint)*256*16); cudaMalloc((uint**)&num_verts_table, sizeof(uint)*256); cudaMemcpy(edge_table, edgeTable, sizeof(uint)*256, cudaMemcpyHostToDevice); cudaMemcpy(tri_table, triTable, sizeof(uint)*256*16, cudaMemcpyHostToDevice); cudaMemcpy(num_verts_table, numVertsTable, sizeof(uint)*256, cudaMemcpyHostToDevice); glutMainLoop(); //sim_time = 0.1; //call_fill_volume(sim_time); //call_get_triangles(); }
10bb3b7e3023a9fcab2df10bbed331582b031adf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <helper_cuda.h> #include <helper_timer.h> #include <stdio.h> #include <thrust/inner_product.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #define PI 3.1415926535897932384626433832795 template <typename T> struct abs_diff : public thrust::binary_function<T,T,T> { __host__ __device__ T operator()(const T& a, const T& b) { return ::fabs(b - a); } }; //indexing of shared memory. Threads have 2 more rows and cols (for halo nodes) __device__ inline int getSharedIndex(int thrIdx, int thrIdy) { return (thrIdy * blockDim.x + thrIdx); } //indexing of global memory corresponding to each thread __device__ inline int getGlobalIndex() { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; return col + row*blockDim.x * gridDim.x; } __global__ void JacobiStep(const float *oldMatrix, float *newMatrix) { extern __shared__ float aux[]; int thx = threadIdx.x, thy = threadIdx.y; aux[ getSharedIndex(thx, thy)] = oldMatrix[getGlobalIndex()]; int leftIndex = getSharedIndex(thx-1,thy); int rightIndex = getSharedIndex(thx+1,thy); int topIndex = getSharedIndex(thx,thy-1); int botIndex = getSharedIndex(thx,thy+1); float left, right, top, bot; __syncthreads(); if (thx == 0) { (blockIdx.x == 0) ? left = __sinf((PI*((thy + blockDim.y * blockIdx.y)+1))/ (blockDim.y*gridDim.y+1))* __sinf((PI*((thy + blockDim.y * blockIdx.y)+1))/ (blockDim.y*gridDim.y+1)) : left = oldMatrix[getGlobalIndex()-1] ; } else left = aux[leftIndex]; if (thy == 0) { (blockIdx.y == 0) ? top = 0.0 : top = oldMatrix[getGlobalIndex()-blockDim.x * gridDim.x]; } else top = aux[topIndex]; //right if (thx == blockDim.x-1){ (blockIdx.x == gridDim.x-1) ? right = 0.0f : right = oldMatrix[getGlobalIndex()+1]; } else right = aux[rightIndex]; //bot if (thy == blockDim.y - 1){ (blockIdx.y == gridDim.y - 1) ? 
bot = 0.0f : bot = oldMatrix[getGlobalIndex()+blockDim.x * gridDim.x]; } else bot = aux[botIndex]; float newValue = 0.25*(left+right+top+bot); newMatrix[getGlobalIndex()] = newValue; } //Returns the maximum absolute difference between the elements of a float vector //on device memory, and a thrust device_vector, both with size matrixSize float GetMaxDiff( float* a, thrust::device_vector<float> b, int matrixSize) { thrust::device_ptr<float> dev_ptra = thrust::device_pointer_cast(a); float init = 0; thrust::maximum<float> binary_op1; abs_diff<float> binary_op2; return thrust::inner_product(dev_ptra,dev_ptra + matrixSize,b.begin(), init, binary_op1, binary_op2); } thrust::host_vector<float> GetAnalyticalMatrix(char* matrixSide, int matrixSize) { char analyticalFilename[50]; analyticalFilename[0]=0; strcpy(analyticalFilename,matrixSide); strcat(analyticalFilename,"_1.dat"); FILE *matrixFile = fopen(analyticalFilename, "rb"); if (matrixFile == NULL) { printf("Error: Analytical solution file not found\n"); exit( -1); } thrust::host_vector<float> analyticalHost(matrixSize); //We read directly from the file to a thrust host vector, //which is then copied to a device vector int n=fread(&analyticalHost[0],sizeof(float),matrixSize,matrixFile); fclose(matrixFile); return analyticalHost; } void PrintResults(char* size,int matrixSide, int final_its,float total_time, float program_time, int matrixSize, float accuracy, float max_abs_diff) { char outputFileName[50]; outputFileName[0]=0; strcpy(outputFileName,size); strcat(outputFileName,"_shared_no_bound_times.txt"); FILE* outfile = fopen(outputFileName,"a+"); if (outfile == NULL) { printf("Error with output file\n"); exit(-1); } printf("Time for matrixSide= %d, %d Iterations. %f ms. 
Total time: %f.", matrixSide,final_its, total_time, program_time); printf("Memory bandwith is %f GB/s.", ((1e-6)*matrixSize)*2*final_its*sizeof(float)/(total_time)); printf("Accuracy desired: %f (obtained %f)\n",accuracy,max_abs_diff); fprintf(outfile,"Iterations: %d. Time: %f ms. Accuracy desired: %f ", final_its, total_time,accuracy); fprintf(outfile,"(obtained %f). Memory bandwith: %f GB/s\n", max_abs_diff,((1e-6)*matrixSize)*2*final_its*sizeof(float)/(total_time)); fclose(outfile); } int main(int argc, char* argv[]) { LARGE_INTEGER t_ini, t_fin, freq; QueryPerformanceCounter(&t_ini); if (argc != 3) { printf("Usage: %s <matrix_side> <desired_accuracy>\n", argv[0]); return 0; } const int matrixSide = atoi(argv[1]); if (matrixSide%16 != 0) { printf("Error: matrix side must divide 16\n"); return -1; } const float accuracy = atof(argv[2]); if(accuracy > 0.5) { printf("Error: accuracy must be smaller than 0.5\n"); return -1; } char evolutionFileName[50]; evolutionFileName[0]=0; strcpy(evolutionFileName,argv[1]); strcat(evolutionFileName,"_shared_no_bound.txt"); FILE* evolutionFile = fopen (evolutionFileName, "a+"); int maxIterations = 200000; const int matrixSize = (matrixSide)*(matrixSide); {//block to encapuslate thrust thrust::device_vector<float> analyticalDev = GetAnalyticalMatrix(argv[1],matrixSize); float *oldMatrix = 0, *newMatrix = 0; checkCudaErrors( hipMalloc((void**)&oldMatrix, matrixSize*sizeof(float))); checkCudaErrors( hipMalloc((void**)&newMatrix, matrixSize*sizeof(float))); float* hostAuxVector = 0; checkCudaErrors(hipHostMalloc((void**) &hostAuxVector, matrixSize * sizeof(float), hipHostMallocDefault)); for (int i = 0; i < matrixSize; i++) hostAuxVector[i]=0.0f; // Copy input vectors from host memory to GPU buffers. 
checkCudaErrors(hipMemcpy(oldMatrix, hostAuxVector, matrixSize *sizeof(float), hipMemcpyHostToDevice)); dim3 threadsPerBlock(16, 16); dim3 numBlocks(matrixSide/16, matrixSide/16); hipEvent_t start, stop; float time, total_time = 0.0; hipEventCreate(&start); hipEventCreate(&stop); int final_its; float max_abs_diff=1.0; for (final_its = 0; max_abs_diff > accuracy && final_its < maxIterations; final_its++) { hipEventRecord(start, 0); hipLaunchKernelGGL(( JacobiStep), dim3(numBlocks), dim3(threadsPerBlock), (threadsPerBlock.x+2)* (threadsPerBlock.y+2)*sizeof(float), 0, oldMatrix,newMatrix); hipEventRecord(stop, 0); // 0 - the default stream hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); total_time += time; if ((final_its+1) % 1000 == 0) { max_abs_diff =GetMaxDiff(oldMatrix,analyticalDev,matrixSize); printf("%f\n",max_abs_diff); fprintf(evolutionFile,"%f %f\n",total_time,max_abs_diff); } std::swap(oldMatrix, newMatrix); } hipDeviceSynchronize(); hipEventDestroy(start); hipEventDestroy(stop); max_abs_diff =GetMaxDiff(oldMatrix,analyticalDev,matrixSize); QueryPerformanceCounter(&t_fin);\ QueryPerformanceFrequency(&freq);\ double program_time = (double)(t_fin.QuadPart - t_ini.QuadPart) / (double)freq.QuadPart; fprintf(evolutionFile,"----------------------------\n"); fclose(evolutionFile); PrintResults(argv[1],matrixSide, final_its, total_time, program_time, matrixSize, accuracy, max_abs_diff); checkCudaErrors(hipFree(oldMatrix)); checkCudaErrors(hipFree(newMatrix)); } checkCudaErrors( hipDeviceReset()); }
10bb3b7e3023a9fcab2df10bbed331582b031adf.cu
#include "cuda_runtime.h" #include <helper_cuda.h> #include <helper_timer.h> #include <stdio.h> #include <thrust/inner_product.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #define PI 3.1415926535897932384626433832795 template <typename T> struct abs_diff : public thrust::binary_function<T,T,T> { __host__ __device__ T operator()(const T& a, const T& b) { return std::fabs(b - a); } }; //indexing of shared memory. Threads have 2 more rows and cols (for halo nodes) __device__ inline int getSharedIndex(int thrIdx, int thrIdy) { return (thrIdy * blockDim.x + thrIdx); } //indexing of global memory corresponding to each thread __device__ inline int getGlobalIndex() { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; return col + row*blockDim.x * gridDim.x; } __global__ void JacobiStep(const float *oldMatrix, float *newMatrix) { extern __shared__ float aux[]; int thx = threadIdx.x, thy = threadIdx.y; aux[ getSharedIndex(thx, thy)] = oldMatrix[getGlobalIndex()]; int leftIndex = getSharedIndex(thx-1,thy); int rightIndex = getSharedIndex(thx+1,thy); int topIndex = getSharedIndex(thx,thy-1); int botIndex = getSharedIndex(thx,thy+1); float left, right, top, bot; __syncthreads(); if (thx == 0) { (blockIdx.x == 0) ? left = __sinf((PI*((thy + blockDim.y * blockIdx.y)+1))/ (blockDim.y*gridDim.y+1))* __sinf((PI*((thy + blockDim.y * blockIdx.y)+1))/ (blockDim.y*gridDim.y+1)) : left = oldMatrix[getGlobalIndex()-1] ; } else left = aux[leftIndex]; if (thy == 0) { (blockIdx.y == 0) ? top = 0.0 : top = oldMatrix[getGlobalIndex()-blockDim.x * gridDim.x]; } else top = aux[topIndex]; //right if (thx == blockDim.x-1){ (blockIdx.x == gridDim.x-1) ? right = 0.0f : right = oldMatrix[getGlobalIndex()+1]; } else right = aux[rightIndex]; //bot if (thy == blockDim.y - 1){ (blockIdx.y == gridDim.y - 1) ? 
bot = 0.0f : bot = oldMatrix[getGlobalIndex()+blockDim.x * gridDim.x]; } else bot = aux[botIndex]; float newValue = 0.25*(left+right+top+bot); newMatrix[getGlobalIndex()] = newValue; } //Returns the maximum absolute difference between the elements of a float vector //on device memory, and a thrust device_vector, both with size matrixSize float GetMaxDiff( float* a, thrust::device_vector<float> b, int matrixSize) { thrust::device_ptr<float> dev_ptra = thrust::device_pointer_cast(a); float init = 0; thrust::maximum<float> binary_op1; abs_diff<float> binary_op2; return thrust::inner_product(dev_ptra,dev_ptra + matrixSize,b.begin(), init, binary_op1, binary_op2); } thrust::host_vector<float> GetAnalyticalMatrix(char* matrixSide, int matrixSize) { char analyticalFilename[50]; analyticalFilename[0]=0; strcpy(analyticalFilename,matrixSide); strcat(analyticalFilename,"_1.dat"); FILE *matrixFile = fopen(analyticalFilename, "rb"); if (matrixFile == NULL) { printf("Error: Analytical solution file not found\n"); exit( -1); } thrust::host_vector<float> analyticalHost(matrixSize); //We read directly from the file to a thrust host vector, //which is then copied to a device vector int n=fread(&analyticalHost[0],sizeof(float),matrixSize,matrixFile); fclose(matrixFile); return analyticalHost; } void PrintResults(char* size,int matrixSide, int final_its,float total_time, float program_time, int matrixSize, float accuracy, float max_abs_diff) { char outputFileName[50]; outputFileName[0]=0; strcpy(outputFileName,size); strcat(outputFileName,"_shared_no_bound_times.txt"); FILE* outfile = fopen(outputFileName,"a+"); if (outfile == NULL) { printf("Error with output file\n"); exit(-1); } printf("Time for matrixSide= %d, %d Iterations. %f ms. 
Total time: %f.", matrixSide,final_its, total_time, program_time); printf("Memory bandwith is %f GB/s.", ((1e-6)*matrixSize)*2*final_its*sizeof(float)/(total_time)); printf("Accuracy desired: %f (obtained %f)\n",accuracy,max_abs_diff); fprintf(outfile,"Iterations: %d. Time: %f ms. Accuracy desired: %f ", final_its, total_time,accuracy); fprintf(outfile,"(obtained %f). Memory bandwith: %f GB/s\n", max_abs_diff,((1e-6)*matrixSize)*2*final_its*sizeof(float)/(total_time)); fclose(outfile); } int main(int argc, char* argv[]) { LARGE_INTEGER t_ini, t_fin, freq; QueryPerformanceCounter(&t_ini); if (argc != 3) { printf("Usage: %s <matrix_side> <desired_accuracy>\n", argv[0]); return 0; } const int matrixSide = atoi(argv[1]); if (matrixSide%16 != 0) { printf("Error: matrix side must divide 16\n"); return -1; } const float accuracy = atof(argv[2]); if(accuracy > 0.5) { printf("Error: accuracy must be smaller than 0.5\n"); return -1; } char evolutionFileName[50]; evolutionFileName[0]=0; strcpy(evolutionFileName,argv[1]); strcat(evolutionFileName,"_shared_no_bound.txt"); FILE* evolutionFile = fopen (evolutionFileName, "a+"); int maxIterations = 200000; const int matrixSize = (matrixSide)*(matrixSide); {//block to encapuslate thrust thrust::device_vector<float> analyticalDev = GetAnalyticalMatrix(argv[1],matrixSize); float *oldMatrix = 0, *newMatrix = 0; checkCudaErrors( cudaMalloc((void**)&oldMatrix, matrixSize*sizeof(float))); checkCudaErrors( cudaMalloc((void**)&newMatrix, matrixSize*sizeof(float))); float* hostAuxVector = 0; checkCudaErrors(cudaHostAlloc((void**) &hostAuxVector, matrixSize * sizeof(float), cudaHostAllocDefault)); for (int i = 0; i < matrixSize; i++) hostAuxVector[i]=0.0f; // Copy input vectors from host memory to GPU buffers. 
checkCudaErrors(cudaMemcpy(oldMatrix, hostAuxVector, matrixSize *sizeof(float), cudaMemcpyHostToDevice)); dim3 threadsPerBlock(16, 16); dim3 numBlocks(matrixSide/16, matrixSide/16); cudaEvent_t start, stop; float time, total_time = 0.0; cudaEventCreate(&start); cudaEventCreate(&stop); int final_its; float max_abs_diff=1.0; for (final_its = 0; max_abs_diff > accuracy && final_its < maxIterations; final_its++) { cudaEventRecord(start, 0); JacobiStep<<<numBlocks, threadsPerBlock, (threadsPerBlock.x+2)* (threadsPerBlock.y+2)*sizeof(float)>>>(oldMatrix,newMatrix); cudaEventRecord(stop, 0); // 0 - the default stream cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); total_time += time; if ((final_its+1) % 1000 == 0) { max_abs_diff =GetMaxDiff(oldMatrix,analyticalDev,matrixSize); printf("%f\n",max_abs_diff); fprintf(evolutionFile,"%f %f\n",total_time,max_abs_diff); } std::swap(oldMatrix, newMatrix); } cudaDeviceSynchronize(); cudaEventDestroy(start); cudaEventDestroy(stop); max_abs_diff =GetMaxDiff(oldMatrix,analyticalDev,matrixSize); QueryPerformanceCounter(&t_fin);\ QueryPerformanceFrequency(&freq);\ double program_time = (double)(t_fin.QuadPart - t_ini.QuadPart) / (double)freq.QuadPart; fprintf(evolutionFile,"----------------------------\n"); fclose(evolutionFile); PrintResults(argv[1],matrixSide, final_its, total_time, program_time, matrixSize, accuracy, max_abs_diff); checkCudaErrors(cudaFree(oldMatrix)); checkCudaErrors(cudaFree(newMatrix)); } checkCudaErrors( cudaDeviceReset()); }
f948739511693b9d6ab1eea038be99505455f227.hip
// !!! This is a file automatically generated by hipify!!! /* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * R. Van der Wijngaart * W. Saphir * H. 
Jin * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <dalvangriebler@gmail.com> * Gabriell Araujo <hexenoften@gmail.com> * Jnior Lff <loffjh@gmail.com> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <hexenoften@gmail.com> * * ------------------------------------------------------------------------------ */ #include <hip/hip_runtime.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define IMAX (PROBLEM_SIZE) #define JMAX (PROBLEM_SIZE) #define KMAX (PROBLEM_SIZE) #define IMAXP (IMAX/2*2) #define JMAXP (JMAX/2*2) #define PROFILING_TOTAL_TIME (0) #define PROFILING_ADD (1) #define PROFILING_COMPUTE_RHS_1 (2) #define PROFILING_COMPUTE_RHS_2 (3) #define PROFILING_ERROR_NORM_1 (4) #define PROFILING_ERROR_NORM_2 (5) #define PROFILING_EXACT_RHS_1 (6) #define PROFILING_EXACT_RHS_2 (7) #define PROFILING_EXACT_RHS_3 (8) #define PROFILING_EXACT_RHS_4 (9) #define PROFILING_INITIALIZE (10) #define PROFILING_RHS_NORM_1 (11) #define PROFILING_RHS_NORM_2 (12) #define PROFILING_TXINVR (13) #define PROFILING_X_SOLVE (14) #define PROFILING_Y_SOLVE (15) #define PROFILING_Z_SOLVE (16) /* gpu linear pattern */ #define u(m,i,j,k) u[(i)+nx*((j)+ny*((k)+nz*(m)))] #define forcing(m,i,j,k) forcing[(i)+nx*((j)+ny*((k)+nz*(m)))] #define rhs(m,i,j,k) rhs[m+(i)*5+(j)*5*nx+(k)*5*nx*ny] #define rho_i(i,j,k) rho_i[i+(j)*nx+(k)*nx*ny] #define us(i,j,k) us[i+(j)*nx+(k)*nx*ny] #define vs(i,j,k) vs[i+(j)*nx+(k)*nx*ny] #define ws(i,j,k) ws[i+(j)*nx+(k)*nx*ny] #define square(i,j,k) square[i+(j)*nx+(k)*nx*ny] #define qs(i,j,k) qs[i+(j)*nx+(k)*nx*ny] 
#define speed(i,j,k) speed[i+(j)*nx+(k)*nx*ny] /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static double u_host[KMAX][JMAXP+1][IMAXP+1][5]; static double us_host[KMAX][JMAXP+1][IMAXP+1]; static double vs_host[KMAX][JMAXP+1][IMAXP+1]; static double ws_host[KMAX][JMAXP+1][IMAXP+1]; static double qs_host[KMAX][JMAXP+1][IMAXP+1]; static double rho_i_host[KMAX][JMAXP+1][IMAXP+1]; static double speed_host[KMAX][JMAXP+1][IMAXP+1]; static double square_host[KMAX][JMAXP+1][IMAXP+1]; static double rhs_host[KMAX][JMAXP+1][IMAXP+1][5]; static double forcing_host[KMAX][JMAXP+1][IMAXP+1][5]; static double cv_host[PROBLEM_SIZE]; static double rhon_host[PROBLEM_SIZE]; static double rhos_host[PROBLEM_SIZE]; static double rhoq_host[PROBLEM_SIZE]; static double cuf_host[PROBLEM_SIZE]; static double q_host[PROBLEM_SIZE]; static double ue_host[5][PROBLEM_SIZE]; static double buf_host[5][PROBLEM_SIZE]; static double lhs_host[IMAXP+1][IMAXP+1][5]; static double lhsp_host[IMAXP+1][IMAXP+1][5]; static double lhsm_host[IMAXP+1][IMAXP+1][5]; static double ce_host[13][5]; #else static double (*u_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*us_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*vs_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*ws_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*qs_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*rho_i_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double 
(*speed_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*square_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*rhs_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*forcing_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*cv_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhon_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhos_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhoq_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*cuf_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*q_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*ue_host)[PROBLEM_SIZE]=(double(*)[PROBLEM_SIZE])malloc(sizeof(double)*((PROBLEM_SIZE)*(5))); static double (*buf_host)[PROBLEM_SIZE]=(double(*)[PROBLEM_SIZE])malloc(sizeof(double)*((PROBLEM_SIZE)*(5))); static double (*lhs_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*lhsp_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*lhsm_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*ce_host)[5]=(double(*)[5])malloc(sizeof(double)*((13)*(5))); #endif static int grid_points[3]; static double dt_host; /* gpu variables */ static double* u_device; static double* forcing_device; static double* rhs_device; static double* rho_i_device; static double* us_device; static double* vs_device; static double* ws_device; static double* qs_device; static double* speed_device; static double* square_device; static double* lhs_device; static double* 
rhs_buffer_device; static double* rms_buffer_device; static size_t size_u_device; static size_t size_forcing_device; static size_t size_rhs_device; static size_t size_rho_i_device; static size_t size_us_device; static size_t size_vs_device; static size_t size_ws_device; static size_t size_qs_device; static size_t size_speed_device; static size_t size_square_device; static size_t size_lhs_device; static size_t size_rhs_buffer_device; static size_t size_rms_buffer_device; static int nx; static int ny; static int nz; static int THREADS_PER_BLOCK_ON_ADD; static int THREADS_PER_BLOCK_ON_COMPUTE_RHS_1; static int THREADS_PER_BLOCK_ON_COMPUTE_RHS_2; static int THREADS_PER_BLOCK_ON_ERROR_NORM_1; static int THREADS_PER_BLOCK_ON_ERROR_NORM_2; static int THREADS_PER_BLOCK_ON_EXACT_RHS_1; static int THREADS_PER_BLOCK_ON_EXACT_RHS_2; static int THREADS_PER_BLOCK_ON_EXACT_RHS_3; static int THREADS_PER_BLOCK_ON_EXACT_RHS_4; static int THREADS_PER_BLOCK_ON_INITIALIZE; static int THREADS_PER_BLOCK_ON_RHS_NORM_1; static int THREADS_PER_BLOCK_ON_RHS_NORM_2; static int THREADS_PER_BLOCK_ON_TXINVR; static int THREADS_PER_BLOCK_ON_X_SOLVE; static int THREADS_PER_BLOCK_ON_Y_SOLVE; static int THREADS_PER_BLOCK_ON_Z_SOLVE; int gpu_device_id; int total_devices; hipDeviceProp_t gpu_device_properties; extern __shared__ double extern_share_data[]; namespace constants_device{ __constant__ double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3, dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt, dxmax, dymax, dzmax, xxcon1, xxcon2, xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6, c3c4tx3, c3c4ty3, 
c3c4tz3, c2iv, con43, con16, ce[13][5]; } /* function prototypes */ static void add_gpu(); __global__ static void add_gpu_kernel(double* u, const double* rhs, const int nx, const int ny, const int nz); static void adi_gpu(); static void compute_rhs_gpu(); __global__ static void compute_rhs_gpu_kernel_1(double* rho_i, double* us, double* vs, double* ws, double* speed, double* qs, double* square, const double* u, const int nx, const int ny, const int nz); __global__ static void compute_rhs_gpu_kernel_2(const double* rho_i, const double* us, const double* vs, const double* ws, const double* qs, const double* square, double* rhs, const double* forcing, const double* u, const int nx, const int ny, const int nz); static void error_norm_gpu(double rms[]); __global__ static void error_norm_gpu_kernel_1(double* rms, const double* u, const int nx, const int ny, const int nz); __global__ static void error_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz); static void exact_rhs_gpu(); __global__ static void exact_rhs_gpu_kernel_1(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_2(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_3(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_4(double* forcing, const int nx, const int ny, const int nz); __device__ static void exact_solution_gpu_device(const double xi, const double eta, const double zeta, double* dtemp); static void initialize_gpu(); __global__ static void initialize_gpu_kernel(double* u, const int nx, const int ny, const int nz); static void release_gpu(); static void rhs_norm_gpu(double rms[]); __global__ static void rhs_norm_gpu_kernel_1(double* rms, const double* rhs, const int nx, const int ny, const int nz); __global__ static void rhs_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz); static void set_constants(); 
static void setup_gpu();
static void txinvr_gpu();
__global__ static void txinvr_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, double* rhs, const int nx, const int ny, const int nz);
static void verify_gpu(int no_time_steps, char* class_npb, boolean* verified);
static void x_solve_gpu();
__global__ static void x_solve_gpu_kernel(const double* rho_i, const double* us, const double* speed, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
static void y_solve_gpu();
__global__ static void y_solve_gpu_kernel(const double* rho_i, const double* vs, const double* speed, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
static void z_solve_gpu();
__global__ static void z_solve_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, const double* u, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
/* sp */
/*
 * Benchmark entry point. Reads inputsp.data if present (otherwise compiled
 * defaults), sets up device buffers and constants, runs one untimed warm-up
 * ADI step, then times niter ADI iterations, verifies the result and prints
 * the standard NPB report (with per-kernel timings when PROFILING is on).
 */
int main(int argc, char** argv){
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
    printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n");
#endif
#if defined(PROFILING)
    printf(" PROFILING mode on\n");
#endif
    int niter, step, n3;
    double mflops, t, tmax;
    boolean verified;
    char class_npb;
    /*
     * ---------------------------------------------------------------------
     * read input file (if it exists), else take
     * defaults from parameters
     * ---------------------------------------------------------------------
     */
    FILE* fp;
    if((fp=fopen("inputsp.data","r"))!=NULL){
        int result;
        printf(" Reading from input file inputsp.data\n");
        result=fscanf(fp,"%d", &niter);
        while(fgetc(fp)!='\n');
        result=fscanf(fp,"%lf",&dt_host);
        while(fgetc(fp)!='\n');
        result=fscanf(fp,"%d%d%d",&grid_points[0],&grid_points[1],&grid_points[2]);
        result++; /* consumes the fscanf return so it is not flagged unused */
        fclose(fp);
    }else{
        printf(" No input file inputsp.data. Using compiled defaults\n");
        niter=NITER_DEFAULT;
        dt_host=DT_DEFAULT;
        grid_points[0]=PROBLEM_SIZE;
        grid_points[1]=PROBLEM_SIZE;
        grid_points[2]=PROBLEM_SIZE;
    }
    printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - SP Benchmark\n\n");
    printf(" Size: %4dx%4dx%4d\n",grid_points[0],grid_points[1],grid_points[2]);
    printf(" Iterations: %4d dt: %10.6f\n",niter,dt_host);
    printf("\n");
    /* refuse to run if the requested grid exceeds the compiled array sizes */
    if((grid_points[0]>IMAX)||(grid_points[1]>JMAX)||(grid_points[2]>KMAX)){
        printf(" %d, %d, %d\n",grid_points[0],grid_points[1],grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        return 0;
    }
    nx=grid_points[0];
    ny=grid_points[1];
    nz=grid_points[2];
    setup_gpu();
    set_constants();
    timer_clear(PROFILING_TOTAL_TIME);
#if defined(PROFILING)
    timer_clear(PROFILING_ADD);
    timer_clear(PROFILING_COMPUTE_RHS_1);
    timer_clear(PROFILING_COMPUTE_RHS_2);
    timer_clear(PROFILING_ERROR_NORM_1);
    timer_clear(PROFILING_ERROR_NORM_2);
    timer_clear(PROFILING_EXACT_RHS_1);
    timer_clear(PROFILING_EXACT_RHS_2);
    timer_clear(PROFILING_EXACT_RHS_3);
    timer_clear(PROFILING_EXACT_RHS_4);
    timer_clear(PROFILING_INITIALIZE);
    timer_clear(PROFILING_RHS_NORM_1);
    timer_clear(PROFILING_RHS_NORM_2);
    timer_clear(PROFILING_TXINVR);
    timer_clear(PROFILING_X_SOLVE);
    timer_clear(PROFILING_Y_SOLVE);
    timer_clear(PROFILING_Z_SOLVE);
#endif
    exact_rhs_gpu();
    initialize_gpu();
    /*
     * ---------------------------------------------------------------------
     * do one time step to touch all code, and reinitialize
     * ---------------------------------------------------------------------
     */
    adi_gpu();
    initialize_gpu();
    /* reset all timers so the warm-up step is excluded from the report */
    timer_clear(PROFILING_TOTAL_TIME);
#if defined(PROFILING)
    timer_clear(PROFILING_ADD);
    timer_clear(PROFILING_COMPUTE_RHS_1);
    timer_clear(PROFILING_COMPUTE_RHS_2);
    timer_clear(PROFILING_ERROR_NORM_1);
    timer_clear(PROFILING_ERROR_NORM_2);
    timer_clear(PROFILING_EXACT_RHS_1);
    timer_clear(PROFILING_EXACT_RHS_2);
    timer_clear(PROFILING_EXACT_RHS_3);
    timer_clear(PROFILING_EXACT_RHS_4);
    timer_clear(PROFILING_INITIALIZE);
    timer_clear(PROFILING_RHS_NORM_1);
    timer_clear(PROFILING_RHS_NORM_2);
    timer_clear(PROFILING_TXINVR);
    timer_clear(PROFILING_X_SOLVE);
    timer_clear(PROFILING_Y_SOLVE);
    timer_clear(PROFILING_Z_SOLVE);
#endif
    timer_start(PROFILING_TOTAL_TIME);/*#start_timer*/
    /* timed main iteration loop: one ADI sweep per time step */
    for(step=1;step<=niter;step++){
        if((step%20)==0||step==1){printf(" Time step %4d\n",step);}
        adi_gpu();
    }
    timer_stop(PROFILING_TOTAL_TIME);/*#stop_timer*/
    tmax=timer_read(PROFILING_TOTAL_TIME);
    verify_gpu(niter, &class_npb, &verified);
    /* MFLOPS from the NPB operation-count model for SP */
    if(tmax!=0.0){
        n3=grid_points[0]*grid_points[1]*grid_points[2];
        t=(grid_points[0]+grid_points[1]+grid_points[2])/3.0;
        mflops=(881.174*(double)n3-
                4683.91*(t*t)+
                11484.5*t-
                19272.4)*(double)niter/(tmax*1000000.0);
    }else{
        mflops=0.0;
    }
    /* assemble the per-kernel configuration/timing table for the report */
    char gpu_config[256];
    char gpu_config_string[2048];
#if defined(PROFILING)
    sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage");
    strcpy(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-add", THREADS_PER_BLOCK_ON_ADD, timer_read(PROFILING_ADD), (timer_read(PROFILING_ADD)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-compute-rhs-1", THREADS_PER_BLOCK_ON_COMPUTE_RHS_1, timer_read(PROFILING_COMPUTE_RHS_1), (timer_read(PROFILING_COMPUTE_RHS_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-compute-rhs-2", THREADS_PER_BLOCK_ON_COMPUTE_RHS_2, timer_read(PROFILING_COMPUTE_RHS_2), (timer_read(PROFILING_COMPUTE_RHS_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-error-norm-1", THREADS_PER_BLOCK_ON_ERROR_NORM_1, timer_read(PROFILING_ERROR_NORM_1), (timer_read(PROFILING_ERROR_NORM_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-error-norm-2", THREADS_PER_BLOCK_ON_ERROR_NORM_2, timer_read(PROFILING_ERROR_NORM_2), (timer_read(PROFILING_ERROR_NORM_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-1", THREADS_PER_BLOCK_ON_EXACT_RHS_1, timer_read(PROFILING_EXACT_RHS_1), (timer_read(PROFILING_EXACT_RHS_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-2", THREADS_PER_BLOCK_ON_EXACT_RHS_2, timer_read(PROFILING_EXACT_RHS_2), (timer_read(PROFILING_EXACT_RHS_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-3", THREADS_PER_BLOCK_ON_EXACT_RHS_3, timer_read(PROFILING_EXACT_RHS_3), (timer_read(PROFILING_EXACT_RHS_3)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-4", THREADS_PER_BLOCK_ON_EXACT_RHS_4, timer_read(PROFILING_EXACT_RHS_4), (timer_read(PROFILING_EXACT_RHS_4)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-initialize", THREADS_PER_BLOCK_ON_INITIALIZE, timer_read(PROFILING_INITIALIZE), (timer_read(PROFILING_INITIALIZE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-rhs-norm-1", THREADS_PER_BLOCK_ON_RHS_NORM_1, timer_read(PROFILING_RHS_NORM_1), (timer_read(PROFILING_RHS_NORM_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-rhs-norm-2", THREADS_PER_BLOCK_ON_RHS_NORM_2, timer_read(PROFILING_RHS_NORM_2), (timer_read(PROFILING_RHS_NORM_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-txinvr", THREADS_PER_BLOCK_ON_TXINVR, timer_read(PROFILING_TXINVR), (timer_read(PROFILING_TXINVR)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-x-solve", THREADS_PER_BLOCK_ON_X_SOLVE, timer_read(PROFILING_X_SOLVE), (timer_read(PROFILING_X_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-y-solve", THREADS_PER_BLOCK_ON_Y_SOLVE, timer_read(PROFILING_Y_SOLVE), (timer_read(PROFILING_Y_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-z-solve", THREADS_PER_BLOCK_ON_Z_SOLVE, timer_read(PROFILING_Z_SOLVE), (timer_read(PROFILING_Z_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
#else
    sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block");
    strcpy(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-add", THREADS_PER_BLOCK_ON_ADD);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-compute-rhs-1", THREADS_PER_BLOCK_ON_COMPUTE_RHS_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-compute-rhs-2", THREADS_PER_BLOCK_ON_COMPUTE_RHS_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-error-norm-1", THREADS_PER_BLOCK_ON_ERROR_NORM_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-error-norm-2", THREADS_PER_BLOCK_ON_ERROR_NORM_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-1", THREADS_PER_BLOCK_ON_EXACT_RHS_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-2", THREADS_PER_BLOCK_ON_EXACT_RHS_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-3", THREADS_PER_BLOCK_ON_EXACT_RHS_3);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-4", THREADS_PER_BLOCK_ON_EXACT_RHS_4);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-initialize", THREADS_PER_BLOCK_ON_INITIALIZE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-rhs-norm-1", THREADS_PER_BLOCK_ON_RHS_NORM_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-rhs-norm-2", THREADS_PER_BLOCK_ON_RHS_NORM_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-txinvr", THREADS_PER_BLOCK_ON_TXINVR);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-x-solve", THREADS_PER_BLOCK_ON_X_SOLVE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-y-solve", THREADS_PER_BLOCK_ON_Y_SOLVE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-z-solve", THREADS_PER_BLOCK_ON_Z_SOLVE);
    strcat(gpu_config_string, gpu_config);
#endif
    c_print_results((char*)"SP",
            class_npb,
            grid_points[0],
            grid_points[1],
            grid_points[2],
            niter,
            tmax,
            mflops,
            (char*)" floating point",
            verified,
            (char*)NPBVERSION,
            (char*)COMPILETIME,
            (char*)COMPILERVERSION,
            (char*)LIBVERSION,
            (char*)CPU_MODEL,
            (char*)gpu_device_properties.name,
            gpu_config_string,
            (char*)CS1,
            (char*)CS2,
            (char*)CS3,
            (char*)CS4,
            (char*)CS5,
            (char*)CS6,
            (char*)"(none)");
    release_gpu();
    return 0;
}
/*
 * ---------------------------------------------------------------------
 * addition of update to the vector u
 * ---------------------------------------------------------------------
 */
static void add_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_ADD);
#endif
    /* #KERNEL ADD */
    /* one thread per grid cell; ceil-divide to cover a non-multiple workload */
    int add_workload = nx * ny * nz;
    int add_threads_per_block = THREADS_PER_BLOCK_ON_ADD;
    int add_blocks_per_grid =
        (ceil((double)add_workload/(double)add_threads_per_block));
    hipLaunchKernelGGL(( add_gpu_kernel), dim3( add_blocks_per_grid), dim3( add_threads_per_block), 0, 0, u_device, rhs_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ADD);
#endif
}

/*
 * ---------------------------------------------------------------------
 * addition of update to the vector u
 * ---------------------------------------------------------------------
 */
/* u(m,i,j,k) += rhs(m,i,j,k) for the five solution components; one thread
 * per (i,j,k) cell, linearized x-fastest over the flat thread index */
__global__ static void add_gpu_kernel(double* u,
        const double* rhs,
        const int nx,
        const int ny,
        const int nz){
    int i_j_k, i, j, k;
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    /* guard the ceil-divided tail of the grid */
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    /* array(m,i,j,k) */
    u(0,i,j,k)+=rhs(0,i,j,k);
    u(1,i,j,k)+=rhs(1,i,j,k);
    u(2,i,j,k)+=rhs(2,i,j,k);
    u(3,i,j,k)+=rhs(3,i,j,k);
    u(4,i,j,k)+=rhs(4,i,j,k);
}

/* one full ADI time step: rhs, inversion, three directional solves, update */
static void adi_gpu(){
    compute_rhs_gpu();
    txinvr_gpu();
    x_solve_gpu();
    y_solve_gpu();
    z_solve_gpu();
    add_gpu();
}

/* Host driver for the two right-hand-side kernels: kernel 1 fills the
 * auxiliary fields (rho_i, us, vs, ws, speed, qs, square), kernel 2 builds
 * rhs from fluxes and dissipation. */
static void compute_rhs_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_COMPUTE_RHS_1);
#endif
    /* #KERNEL COMPUTE RHS 1 */
    int compute_rhs_1_workload = nx * ny * nz;
    int compute_rhs_1_threads_per_block = THREADS_PER_BLOCK_ON_COMPUTE_RHS_1;
    int compute_rhs_1_blocks_per_grid = (ceil((double)compute_rhs_1_workload/(double)compute_rhs_1_threads_per_block));
    hipLaunchKernelGGL(( compute_rhs_gpu_kernel_1), dim3( compute_rhs_1_blocks_per_grid), dim3( compute_rhs_1_threads_per_block), 0, 0, rho_i_device, us_device, vs_device, ws_device, speed_device, qs_device, square_device, u_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_COMPUTE_RHS_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_COMPUTE_RHS_2);
#endif
    /* #KERNEL COMPUTE RHS 2 */
    /* kernel 2 indexes i = threadIdx.x with one (j,k) pair per block, so the
     * block size is forced to exactly nx whenever the compiled constant
     * disagrees with it */
    int compute_rhs_2_threads_per_block;
    dim3 compute_rhs_2_blocks_per_grid(ny, nz);
    if(THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 != nx){
        compute_rhs_2_threads_per_block = nx;
    }
    else{
        compute_rhs_2_threads_per_block = THREADS_PER_BLOCK_ON_COMPUTE_RHS_2;
    }
    hipLaunchKernelGGL(( compute_rhs_gpu_kernel_2), dim3( compute_rhs_2_blocks_per_grid), dim3( compute_rhs_2_threads_per_block), 0, 0, rho_i_device, us_device, vs_device, ws_device, qs_device, square_device, rhs_device, forcing_device, u_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_COMPUTE_RHS_2);
#endif
}

/* Per-cell auxiliary quantities derived from the conserved variables u:
 * reciprocal density, velocities, kinetic-energy terms and sound speed. */
__global__ static void compute_rhs_gpu_kernel_1(double* rho_i,
        double* us,
        double* vs,
        double* ws,
        double* speed,
        double* qs,
        double* square,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i_j_k, i, j, k;
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * compute the reciprocal of density, and the kinetic energy,
     * and the speed of sound.
     * ---------------------------------------------------------------------
     */
    double rho_inv=1.0/u(0,i,j,k);
    double square_ijk;
    rho_i(i,j,k)=rho_inv;
    us(i,j,k)=u(1,i,j,k)*rho_inv;
    vs(i,j,k)=u(2,i,j,k)*rho_inv;
    ws(i,j,k)=u(3,i,j,k)*rho_inv;
    square(i,j,k)=square_ijk=0.5*(u(1,i,j,k)*u(1,i,j,k)+u(2,i,j,k)*u(2,i,j,k)+u(3,i,j,k)*u(3,i,j,k))*rho_inv;
    qs(i,j,k)=square_ijk*rho_inv;
    /*
     * ---------------------------------------------------------------------
     * (don't need speed and ainx until the lhs computation)
     * ---------------------------------------------------------------------
     */
    speed(i,j,k)=sqrt(c1c2*rho_inv*(u(4,i,j,k)-square_ijk));
}

/* Builds rhs = forcing + central-difference fluxes in all three directions
 * plus fourth-order artificial dissipation, then scales by dt. Launched with
 * blockIdx = (j,k) and threadIdx.x = i (blockDim.x is set to nx by the host
 * driver above). Boundary cells just copy the forcing term. */
__global__ static void compute_rhs_gpu_kernel_2(const double* rho_i,
        const double* us,
        const double* vs,
        const double* ws,
        const double* qs,
        const double* square,
        double* rhs,
        const double* forcing,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    k=blockIdx.y;
    j=blockIdx.x;
    i=threadIdx.x;
    double rtmp[5]; /* per-thread accumulator for the five rhs components */
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * copy the exact forcing term to the right hand side; because
     * this forcing term is known, we can store it on the whole grid
     * including the boundary
     * ---------------------------------------------------------------------
     */
    for(m=0;m<5;m++){rtmp[m]=forcing(m,i,j,k);}
    /*
     * ---------------------------------------------------------------------
     * compute xi-direction fluxes
     * ---------------------------------------------------------------------
     */
    if(k>=1 && k<nz-1 && j>=1 && j<ny-1 && i>=1 && i<nx-1){
        double uijk=us(i,j,k);
        double up1=us(i+1,j,k);
        double um1=us(i-1,j,k);
        rtmp[0]=rtmp[0]+dx1tx1*(u(0,i+1,j,k)-2.0*u(0,i,j,k)+u(0,i-1,j,k))-tx2*(u(1,i+1,j,k)-u(1,i-1,j,k));
        rtmp[1]=rtmp[1]+dx2tx1*(u(1,i+1,j,k)-2.0*u(1,i,j,k)+u(1,i-1,j,k))+xxcon2*con43*(up1-2.0*uijk+um1)-tx2*(u(1,i+1,j,k)*up1-u(1,i-1,j,k)*um1+(u(4,i+1,j,k)-square(i+1,j,k)-u(4,i-1,j,k)+square(i-1,j,k))*c2);
        rtmp[2]=rtmp[2]+dx3tx1*(u(2,i+1,j,k)-2.0*u(2,i,j,k)+u(2,i-1,j,k))+xxcon2*(vs(i+1,j,k)-2.0*vs(i,j,k)+vs(i-1,j,k))-tx2*(u(2,i+1,j,k)*up1-u(2,i-1,j,k)*um1);
        rtmp[3]=rtmp[3]+dx4tx1*(u(3,i+1,j,k)-2.0*u(3,i,j,k)+u(3,i-1,j,k))+xxcon2*(ws(i+1,j,k)-2.0*ws(i,j,k)+ws(i-1,j,k))-tx2*(u(3,i+1,j,k)*up1-u(3,i-1,j,k)*um1);
        rtmp[4]=rtmp[4]+dx5tx1*(u(4,i+1,j,k)-2.0*u(4,i,j,k)+u(4,i-1,j,k))+xxcon3*(qs(i+1,j,k)-2.0*qs(i,j,k)+qs(i-1,j,k))+ xxcon4*(up1*up1-2.0*uijk*uijk+um1*um1)+xxcon5*(u(4,i+1,j,k)*rho_i(i+1,j,k)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i-1,j,k)*rho_i(i-1,j,k))-tx2*((c1*u(4,i+1,j,k)-c2*square(i+1,j,k))*up1-(c1*u(4,i-1,j,k)-c2*square(i-1,j,k))*um1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order xi-direction dissipation
         * ---------------------------------------------------------------------
         */
        /* the stencil narrows to one-sided forms near the i boundaries */
        if(i==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i>=3 && i<nx-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i==nx-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k));}
        }else if(i==nx-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k) + 5.0*u(m,i,j,k));}
        }
        /*
         * ---------------------------------------------------------------------
         * compute eta-direction fluxes
         * ---------------------------------------------------------------------
         */
        double vijk=vs(i,j,k);
        double vp1=vs(i,j+1,k);
        double vm1=vs(i,j-1,k);
        rtmp[0]=rtmp[0]+dy1ty1*(u(0,i,j+1,k)-2.0*u(0,i,j,k)+u(0,i,j-1,k))-ty2*(u(2,i,j+1,k)-u(2,i,j-1,k));
        rtmp[1]=rtmp[1]+dy2ty1*(u(1,i,j+1,k)-2.0*u(1,i,j,k)+u(1,i,j-1,k))+yycon2*(us(i,j+1,k)-2.0*us(i,j,k)+us(i,j-1,k))-ty2*(u(1,i,j+1,k)*vp1-u(1,i,j-1,k)*vm1);
        rtmp[2]=rtmp[2]+dy3ty1*(u(2,i,j+1,k)-2.0*u(2,i,j,k)+u(2,i,j-1,k))+yycon2*con43*(vp1-2.0*vijk+vm1)-ty2*(u(2,i,j+1,k)*vp1-u(2,i,j-1,k)*vm1+(u(4,i,j+1,k)-square(i,j+1,k)-u(4,i,j-1,k)+square(i,j-1,k))*c2);
        rtmp[3]=rtmp[3]+dy4ty1*(u(3,i,j+1,k)-2.0*u(3,i,j,k)+u(3,i,j-1,k))+yycon2*(ws(i,j+1,k)-2.0*ws(i,j,k)+ws(i,j-1,k))-ty2*(u(3,i,j+1,k)*vp1-u(3,i,j-1,k)*vm1);
        rtmp[4]=rtmp[4]+dy5ty1*(u(4,i,j+1,k)-2.0*u(4,i,j,k)+u(4,i,j-1,k))+yycon3*(qs(i,j+1,k)-2.0*qs(i,j,k)+qs(i,j-1,k))+yycon4*(vp1*vp1-2.0*vijk*vijk+vm1*vm1)+yycon5*(u(4,i,j+1,k)*rho_i(i,j+1,k)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i,j-1,k)*rho_i(i,j-1,k))-ty2*((c1*u(4,i,j+1,k)-c2*square(i,j+1,k))*vp1-(c1*u(4,i,j-1,k)-c2*square(i,j-1,k))*vm1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order eta-direction dissipation
         * ---------------------------------------------------------------------
         */
        if(j==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j>=3 && j<ny-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j==ny-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k));}
        }else if(j==ny-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+5.0*u(m,i,j,k));}
        }
        /*
         * ---------------------------------------------------------------------
         * compute zeta-direction fluxes
         * ---------------------------------------------------------------------
         */
        double wijk=ws(i,j,k);
        double wp1=ws(i,j,k+1);
        double wm1=ws(i,j,k-1);
        rtmp[0]=rtmp[0]+dz1tz1*(u(0,i,j,k+1)-2.0*u(0,i,j,k)+u(0,i,j,k-1))-tz2*(u(3,i,j,k+1)-u(3,i,j,k-1));
        rtmp[1]=rtmp[1]+dz2tz1*(u(1,i,j,k+1)-2.0*u(1,i,j,k)+u(1,i,j,k-1))+zzcon2*(us(i,j,k+1)-2.0*us(i,j,k)+us(i,j,k-1))-tz2*(u(1,i,j,k+1)*wp1-u(1,i,j,k-1)*wm1);
        rtmp[2]=rtmp[2]+dz3tz1*(u(2,i,j,k+1)-2.0*u(2,i,j,k)+u(2,i,j,k-1))+zzcon2*(vs(i,j,k+1)-2.0*vs(i,j,k)+vs(i,j,k-1))-tz2*(u(2,i,j,k+1)*wp1-u(2,i,j,k-1)*wm1);
        rtmp[3]=rtmp[3]+dz4tz1*(u(3,i,j,k+1)-2.0*u(3,i,j,k)+u(3,i,j,k-1))+zzcon2*con43*(wp1-2.0*wijk+wm1)-tz2*(u(3,i,j,k+1)*wp1-u(3,i,j,k-1)*wm1+(u(4,i,j,k+1)-square(i,j,k+1)-u(4,i,j,k-1)+square(i,j,k-1))*c2);
        rtmp[4]=rtmp[4]+dz5tz1*(u(4,i,j,k+1)-2.0*u(4,i,j,k)+u(4,i,j,k-1))+zzcon3*(qs(i,j,k+1)-2.0*qs(i,j,k)+qs(i,j,k-1))+zzcon4*(wp1*wp1-2.0*wijk*wijk+wm1*wm1)+zzcon5*(u(4,i,j,k+1)*rho_i(i,j,k+1)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i,j,k-1)*rho_i(i,j,k-1))-tz2*((c1*u(4,i,j,k+1)-c2*square(i,j,k+1))*wp1-(c1*u(4,i,j,k-1)-c2*square(i,j,k-1))*wm1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order zeta-direction dissipation
         * ---------------------------------------------------------------------
         */
        if(k==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k>=3 && k<nz-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k==nz-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1));}
        }else if(k==nz-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+5.0*u(m,i,j,k));}
        }
        /* interior points only: scale the accumulated rhs by the time step */
        for(m=0;m<5;m++){rtmp[m]*=dt;}
    }
    for(m=0;m<5;m++){rhs(m,i,j,k)=rtmp[m];}
}

/*
 * ---------------------------------------------------------------------
 * this function computes the norm of the difference between the
 * computed solution and the exact solution
 * ---------------------------------------------------------------------
 */
/* Host driver: kernel 1 writes one partial sum per (i,j) column into
 * rms_buffer_device, kernel 2 reduces those partials (in dynamic shared
 * memory, 5 doubles per thread) and takes the RMS; the 5 results are then
 * copied back into rms[]. */
static void error_norm_gpu(double rms[]){
#if defined(PROFILING)
    timer_start(PROFILING_ERROR_NORM_1);
#endif
    /* #KERNEL ERROR NORM 1 */
    int error_norm_1_threads_per_block = THREADS_PER_BLOCK_ON_ERROR_NORM_1;
    dim3 error_norm_1_blocks_per_grid(ny, nx);
    hipLaunchKernelGGL(( error_norm_gpu_kernel_1), dim3( error_norm_1_blocks_per_grid), dim3( error_norm_1_threads_per_block), 0, 0, rms_buffer_device, u_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ERROR_NORM_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_ERROR_NORM_2);
#endif
    /* #KERNEL ERROR NORM 2 */
    /* single block; dynamic shared memory holds 5 accumulators per thread */
    int error_norm_2_threads_per_block = THREADS_PER_BLOCK_ON_ERROR_NORM_2;
    int error_norm_2_blocks_per_grid = 1;
    hipLaunchKernelGGL(( error_norm_gpu_kernel_2), dim3( error_norm_2_blocks_per_grid), dim3( error_norm_2_threads_per_block), sizeof(double)*error_norm_2_threads_per_block*5, 0, rms_buffer_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ERROR_NORM_2);
#endif
    hipMemcpy(rms, rms_buffer_device, 5*sizeof(double), hipMemcpyDeviceToHost);
}

/* For one (i,j) column, accumulate the squared error against the analytic
 * solution over all k and store the five per-component partial sums. */
__global__ static void error_norm_gpu_kernel_1(double* rms,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double xi, eta, zeta, u_exact[5], rms_loc[5];
    j=blockIdx.x*blockDim.x+threadIdx.x;
    i=blockIdx.y*blockDim.y+threadIdx.y;
    if(j>=ny ||
i>=nx){return;} using namespace constants_device; for(m=0;m<5;m++){rms_loc[m]=0.0;} xi=(double)i*dnxm1; eta=(double)j*dnym1; for(k=0; k<nz; k++){ zeta=(double)k*dnzm1; exact_solution_gpu_device(xi, eta, zeta, u_exact); for(m=0; m<5; m++){ double add=u(m,i,j,k)-u_exact[m]; rms_loc[m]+=add*add; } } for(m=0;m<5;m++){rms[i+nx*(j+ny*m)]=rms_loc[m];} } __global__ static void error_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz){ int i, m, maxpos, dist; double* buffer = (double*)extern_share_data; i = threadIdx.x; for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]=0.0;} while(i<nx*ny){ for(m=0;m<5;m++){buffer[threadIdx.x+(m*blockDim.x)]+=rms[i+nx*ny*m];} i+=blockDim.x; } maxpos=blockDim.x; dist=(maxpos+1)/2; i=threadIdx.x; __syncthreads(); while(maxpos>1){ if(i<dist && i+dist<maxpos){ for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]+=buffer[(i+dist)+(m*blockDim.x)];} } maxpos=dist; dist=(dist+1)/2; __syncthreads(); } m=threadIdx.x; if(m<5){rms[m]=sqrt(buffer[0+(m*blockDim.x)]/((double)(nz-2)*(double)(ny-2)*(double)(nx-2)));} } /* * --------------------------------------------------------------------- * compute the right hand side based on exact solution * --------------------------------------------------------------------- */ static void exact_rhs_gpu(){ #if defined(PROFILING) timer_start(PROFILING_EXACT_RHS_1); #endif /* #KERNEL EXACT RHS 1 */ int rhs1_workload = nx * ny * nz; int rhs1_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_1; int rhs1_blocks_per_grid = (ceil((double)rhs1_workload/(double)rhs1_threads_per_block)); hipLaunchKernelGGL(( exact_rhs_gpu_kernel_1), dim3( rhs1_blocks_per_grid), dim3( rhs1_threads_per_block), 0, 0, forcing_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_EXACT_RHS_1); #endif #if defined(PROFILING) timer_start(PROFILING_EXACT_RHS_2); #endif /* #KERNEL EXACT RHS 2 */ int rhs2_threads_per_block; dim3 rhs2_blocks_per_grid(nz, ny); if(THREADS_PER_BLOCK_ON_EXACT_RHS_2 > nx){ rhs2_threads_per_block = nx; } else{ 
rhs2_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_2; } hipLaunchKernelGGL(( exact_rhs_gpu_kernel_2), dim3( rhs2_blocks_per_grid), dim3( rhs2_threads_per_block), 0, 0, forcing_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_EXACT_RHS_2); #endif #if defined(PROFILING) timer_start(PROFILING_EXACT_RHS_3); #endif /* #KERNEL EXACT RHS 3 */ int rhs3_threads_per_block; dim3 rhs3_blocks_per_grid(nz, nx); if(THREADS_PER_BLOCK_ON_EXACT_RHS_3 > ny){ rhs3_threads_per_block = ny; } else{ rhs3_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_3; } hipLaunchKernelGGL(( exact_rhs_gpu_kernel_3), dim3( rhs3_blocks_per_grid), dim3( rhs3_threads_per_block), 0, 0, forcing_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_EXACT_RHS_3); #endif #if defined(PROFILING) timer_start(PROFILING_EXACT_RHS_4); #endif /* #KERNEL EXACT RHS 4 */ int rhs4_threads_per_block; dim3 rhs4_blocks_per_grid(ny, nx); if(THREADS_PER_BLOCK_ON_EXACT_RHS_4 > nz){ rhs4_threads_per_block = nz; } else{ rhs4_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_4; } hipLaunchKernelGGL(( exact_rhs_gpu_kernel_4), dim3( rhs4_blocks_per_grid), dim3( rhs4_threads_per_block), 0, 0, forcing_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_EXACT_RHS_4); #endif } __global__ static void exact_rhs_gpu_kernel_1(double* forcing, const int nx, const int ny, const int nz){ int i_j_k, i, j, k; i_j_k = blockIdx.x * blockDim.x + threadIdx.x; i = i_j_k % nx; j = (i_j_k / nx) % ny; k = i_j_k / (nx * ny); if(i_j_k >= (nx*ny*nz)){ return; } /* * --------------------------------------------------------------------- * initialize * --------------------------------------------------------------------- */ /* array(m,i,j,k) */ forcing(0,i,j,k)=0.0; forcing(1,i,j,k)=0.0; forcing(2,i,j,k)=0.0; forcing(3,i,j,k)=0.0; forcing(4,i,j,k)=0.0; } __global__ static void exact_rhs_gpu_kernel_2(double* forcing, const int nx, const int ny, const int nz){ int i, j, k, m; double xi, eta, zeta, dtemp[5], dtpp; 
double ue[5][5], buf[3][5], cuf[3], q[3];
    /* +1: only interior pencils 1..n-2 are processed */
    k=blockIdx.x*blockDim.x+threadIdx.x+1;
    j=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(k>=(nz-1) || j>=(ny-1)){return;}
    using namespace constants_device;
    zeta=(double)k*dnzm1;
    eta=(double)j*dnym1;
    /*
     * ---------------------------------------------------------------------
     * xi-direction flux differences
     * ---------------------------------------------------------------------
     */
    /* prime the stencil windows with the exact solution at i = 0, 1, 2 */
    for(i=0; i<3; i++){
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[i+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];                                  /* 1/rho */
        for(m=1;m<5;m++){buf[i][m]=dtpp*dtemp[m];}          /* velocities + energy per unit mass */
        cuf[i]=buf[i][1]*buf[i][1];                         /* u^2 (xi sweep) */
        buf[i][0]=cuf[i]+buf[i][2]*buf[i][2]+buf[i][3]*buf[i][3];
        q[i]=0.5*(buf[i][1]*ue[i+1][1]+buf[i][2]*ue[i+1][2]+buf[i][3]*ue[i+1][3]);
    }
    /* march i over the interior, sliding the window one point per step */
    for(i=1; i<nx-1; i++){
        /* load the leading edge of the window (i+2) while it is in range */
        if(i+2<nx){
            xi=(double)(i+2)*dnxm1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
        /* xi flux difference; starts from 0.0 because stage 1 zeroed forcing */
        dtemp[0]=0.0-tx2*(ue[3][1]-ue[1][1])+dx1tx1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=0.0-tx2*((ue[3][1]*buf[2][1]+c2*(ue[3][4]-q[2]))-(ue[1][1]*buf[0][1]+c2*(ue[1][4]-q[0])))+xxcon1*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dx2tx1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
        dtemp[2]=0.0-tx2*(ue[3][2]*buf[2][1]-ue[1][2]*buf[0][1])+xxcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dx3tx1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
        dtemp[3]=0.0-tx2*(ue[3][3]*buf[2][1]-ue[1][3]*buf[0][1])+xxcon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dx4tx1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
        dtemp[4]=0.0-tx2*(buf[2][1]*(c1*ue[3][4]-c2*q[2])-buf[0][1]*(c1*ue[1][4]-c2*q[0]))+0.5*xxcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+xxcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+xxcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dx5tx1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
        /*
         * ---------------------------------------------------------------------
         * fourth-order dissipation
         * (one-sided stencils near the i boundaries, centered in the interior)
         * ---------------------------------------------------------------------
         */
        if(i==1){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i==2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i>=3 && i<nx-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i==nx-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);}
        }else if(i==nx-2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);}
        }
        /* shift every window one position toward larger i */
        for(m=0;m<5;m++){
            ue[0][m]=ue[1][m];
            ue[1][m]=ue[2][m];
            ue[2][m]=ue[3][m];
            ue[3][m]=ue[4][m];
            buf[0][m]=buf[1][m];
            buf[1][m]=buf[2][m];
        }
        cuf[0]=cuf[1];
        cuf[1]=cuf[2];
        q[0]=q[1];
        q[1]=q[2];
        /* rebuild the leading derived-quantity slot from the new ue[3] */
        if(i<nx-2){
            dtpp=1.0/ue[3][0];
            for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];}
            cuf[2]=buf[2][1]*buf[2][1];
            buf[2][0]=cuf[2]+buf[2][2]*buf[2][2]+buf[2][3]*buf[2][3];
            q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]);
        }
    }
}

/*
 * ---------------------------------------------------------------------
 * exact_rhs stage 3: eta-direction flux differences, accumulated on top
 * of the stage-2 result. launch layout: thread (x,y) -> interior (k,i);
 * each thread sweeps the full j extent with the same sliding windows.
 * ---------------------------------------------------------------------
 */
__global__ static void exact_rhs_gpu_kernel_3(double* forcing, const int nx, const int ny, const int nz){
    int i, j, k, m;
    double xi, eta, zeta, dtemp[5], dtpp;
    double ue[5][5], buf[3][5], cuf[3], q[3];
    /* +1: only interior pencils 1..n-2 are processed */
    k=blockIdx.x*blockDim.x+threadIdx.x+1;
    i=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(k>=nz-1 || i>=nx-1){return;}
    using namespace constants_device;
    zeta=(double)k*dnzm1;
    xi=(double)i*dnxm1;
    /*
     * ---------------------------------------------------------------------
     * eta-direction flux differences
     * ---------------------------------------------------------------------
     */
    /* prime the stencil windows with the exact solution at j = 0, 1, 2 */
    for(j=0; j<3; j++){
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[j+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];
        for(m=1;m<5;m++){buf[j][m]=dtpp*dtemp[m];}
        cuf[j]=buf[j][2]*buf[j][2];                         /* v^2 (eta sweep) */
        buf[j][0]=cuf[j]+buf[j][1]*buf[j][1]+buf[j][3]*buf[j][3];
        q[j]=0.5*(buf[j][1]*ue[j+1][1]+buf[j][2]*ue[j+1][2]+buf[j][3]*ue[j+1][3]);
    }
    /* march j over the interior, sliding the window one point per step */
    for(j=1; j<ny-1; j++){
        /* load the leading edge of the window (j+2) while it is in range */
        if(j+2<ny){
            eta=(double)(j+2)*dnym1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
/* eta flux difference, accumulated onto the stage-2 forcing values */
        dtemp[0]=forcing(0,i,j,k)-ty2*(ue[3][2]-ue[1][2])+dy1ty1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=forcing(1,i,j,k)-ty2*(ue[3][1]*buf[2][2]-ue[1][1]*buf[0][2])+yycon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dy2ty1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
        dtemp[2]=forcing(2,i,j,k)-ty2*((ue[3][2]*buf[2][2]+c2*(ue[3][4]-q[2]))-(ue[1][2]*buf[0][2]+c2*(ue[1][4]-q[0])))+yycon1*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dy3ty1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
        dtemp[3]=forcing(3,i,j,k)-ty2*(ue[3][3]*buf[2][2]-ue[1][3]*buf[0][2])+yycon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dy4ty1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
        dtemp[4]=forcing(4,i,j,k)-ty2*(buf[2][2]*(c1*ue[3][4]-c2*q[2])-buf[0][2]*(c1*ue[1][4]-c2*q[0]))+0.5*yycon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+yycon4*(cuf[2]-2.0*cuf[1]+cuf[0])+yycon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dy5ty1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
        /*
         * ---------------------------------------------------------------------
         * fourth-order dissipation
         * (one-sided stencils near the j boundaries, centered in the interior)
         * ---------------------------------------------------------------------
         */
        if(j==1){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(j==2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(j>=3 && j<ny-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(j==ny-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);}
        }else if(j==ny-2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);}
        }
        /* shift every window one position toward larger j */
        for(m=0; m<5; m++){
            ue[0][m]=ue[1][m];
            ue[1][m]=ue[2][m];
            ue[2][m]=ue[3][m];
            ue[3][m]=ue[4][m];
            buf[0][m]=buf[1][m];
            buf[1][m]=buf[2][m];
        }
        cuf[0]=cuf[1];
        cuf[1]=cuf[2];
        q[0]=q[1];
        q[1]=q[2];
        /* rebuild the leading derived-quantity slot from the new ue[3] */
        if(j<ny-2){
            dtpp=1.0/ue[3][0];
            for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];}
            cuf[2]=buf[2][2]*buf[2][2];
            buf[2][0]=cuf[2]+buf[2][1]*buf[2][1]+buf[2][3]*buf[2][3];
            q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]);
        }
    }
}

/*
 * ---------------------------------------------------------------------
 * exact_rhs stage 4: zeta-direction flux differences, accumulated on
 * top of the stage-3 result, then sign-flipped. launch layout: thread
 * (x,y) -> interior (j,i); each thread sweeps the full k extent.
 * ---------------------------------------------------------------------
 */
__global__ static void exact_rhs_gpu_kernel_4(double* forcing, const int nx, const int ny, const int nz){
    int i, j, k, m;
    double xi, eta, zeta, dtpp, dtemp[5];
    double ue[5][5], buf[3][5], cuf[3], q[3];
    /* +1: only interior pencils 1..n-2 are processed */
    j=blockIdx.x*blockDim.x+threadIdx.x+1;
    i=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(j>=ny-1 || i>=nx-1){return;}
    using namespace constants_device;
    eta=(double)j*dnym1;
    xi=(double)i*dnxm1;
    /*
     * ---------------------------------------------------------------------
     * zeta-direction flux differences
     * ---------------------------------------------------------------------
     */
    /* prime the stencil windows with the exact solution at k = 0, 1, 2 */
    for(k=0; k<3; k++){
        zeta=(double)k*dnzm1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[k+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];
        for(m=1;m<5;m++){buf[k][m]=dtpp*dtemp[m];}
        cuf[k]=buf[k][3]*buf[k][3];                         /* w^2 (zeta sweep) */
        buf[k][0]=cuf[k]+buf[k][1]*buf[k][1]+buf[k][2]*buf[k][2];
        q[k]=0.5*(buf[k][1]*ue[k+1][1]+buf[k][2]*ue[k+1][2]+buf[k][3]*ue[k+1][3]);
    }
    /* march k over the interior, sliding the window one point per step */
    for(k=1; k<nz-1; k++){
        /* load the leading edge of the window (k+2) while it is in range */
        if(k+2<nz){
            zeta=(double)(k+2)*dnzm1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
        /* zeta flux difference, accumulated onto the stage-3 forcing values */
        dtemp[0]=forcing(0,i,j,k)-tz2*(ue[3][3]-ue[1][3])+dz1tz1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=forcing(1,i,j,k)-tz2*(ue[3][1]*buf[2][3]-ue[1][1]*buf[0][3])+zzcon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dz2tz1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
        dtemp[2]=forcing(2,i,j,k)-tz2*(ue[3][2]*buf[2][3]-ue[1][2]*buf[0][3])+zzcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dz3tz1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
        dtemp[3]=forcing(3,i,j,k)-tz2*((ue[3][3]*buf[2][3]+c2*(ue[3][4]-q[2]))-(ue[1][3]*buf[0][3]+c2*(ue[1][4]-q[0])))+zzcon1*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dz4tz1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
dtemp[4]=forcing(4,i,j,k)-tz2*(buf[2][3]*(c1*ue[3][4]-c2*q[2])-buf[0][3]*(c1*ue[1][4]-c2*q[0]))+0.5*zzcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+zzcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+zzcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dz5tz1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
        /*
         * ---------------------------------------------------------------------
         * fourth-order dissipation
         * (accumulated into dtemp here, not forcing, because the sign flip
         * below is the final write for this grid point)
         * ---------------------------------------------------------------------
         */
        if(k==1){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k==2){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k>=3 && k<nz-3){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k==nz-3){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);}
        }else if(k==nz-2){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);}
        }
        /*
         * ---------------------------------------------------------------------
         * now change the sign of the forcing function
         * ---------------------------------------------------------------------
         */
        for(m=0;m<5;m++){forcing(m,i,j,k)=-1.0*dtemp[m];}
        /* shift every window one position toward larger k */
        for(m=0; m<5; m++){
            ue[0][m]=ue[1][m];
            ue[1][m]=ue[2][m];
            ue[2][m]=ue[3][m];
            ue[3][m]=ue[4][m];
            buf[0][m]=buf[1][m];
            buf[1][m]=buf[2][m];
        }
        cuf[0]=cuf[1];
        cuf[1]=cuf[2];
        q[0]=q[1];
        q[1]=q[2];
        /* rebuild the leading derived-quantity slot from the new ue[3] */
        if(k<nz-2){
            dtpp=1.0/ue[3][0];
            for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];}
            cuf[2]=buf[2][3]*buf[2][3];
            buf[2][0]=cuf[2]+buf[2][1]*buf[2][1]+buf[2][2]*buf[2][2];
            q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]);
        }
    }
}

/*
 * ---------------------------------------------------------------------
 * this function returns the exact solution at point xi, eta, zeta
 * (tri-cubic polynomial in each coordinate, coefficients in ce[][])
 * ---------------------------------------------------------------------
 */
__device__ static void exact_solution_gpu_device(const double xi, const double eta, const double zeta, double* dtemp){
    using namespace constants_device;
    for(int m=0; m<5; m++){
        /* Horner evaluation of the three cubic factors, summed per component */
        dtemp[m]=ce[0][m]+xi*
            (ce[1][m]+xi*
            (ce[4][m]+xi*
            (ce[7][m]+xi*
             ce[10][m])))+eta*
            (ce[2][m]+eta*
            (ce[5][m]+eta*
            (ce[8][m]+eta*
             ce[11][m])))+zeta*
            (ce[3][m]+zeta*
            (ce[6][m]+zeta*
            (ce[9][m]+zeta*
             ce[12][m])));
    }
}

/*
 * ---------------------------------------------------------------------
 * this subroutine initializes the field variable u using
 * tri-linear transfinite interpolation of the boundary values
 * ---------------------------------------------------------------------
 */
static void initialize_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_INITIALIZE);
#endif
    /* #KERNEL INITIALIZE */
    int initialize_threads_per_block;
    dim3 initialize_blocks_per_grid(nz, ny);
    /* NOTE: both branches yield nx, so blockDim.x is always nx — required,
     * because the kernel maps i=threadIdx.x with no bounds guard */
    if(THREADS_PER_BLOCK_ON_INITIALIZE != nx){
        initialize_threads_per_block = nx;
    }
    else{
        initialize_threads_per_block = THREADS_PER_BLOCK_ON_INITIALIZE;
    }
    hipLaunchKernelGGL(( initialize_gpu_kernel), dim3( initialize_blocks_per_grid), dim3( initialize_threads_per_block), 0, 0, u_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_INITIALIZE);
#endif
}

/*
 * launch layout: block (x,y) -> (k,j), thread x -> i; one thread per
 * grid point, blockDim.x must equal nx.
 */
__global__ static void initialize_gpu_kernel(double* u, const int nx, const int ny, const int nz){
    int i, j, k, m;
    double xi, eta, zeta, temp[5];
    double Pface11[5], Pface12[5], Pface21[5], Pface22[5], Pface31[5], Pface32[5];
    k=blockIdx.x;
    j=blockIdx.y;
    i=threadIdx.x;
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * later (in compute_rhs_gpu) we compute 1/u for every element. a few of
     * the corner elements are not used, but it convenient (and faster)
     * to compute the whole thing with a simple loop. make sure those
     * values are nonzero by initializing the whole thing here.
 * ---------------------------------------------------------------------
     */
    u(0,i,j,k)=1.0;
    u(1,i,j,k)=0.0;
    u(2,i,j,k)=0.0;
    u(3,i,j,k)=0.0;
    u(4,i,j,k)=1.0;
    /*
     * ---------------------------------------------------------------------
     * first store the "interpolated" values everywhere on the grid
     * ---------------------------------------------------------------------
     */
    zeta=(double)k*dnzm1;
    eta=(double)j*dnym1;
    xi=(double)i*dnxm1;
    /* exact solution on the six bounding faces of the unit cube */
    exact_solution_gpu_device(0.0, eta, zeta, Pface11);
    exact_solution_gpu_device(1.0, eta, zeta, Pface12);
    exact_solution_gpu_device(xi, 0.0, zeta, Pface21);
    exact_solution_gpu_device(xi, 1.0, zeta, Pface22);
    exact_solution_gpu_device(xi, eta, 0.0, Pface31);
    exact_solution_gpu_device(xi, eta, 1.0, Pface32);
    /* tri-linear transfinite interpolation of the face values */
    for(m=0; m<5; m++){
        double Pxi=xi*Pface12[m]+(1.0-xi)*Pface11[m];
        double Peta=eta*Pface22[m]+(1.0-eta)*Pface21[m];
        double Pzeta=zeta*Pface32[m]+(1.0-zeta)*Pface31[m];
        u(m,i,j,k)=Pxi+Peta+Pzeta-Pxi*Peta-Pxi*Pzeta-Peta*Pzeta+Pxi*Peta*Pzeta;
    }
    /*
     * ---------------------------------------------------------------------
     * now store the exact values on the boundaries
     * ---------------------------------------------------------------------
     * west face
     * ---------------------------------------------------------------------
     */
    xi=0.0;
    if(i==0){
        zeta=(double)k*dnzm1;
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * east face
     * ---------------------------------------------------------------------
     */
    xi=1.0;
    if(i==nx-1){
        zeta=(double)k*dnzm1;
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * south face
     * ---------------------------------------------------------------------
     */
    eta=0.0;
    if(j==0){
        zeta=(double)k*dnzm1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * north face
     * ---------------------------------------------------------------------
     */
    eta=1.0;
    if(j==ny-1){
        zeta=(double)k*dnzm1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * bottom face
     * ---------------------------------------------------------------------
     */
    zeta=0.0;
    if(k==0){
        eta=(double)j*dnym1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * top face
     * ---------------------------------------------------------------------
     */
    zeta=1.0;
    if(k==nz-1){
        eta=(double)j*dnym1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
}

/* free every device-side buffer allocated by setup_gpu */
static void release_gpu(){
    hipFree(u_device);
    hipFree(forcing_device);
    hipFree(rhs_device);
    hipFree(rho_i_device);
    hipFree(us_device);
    hipFree(vs_device);
    hipFree(ws_device);
    hipFree(qs_device);
    hipFree(speed_device);
    hipFree(square_device);
    hipFree(lhs_device);
    hipFree(rhs_buffer_device);
    hipFree(rms_buffer_device);
}

/*
 * two-stage RMS norm of rhs: stage 1 produces per-(i,j) partial sums of
 * squares, stage 2 reduces them in shared memory and writes the 5 norms
 * back into the front of rms_buffer_device, copied out to rms[].
 */
static void rhs_norm_gpu(double rms[]){
#if defined(PROFILING)
    timer_start(PROFILING_RHS_NORM_1);
#endif
    /* #KERNEL RHS NORM 1 */
    int rhs_norm_1_threads_per_block = THREADS_PER_BLOCK_ON_RHS_NORM_1;
    dim3 rhs_norm_1_blocks_per_grid(ny, nx);
    hipLaunchKernelGGL(( rhs_norm_gpu_kernel_1), dim3( rhs_norm_1_blocks_per_grid), dim3( rhs_norm_1_threads_per_block), 0, 0, rms_buffer_device, rhs_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_RHS_NORM_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_RHS_NORM_2);
#endif
    /* #KERNEL RHS NORM 2 — single block; dynamic shared memory holds 5
     * partial sums per thread */
    int rhs_norm_2_threads_per_block = THREADS_PER_BLOCK_ON_RHS_NORM_2;
    int rhs_norm_2_blocks_per_grid = 1;
    hipLaunchKernelGGL((
rhs_norm_gpu_kernel_2), dim3( rhs_norm_2_blocks_per_grid), dim3( rhs_norm_2_threads_per_block), sizeof(double)*rhs_norm_2_threads_per_block*5, 0, rms_buffer_device, nx, ny, nz);
#if defined(PROFILING)
    timer_stop(PROFILING_RHS_NORM_2);
#endif
    /* stage 2 left the 5 final norms at the front of the buffer */
    hipMemcpy(rms, rms_buffer_device, 5*sizeof(double), hipMemcpyDeviceToHost);
}

/*
 * stage 1: one thread per (i,j) column; sums rhs^2 over interior k for
 * each of the 5 components and stores the partials at rms[i+nx*(j+ny*m)].
 * boundary columns store zeros so stage 2 can sum unconditionally.
 */
__global__ static void rhs_norm_gpu_kernel_1(double* rms, const double* rhs, const int nx, const int ny, const int nz){
    int i, j, k, m;
    double rms_loc[5];
    j=blockIdx.x*blockDim.x+threadIdx.x;
    i=blockIdx.y*blockDim.y+threadIdx.y;
    if(j>=ny || i>=nx){return;}
    for(m=0;m<5;m++){rms_loc[m]=0.0;}
    /* only interior points contribute to the norm */
    if(i>=1 && i<nx-1 && j>=1 && j<ny-1){
        for(k=1; k<nz-1; k++){
            for(int m=0; m<5; m++){
                double add=rhs(m,i,j,k);
                rms_loc[m]+=add*add;
            }
        }
    }
    for(m=0;m<5;m++){rms[i+nx*(j+ny*m)]=rms_loc[m];}
}

/*
 * stage 2: single-block tree reduction over the nx*ny stage-1 partials.
 * dynamic shared memory: blockDim.x doubles for each of the 5 components.
 * threads 0..4 write the final RMS values into rms[0..4].
 */
__global__ static void rhs_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz){
    int i, m, maxpos, dist;
    double* buffer = (double*)extern_share_data;
    i = threadIdx.x;
    for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]=0.0;}
    /* grid-stride style accumulation of the nx*ny partials into shared memory */
    while(i<nx*ny){
        for(m=0;m<5;m++){buffer[threadIdx.x+(m*blockDim.x)]+=rms[i+nx*ny*m];}
        i+=blockDim.x;
    }
    maxpos=blockDim.x;
    dist=(maxpos+1)/2;
    i=threadIdx.x;
    __syncthreads();
    /* halving tree reduction; maxpos/dist are uniform, so the barrier is
     * reached by all threads every iteration */
    while(maxpos>1){
        if(i<dist && i+dist<maxpos){
            for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]+=buffer[(i+dist)+(m*blockDim.x)];}
        }
        maxpos=dist;
        dist=(dist+1)/2;
        __syncthreads();
    }
    m=threadIdx.x;
    if(m<5){rms[m]=sqrt(buffer[0+(m*blockDim.x)]/((double)(nz-2)*(double)(ny-2)*(double)(nx-2)));}
}

/*
 * compute every problem constant on the host and copy each one into the
 * constants_device namespace in device constant memory.
 */
static void set_constants(){
    double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
        dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5,
        dssp, dt, dxmax, dymax, dzmax,
        xxcon1, xxcon2, xxcon3, xxcon4, xxcon5,
        dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1,
        yycon1, yycon2, yycon3, yycon4, yycon5,
        dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
        zzcon1, zzcon2, zzcon3, zzcon4, zzcon5,
        dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1,
        dnxm1, dnym1, dnzm1,
        c1c2, c1c5, c3c4, c1345, conz1,
        c1, c2, c3, c4, c5,
c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2,
        c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
        c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16,
        ce[13][5];
    /* exact-solution polynomial coefficients, component 0 */
    ce[0][0]=2.0; ce[1][0]=0.0; ce[2][0]=0.0; ce[3][0]=4.0; ce[4][0]=5.0;
    ce[5][0]=3.0; ce[6][0]=0.5; ce[7][0]=0.02; ce[8][0]=0.01; ce[9][0]=0.03;
    ce[10][0]=0.5; ce[11][0]=0.4; ce[12][0]=0.3;
    /* component 1 */
    ce[0][1]=1.0; ce[1][1]=0.0; ce[2][1]=0.0; ce[3][1]=0.0; ce[4][1]=1.0;
    ce[5][1]=2.0; ce[6][1]=3.0; ce[7][1]=0.01; ce[8][1]=0.03; ce[9][1]=0.02;
    ce[10][1]=0.4; ce[11][1]=0.3; ce[12][1]=0.5;
    /* component 2 */
    ce[0][2]=2.0; ce[1][2]=2.0; ce[2][2]=0.0; ce[3][2]=0.0; ce[4][2]=0.0;
    ce[5][2]=2.0; ce[6][2]=3.0; ce[7][2]=0.04; ce[8][2]=0.03; ce[9][2]=0.05;
    ce[10][2]=0.3; ce[11][2]=0.5; ce[12][2]=0.4;
    /* component 3 */
    ce[0][3]=2.0; ce[1][3]=2.0; ce[2][3]=0.0; ce[3][3]=0.0; ce[4][3]=0.0;
    ce[5][3]=2.0; ce[6][3]=3.0; ce[7][3]=0.03; ce[8][3]=0.05; ce[9][3]=0.04;
    ce[10][3]=0.2; ce[11][3]=0.1; ce[12][3]=0.3;
    /* component 4 */
    ce[0][4]=5.0; ce[1][4]=4.0; ce[2][4]=3.0; ce[3][4]=2.0; ce[4][4]=0.1;
    ce[5][4]=0.4; ce[6][4]=0.3; ce[7][4]=0.05; ce[8][4]=0.04; ce[9][4]=0.03;
    ce[10][4]=0.1; ce[11][4]=0.3; ce[12][4]=0.2;
    /* derived constants */
    bt=sqrt(0.5);
    dt=dt_host;
    c1=1.4; c2=0.4; c3=0.1; c4=1.0; c5=1.4;
    /* mesh spacings */
    dnxm1=1.0/(double)(grid_points[0]-1);
    dnym1=1.0/(double)(grid_points[1]-1);
    dnzm1=1.0/(double)(grid_points[2]-1);
    c1c2=c1*c2;
    c1c5=c1*c5;
    c3c4=c3*c4;
    c1345=c1c5*c3c4;
    conz1=(1.0-c1c5);
    tx1=1.0/(dnxm1*dnxm1);
    tx2=1.0/(2.0*dnxm1);
    tx3=1.0/dnxm1;
    ty1=1.0/(dnym1*dnym1);
    ty2=1.0/(2.0*dnym1);
    ty3=1.0/dnym1;
    tz1=1.0/(dnzm1*dnzm1);
    tz2=1.0/(2.0*dnzm1);
    tz3=1.0/dnzm1;
    dx1=0.75; dx2=0.75; dx3=0.75; dx4=0.75; dx5=0.75;
    dy1=0.75; dy2=0.75; dy3=0.75; dy4=0.75; dy5=0.75;
    dz1=1.0; dz2=1.0; dz3=1.0; dz4=1.0; dz5=1.0;
    dxmax=max(dx3, dx4);
    dymax=max(dy2, dy4);
    dzmax=max(dz2, dz3);
    dssp=0.25*max(dx1, max(dy1, dz1));
    c4dssp=4.0*dssp;
    c5dssp=5.0*dssp;
    dttx1=dt*tx1;
    dttx2=dt*tx2;
    dtty1=dt*ty1;
    dtty2=dt*ty2;
    dttz1=dt*tz1;
    dttz2=dt*tz2;
    c2dttx1=2.0*dttx1;
    c2dtty1=2.0*dtty1;
    c2dttz1=2.0*dttz1;
    dtdssp=dt*dssp;
    comz1=dtdssp;
    comz4=4.0*dtdssp;
    comz5=5.0*dtdssp;
    comz6=6.0*dtdssp;
    c3c4tx3=c3c4*tx3;
    c3c4ty3=c3c4*ty3;
    c3c4tz3=c3c4*tz3;
    dx1tx1=dx1*tx1;
    dx2tx1=dx2*tx1;
    dx3tx1=dx3*tx1;
    dx4tx1=dx4*tx1;
    dx5tx1=dx5*tx1;
    dy1ty1=dy1*ty1;
    dy2ty1=dy2*ty1;
    dy3ty1=dy3*ty1;
    dy4ty1=dy4*ty1;
    dy5ty1=dy5*ty1;
    dz1tz1=dz1*tz1;
    dz2tz1=dz2*tz1;
    dz3tz1=dz3*tz1;
    dz4tz1=dz4*tz1;
    dz5tz1=dz5*tz1;
    c2iv=2.5;
    con43=4.0/3.0;
    con16=1.0/6.0;
    xxcon1=c3c4tx3*con43*tx3;
    xxcon2=c3c4tx3*tx3;
    xxcon3=c3c4tx3*conz1*tx3;
    xxcon4=c3c4tx3*con16*tx3;
    xxcon5=c3c4tx3*c1c5*tx3;
    yycon1=c3c4ty3*con43*ty3;
    yycon2=c3c4ty3*ty3;
    yycon3=c3c4ty3*conz1*ty3;
    yycon4=c3c4ty3*con16*ty3;
    yycon5=c3c4ty3*c1c5*ty3;
    zzcon1=c3c4tz3*con43*tz3;
    zzcon2=c3c4tz3*tz3;
    zzcon3=c3c4tz3*conz1*tz3;
    zzcon4=c3c4tz3*con16*tz3;
    zzcon5=c3c4tz3*c1c5*tz3;
    /* publish everything to device constant memory */
    hipMemcpyToSymbol(constants_device::ce, &ce, 13*5*sizeof(double));
    hipMemcpyToSymbol(constants_device::dt, &dt, sizeof(double));
    hipMemcpyToSymbol(constants_device::bt, &bt, sizeof(double));
    hipMemcpyToSymbol(constants_device::c1, &c1, sizeof(double));
    hipMemcpyToSymbol(constants_device::c2, &c2, sizeof(double));
    hipMemcpyToSymbol(constants_device::c3, &c3, sizeof(double));
    hipMemcpyToSymbol(constants_device::c4, &c4, sizeof(double));
    hipMemcpyToSymbol(constants_device::c5, &c5, sizeof(double));
    hipMemcpyToSymbol(constants_device::dnxm1, &dnxm1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dnym1, &dnym1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dnzm1, &dnzm1, sizeof(double));
    hipMemcpyToSymbol(constants_device::c1c2, &c1c2, sizeof(double));
    hipMemcpyToSymbol(constants_device::c1c5, &c1c5, sizeof(double));
    hipMemcpyToSymbol(constants_device::c3c4, &c3c4, sizeof(double));
    hipMemcpyToSymbol(constants_device::c1345, &c1345, sizeof(double));
    hipMemcpyToSymbol(constants_device::conz1, &conz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::tx1, &tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::tx2, &tx2, sizeof(double));
    hipMemcpyToSymbol(constants_device::tx3, &tx3, sizeof(double));
    hipMemcpyToSymbol(constants_device::ty1, &ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::ty2, &ty2, sizeof(double));
    hipMemcpyToSymbol(constants_device::ty3, &ty3, sizeof(double));
    hipMemcpyToSymbol(constants_device::tz1, &tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::tz2, &tz2, sizeof(double));
    hipMemcpyToSymbol(constants_device::tz3, &tz3, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx1, &dx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx2, &dx2, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx3, &dx3, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx4, &dx4, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx5, &dx5, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy1, &dy1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy2, &dy2, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy3, &dy3, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy4, &dy4, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy5, &dy5, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz1, &dz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz2, &dz2, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz3, &dz3, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz4, &dz4, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz5, &dz5, sizeof(double));
    hipMemcpyToSymbol(constants_device::dxmax, &dxmax, sizeof(double));
    hipMemcpyToSymbol(constants_device::dymax, &dymax, sizeof(double));
    hipMemcpyToSymbol(constants_device::dzmax, &dzmax, sizeof(double));
    hipMemcpyToSymbol(constants_device::dssp, &dssp, sizeof(double));
    hipMemcpyToSymbol(constants_device::c4dssp, &c4dssp, sizeof(double));
    hipMemcpyToSymbol(constants_device::c5dssp, &c5dssp, sizeof(double));
    hipMemcpyToSymbol(constants_device::dttx1, &dttx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dttx2, &dttx2, sizeof(double));
    hipMemcpyToSymbol(constants_device::dtty1, &dtty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dtty2, &dtty2, sizeof(double));
    hipMemcpyToSymbol(constants_device::dttz1, &dttz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dttz2, &dttz2, sizeof(double));
    hipMemcpyToSymbol(constants_device::c2dttx1, &c2dttx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::c2dtty1, &c2dtty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::c2dttz1, &c2dttz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dtdssp, &dtdssp, sizeof(double));
    hipMemcpyToSymbol(constants_device::comz1, &comz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::comz4, &comz4, sizeof(double));
    hipMemcpyToSymbol(constants_device::comz5, &comz5, sizeof(double));
    hipMemcpyToSymbol(constants_device::comz6, &comz6, sizeof(double));
    hipMemcpyToSymbol(constants_device::c3c4tx3, &c3c4tx3, sizeof(double));
    hipMemcpyToSymbol(constants_device::c3c4ty3, &c3c4ty3, sizeof(double));
    hipMemcpyToSymbol(constants_device::c3c4tz3, &c3c4tz3, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx1tx1, &dx1tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx2tx1, &dx2tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx3tx1, &dx3tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx4tx1, &dx4tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dx5tx1, &dx5tx1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy1ty1, &dy1ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy2ty1, &dy2ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy3ty1, &dy3ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy4ty1, &dy4ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dy5ty1, &dy5ty1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz1tz1, &dz1tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz2tz1, &dz2tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz3tz1, &dz3tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz4tz1, &dz4tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::dz5tz1, &dz5tz1, sizeof(double));
    hipMemcpyToSymbol(constants_device::c2iv, &c2iv, sizeof(double));
    hipMemcpyToSymbol(constants_device::con43, &con43, sizeof(double));
    hipMemcpyToSymbol(constants_device::con16, &con16, sizeof(double));
    hipMemcpyToSymbol(constants_device::xxcon1, &xxcon1, sizeof(double));
    hipMemcpyToSymbol(constants_device::xxcon2, &xxcon2, sizeof(double));
    hipMemcpyToSymbol(constants_device::xxcon3, &xxcon3, sizeof(double));
    hipMemcpyToSymbol(constants_device::xxcon4, &xxcon4, sizeof(double));
    hipMemcpyToSymbol(constants_device::xxcon5, &xxcon5, sizeof(double));
    hipMemcpyToSymbol(constants_device::yycon1, &yycon1, sizeof(double));
    hipMemcpyToSymbol(constants_device::yycon2, &yycon2, sizeof(double));
    hipMemcpyToSymbol(constants_device::yycon3, &yycon3, sizeof(double));
    hipMemcpyToSymbol(constants_device::yycon4, &yycon4, sizeof(double));
    hipMemcpyToSymbol(constants_device::yycon5, &yycon5, sizeof(double));
    hipMemcpyToSymbol(constants_device::zzcon1, &zzcon1, sizeof(double));
    hipMemcpyToSymbol(constants_device::zzcon2, &zzcon2, sizeof(double));
    hipMemcpyToSymbol(constants_device::zzcon3, &zzcon3, sizeof(double));
    hipMemcpyToSymbol(constants_device::zzcon4, &zzcon4, sizeof(double));
    hipMemcpyToSymbol(constants_device::zzcon5, &zzcon5, sizeof(double));
}

/*
 * select the GPU, pick a threads-per-block value for every kernel
 * (falling back to the warp size when the configured value is out of
 * range), and allocate all device buffers.
 */
static void setup_gpu(){
    /*
     * see struct hipDeviceProp_t (name, totalGlobalMem, sharedMemPerBlock,
     * regsPerBlock, warpSize, maxThreadsPerBlock, maxThreadsDim,
     * maxGridSize, totalConstMem, major/minor, multiProcessorCount, ...)
     * for the fields consulted below.
     */
    /* amount of available devices */
    hipGetDeviceCount(&total_devices);
    /* define gpu_device */
    if(total_devices==0){
        printf("\n\n\nNo Nvidia GPU found!\n\n\n");
        exit(-1);
    }else if((GPU_DEVICE>=0)&&
            (GPU_DEVICE<total_devices)){
        gpu_device_id = GPU_DEVICE;
    }else{
        /* invalid configuration: fall back to device 0 */
        gpu_device_id = 0;
    }
    hipSetDevice(gpu_device_id);
    hipGetDeviceProperties(&gpu_device_properties, gpu_device_id);
    /* define threads_per_block */
    if((SP_THREADS_PER_BLOCK_ON_ADD>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ADD<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ADD = SP_THREADS_PER_BLOCK_ON_ADD;
    }
    else{
        THREADS_PER_BLOCK_ON_ADD = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_1 = SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1;
    }
    else{
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 = SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2;
    }
    else{
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ERROR_NORM_1 = SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1;
    }
    else{
        THREADS_PER_BLOCK_ON_ERROR_NORM_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ERROR_NORM_2 = SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2;
    }
    else{
        THREADS_PER_BLOCK_ON_ERROR_NORM_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_1 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_1 = gpu_device_properties.warpSize;
    }
if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2>=1)&&
        (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_2 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_3 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_3 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_4 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_4=gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_INITIALIZE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_INITIALIZE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_INITIALIZE = SP_THREADS_PER_BLOCK_ON_INITIALIZE;
    }
    else{
        THREADS_PER_BLOCK_ON_INITIALIZE=gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_RHS_NORM_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_RHS_NORM_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_RHS_NORM_1 = SP_THREADS_PER_BLOCK_ON_RHS_NORM_1;
    }
    else{
        THREADS_PER_BLOCK_ON_RHS_NORM_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_RHS_NORM_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_RHS_NORM_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_RHS_NORM_2 = SP_THREADS_PER_BLOCK_ON_RHS_NORM_2;
    }
    else{
        THREADS_PER_BLOCK_ON_RHS_NORM_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_TXINVR>=1)&&
            (SP_THREADS_PER_BLOCK_ON_TXINVR<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_TXINVR = SP_THREADS_PER_BLOCK_ON_TXINVR;
    }
    else{
        THREADS_PER_BLOCK_ON_TXINVR = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_X_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_X_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_X_SOLVE = SP_THREADS_PER_BLOCK_ON_X_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_X_SOLVE = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_Y_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_Y_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_Y_SOLVE = SP_THREADS_PER_BLOCK_ON_Y_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_Y_SOLVE = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_Z_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_Z_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_Z_SOLVE = SP_THREADS_PER_BLOCK_ON_Z_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_Z_SOLVE = gpu_device_properties.warpSize;
    }
    /* buffer sizes: 5-component fields, scalar fields, and the 9-wide lhs */
    int gridsize=nx*ny*nz;
    int facesize=max(max(nx*ny, nx*nz), ny*nz);
    size_u_device=sizeof(double)*(5*gridsize);
    size_forcing_device=sizeof(double)*(5*gridsize);
    size_rhs_device=sizeof(double)*(5*gridsize);
    size_rho_i_device=sizeof(double)*(gridsize);
    size_us_device=sizeof(double)*(gridsize);
    size_vs_device=sizeof(double)*(gridsize);
    size_ws_device=sizeof(double)*(gridsize);
    size_qs_device=sizeof(double)*(gridsize);
    size_speed_device=sizeof(double)*(gridsize);
    size_square_device=sizeof(double)*(gridsize);
    size_lhs_device=sizeof(double)*(9*gridsize);
    size_rhs_buffer_device=sizeof(double)*(5*gridsize);
    size_rms_buffer_device=sizeof(double)*(5*facesize);
    hipMalloc(&u_device, size_u_device);
    hipMalloc(&forcing_device, size_forcing_device);
    hipMalloc(&rhs_device, size_rhs_device);
    hipMalloc(&rho_i_device, size_rho_i_device);
    hipMalloc(&us_device, size_us_device);
    hipMalloc(&vs_device, size_vs_device);
    hipMalloc(&ws_device, size_ws_device);
    hipMalloc(&qs_device, size_qs_device);
    hipMalloc(&speed_device, size_speed_device);
    hipMalloc(&square_device, size_square_device);
    hipMalloc(&lhs_device, size_lhs_device);
    hipMalloc(&rhs_buffer_device, size_rhs_buffer_device);
    hipMalloc(&rms_buffer_device, size_rms_buffer_device);
}

/*
 *
---------------------------------------------------------------------
 * block-diagonal matrix-vector multiplication
 * ---------------------------------------------------------------------
 */
/*
 * Host wrapper for the TXINVR kernel: one thread per grid cell
 * (flat 1D launch over nx*ny*nz, grid size rounded up), optionally
 * bracketed by the PROFILING timers.
 */
static void txinvr_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_TXINVR);
#endif
    /* #KERNEL TXINVR */
    int txinvr_workload = nx * ny * nz;
    int txinvr_threads_per_block = THREADS_PER_BLOCK_ON_TXINVR;
    /* ceiling division so every cell is covered by the launch */
    int txinvr_blocks_per_grid = (ceil((double)txinvr_workload/(double)txinvr_threads_per_block));
    hipLaunchKernelGGL(( txinvr_gpu_kernel), dim3( txinvr_blocks_per_grid), dim3( txinvr_threads_per_block), 0, 0, rho_i_device,
        us_device,
        vs_device,
        ws_device,
        speed_device,
        qs_device,
        rhs_device,
        nx,
        ny,
        nz);
#if defined(PROFILING)
    timer_stop(PROFILING_TXINVR);
#endif
}

/*
 * TXINVR kernel: applies the block-diagonal inverse transform to the
 * five rhs components at each point. The flat thread index is decomposed
 * into (i,j,k); threads beyond nx*ny*nz (launch round-up) exit early.
 * rho_i/us/vs/ws/speed/qs/rhs are accessed through indexing macros
 * defined elsewhere in this file.
 */
__global__ static void txinvr_gpu_kernel(const double* rho_i,
    const double* us,
    const double* vs,
    const double* ws,
    const double* speed,
    const double* qs,
    double* rhs,
    const int nx,
    const int ny,
    const int nz){
    int i_j_k, i, j, k;
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    using namespace constants_device;
    double ru1=rho_i(i,j,k);
    double uu=us(i,j,k);
    double vv=vs(i,j,k);
    double ww=ws(i,j,k);
    double ac=speed(i,j,k);
    double ac2inv=1.0/(ac*ac);
    /* load the five rhs components once into locals */
    double r1=rhs(0,i,j,k);
    double r2=rhs(1,i,j,k);
    double r3=rhs(2,i,j,k);
    double r4=rhs(3,i,j,k);
    double r5=rhs(4,i,j,k);
    double t1=c2*ac2inv*(qs(i,j,k)*r1-uu*r2-vv*r3-ww*r4+r5);
    double t2=bt*ru1*(uu*r1-r2);
    double t3=(bt*ru1*ac)*t1;
    rhs(0,i,j,k)=r1-t1;
    rhs(1,i,j,k)=-ru1*(ww*r1-r4);
    rhs(2,i,j,k)=ru1*(vv*r1-r3);
    rhs(3,i,j,k)=-t2+t3;
    rhs(4,i,j,k)=t2+t3;
}

/*
 * ---------------------------------------------------------------------
 * verification routine
 * ---------------------------------------------------------------------
 * Compares the computed error/residual RMS norms against hard-coded
 * reference values for the known problem classes (S/W/A/B/C/D/E).
 * Outputs: *class_npb set to the detected class ('U' if unknown),
 * *verified set TRUE/FALSE.
 */
static void verify_gpu(int no_time_steps, char* class_npb, boolean* verified){
    double dt=dt_host;
    double xcrref[5], xceref[5], xcrdif[5], xcedif[5], epsilon,
    xce[5], xcr[5], dtref;
    int m;
    /*
     * ---------------------------------------------------------------------
     * tolerance level
     * ---------------------------------------------------------------------
     */
    epsilon=1.0e-08;
    /*
     * ---------------------------------------------------------------------
     * compute the error norm and the residual norm, and exit if not printing
     * ---------------------------------------------------------------------
     */
    error_norm_gpu(xce);
    compute_rhs_gpu();
    rhs_norm_gpu(xcr);
    for(m=0;m<5;m++){xcr[m]=xcr[m]/dt;}
    *class_npb='U';
    *verified=TRUE;
    for(m=0;m<5;m++){xcrref[m]=1.0;xceref[m]=1.0;}
    /*
     * ---------------------------------------------------------------------
     * reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
     * ---------------------------------------------------------------------
     */
    if((grid_points[0]==12)&&(grid_points[1]==12)&&(grid_points[2]==12)&&(no_time_steps==100)){
        *class_npb='S';
        dtref=1.5e-2;
        /* reference values of RMS-norms of residual */
        xcrref[0]=2.7470315451339479e-02;
        xcrref[1]=1.0360746705285417e-02;
        xcrref[2]=1.6235745065095532e-02;
        xcrref[3]=1.5840557224455615e-02;
        xcrref[4]=3.4849040609362460e-02;
        /* reference values of RMS-norms of solution error */
        xceref[0]=2.7289258557377227e-05;
        xceref[1]=1.0364446640837285e-05;
        xceref[2]=1.6154798287166471e-05;
        xceref[3]=1.5750704994480102e-05;
        xceref[4]=3.4177666183390531e-05;
    /*
     * ---------------------------------------------------------------------
     * reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==36)&&(grid_points[1]==36)&&(grid_points[2]==36)&&(no_time_steps==400)){
        *class_npb='W';
        dtref=1.5e-3;
        /* reference values of RMS-norms of residual */
        xcrref[0]=0.1893253733584e-02;
        xcrref[1]=0.1717075447775e-03;
        xcrref[2]=0.2778153350936e-03;
        xcrref[3]=0.2887475409984e-03;
        xcrref[4]=0.3143611161242e-02;
        /* reference values of RMS-norms of solution error */
        xceref[0]=0.7542088599534e-04;
        xceref[1]=0.6512852253086e-05;
        xceref[2]=0.1049092285688e-04;
        xceref[3]=0.1128838671535e-04;
        xceref[4]=0.1212845639773e-03;
    /*
     * ---------------------------------------------------------------------
     * reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==64)&&(grid_points[1]==64)&&(grid_points[2]==64)&&(no_time_steps==400)){
        *class_npb='A';
        dtref=1.5e-3;
        /* reference values of RMS-norms of residual. */
        xcrref[0]=2.4799822399300195;
        xcrref[1]=1.1276337964368832;
        xcrref[2]=1.5028977888770491;
        xcrref[3]=1.4217816211695179;
        xcrref[4]=2.1292113035138280;
        /* reference values of RMS-norms of solution error. */
        xceref[0]=1.0900140297820550e-04;
        xceref[1]=3.7343951769282091e-05;
        xceref[2]=5.0092785406541633e-05;
        xceref[3]=4.7671093939528255e-05;
        xceref[4]=1.3621613399213001e-04;
    /*
     * ---------------------------------------------------------------------
     * reference data for 102X102X102 grids after 400 time steps,
     * with DT = 1.0d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==102)&&(grid_points[1]==102)&&(grid_points[2]==102)&&(no_time_steps==400)){
        *class_npb='B';
        dtref=1.0e-3;
        /* reference values of RMS-norms of residual */
        xcrref[0]=0.6903293579998e+02;
        xcrref[1]=0.3095134488084e+02;
        xcrref[2]=0.4103336647017e+02;
        xcrref[3]=0.3864769009604e+02;
        xcrref[4]=0.5643482272596e+02;
        /* reference values of RMS-norms of solution error */
        xceref[0]=0.9810006190188e-02;
        xceref[1]=0.1022827905670e-02;
        xceref[2]=0.1720597911692e-02;
        xceref[3]=0.1694479428231e-02;
        xceref[4]=0.1847456263981e-01;
    /*
     * ---------------------------------------------------------------------
     * reference data for 162X162X162 grids after 400 time steps,
     * with DT = 0.67d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==162)&&(grid_points[1]==162)&&(grid_points[2]==162)&&(no_time_steps==400)){
        *class_npb='C';
        dtref=0.67e-3;
        /* reference values of RMS-norms of residual */
        xcrref[0]=0.5881691581829e+03;
        xcrref[1]=0.2454417603569e+03;
        xcrref[2]=0.3293829191851e+03;
        xcrref[3]=0.3081924971891e+03;
        xcrref[4]=0.4597223799176e+03;
        /* reference values of RMS-norms of solution error */
        xceref[0]=0.2598120500183e+00;
        xceref[1]=0.2590888922315e-01;
        xceref[2]=0.5132886416320e-01;
        xceref[3]=0.4806073419454e-01;
        xceref[4]=0.5483377491301e+00;
    /*
     * ---------------------------------------------------------------------
     * reference data for 408X408X408 grids after 500 time steps,
     * with DT = 0.3d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==408)&&(grid_points[1]==408)&&(grid_points[2]==408)&&(no_time_steps==500)){
        *class_npb='D';
        dtref=0.30e-3;
        /* reference values of RMS-norms of residual */
        xcrref[0]=0.1044696216887e+05;
        xcrref[1]=0.3204427762578e+04;
        xcrref[2]=0.4648680733032e+04;
        xcrref[3]=0.4238923283697e+04;
        xcrref[4]=0.7588412036136e+04;
        /* reference values of RMS-norms of solution error */
        xceref[0]=0.5089471423669e+01;
        xceref[1]=0.5323514855894e+00;
        xceref[2]=0.1187051008971e+01;
        xceref[3]=0.1083734951938e+01;
        xceref[4]=0.1164108338568e+02;
    /*
     * ---------------------------------------------------------------------
     * reference data for 1020X1020X1020 grids after 500 time steps,
     * with DT = 0.1d-03
     * ---------------------------------------------------------------------
     */
    }else if((grid_points[0]==1020)&&(grid_points[1]==1020)&&(grid_points[2]==1020)&&(no_time_steps==500)){
        *class_npb='E';
        dtref=0.10e-3;
        /* reference values of RMS-norms of residual */
        xcrref[0]=0.6255387422609e+05;
        xcrref[1]=0.1495317020012e+05;
        xcrref[2]=0.2347595750586e+05;
        xcrref[3]=0.2091099783534e+05;
        xcrref[4]=0.4770412841218e+05;
        /* reference values of RMS-norms of solution error */
        xceref[0]=0.6742735164909e+02;
        xceref[1]=0.5390656036938e+01;
        xceref[2]=0.1680647196477e+02;
        xceref[3]=0.1536963126457e+02;
        xceref[4]=0.1575330146156e+03;
    }else{
        *verified=FALSE;
    }
    /*
     * ---------------------------------------------------------------------
     * verification test for residuals if gridsize is one of
     * the defined grid sizes above (class .ne. 'U')
     * ---------------------------------------------------------------------
     * compute the difference of solution values and the known reference values
     * ---------------------------------------------------------------------
     */
    for(m=0; m<5; m++){
        xcrdif[m]=fabs((xcr[m]-xcrref[m])/xcrref[m]);
        xcedif[m]=fabs((xce[m]-xceref[m])/xceref[m]);
    }
    /*
     * ---------------------------------------------------------------------
     * output the comparison of computed results to known cases
     * ---------------------------------------------------------------------
     */
    if(*class_npb!='U'){
        printf(" Verification being performed for class %c\n",*class_npb);
        printf(" accuracy setting for epsilon = %20.13E\n",epsilon);
        /* the time step itself must also match the reference value */
        *verified=(fabs(dt-dtref)<=epsilon);
        if(!(*verified)){
            *class_npb='U';
            printf(" DT does not match the reference value of %15.8E\n",dtref);
        }
    }else{
        printf(" Unknown class\n");
    }
    if(*class_npb!='U'){
        printf(" Comparison of RMS-norms of residual\n");
    }else{
        printf(" RMS-norms of residual\n");
    }
    for(m=0;m<5;m++){
        if(*class_npb=='U'){
            printf(" %2d%20.13E\n",m+1,xcr[m]);
        }else if(xcrdif[m]<=epsilon){
            printf(" %2d%20.13E%20.13E%20.13E\n",m+1,xcr[m],xcrref[m],xcrdif[m]);
        }else {
            *verified=FALSE;
            printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",m+1,xcr[m],xcrref[m],xcrdif[m]);
        }
    }
    if(*class_npb!='U'){
        printf(" Comparison of RMS-norms of solution error\n");
    }else{
        printf(" RMS-norms of solution error\n");
    }
    for(m=0;m<5;m++){
        if(*class_npb=='U'){
            printf(" %2d%20.13E\n",m+1,xce[m]);
        }else if(xcedif[m]<=epsilon){
            printf(" %2d%20.13E%20.13E%20.13E\n",m+1,xce[m],xceref[m],xcedif[m]);
        }else{
            *verified = FALSE;
            printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",m+1,xce[m],xceref[m],xcedif[m]);
        }
    }
    if(*class_npb=='U'){
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    }else if(*verified){
        printf(" Verification Successful\n");
    }else{
        printf(" Verification failed\n");
    }
}

/*
 * ---------------------------------------------------------------------
 * this function performs the solution of the approximate factorization
 * step in the x-direction for all five matrix components
 * simultaneously. the thomas algorithm is employed to solve the
 * systems for the x-lines. boundary conditions are non-periodic
 * ---------------------------------------------------------------------
 * Launch shape: grid (1, nz) with ny threads per block, i.e. one
 * thread per (j,k) pair; the thread count is forced to ny so the
 * kernel's j index covers the whole y extent in a single block.
 */
static void x_solve_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_X_SOLVE);
#endif
    /* #KERNEL X SOLVE */
    int x_solve_threads_per_block;
    dim3 x_solve_blocks_per_grid(1, nz);
    if(THREADS_PER_BLOCK_ON_X_SOLVE != ny){
        x_solve_threads_per_block = ny;
    }
    else{
        x_solve_threads_per_block = THREADS_PER_BLOCK_ON_X_SOLVE;
    }
    hipLaunchKernelGGL(( x_solve_gpu_kernel), dim3( x_solve_blocks_per_grid), dim3( x_solve_threads_per_block), 0, 0, rho_i_device,
        us_device,
        speed_device,
        rhs_device,
        lhs_device,
        rhs_buffer_device,
        nx,
        ny,
        nz);
#if defined(PROFILING)
    timer_stop(PROFILING_X_SOLVE);
#endif
}

/*
 * X-solve kernel: each thread owns one x-line (fixed j,k) and runs the
 * Thomas algorithm along i. The lhs/lhsp/lhsm macros below lay the
 * factor coefficients out so that consecutive j (the fastest-varying
 * thread index) map to consecutive addresses; rtmp buffers rhs so the
 * forward-sweep writes are contiguous as well.
 */
__global__ static void x_solve_gpu_kernel(const double* rho_i,
    const double* us,
    const double* speed,
    double* rhs,
    double* lhs,
    double* rhstmp,
    const int nx,
    const int ny,
    const int nz){
#define lhs(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3)))]
#define lhsp(m,i,j,k)
lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m+4)))]
#define lhsm(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3+2)))]
#define rtmp(m,i,j,k) rhstmp[(j)+ny*((k)+nz*((i)+nx*(m)))]
    int i, j, k, m;
    /* thread-local state: cv/rhon are a 3-point sliding window of
     * velocities and eigenvalue bounds; _lhs/_lhsp/_rhs hold a rolling
     * 3-row window of the factor rows being eliminated */
    double rhon[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1;
    /* coalesced */
    j=blockIdx.x*blockDim.x+threadIdx.x+1;
    k=blockIdx.y*blockDim.y+threadIdx.y+1;
    /* uncoalesced */
    /* k=blockIdx.x*blockDim.x+threadIdx.x+1; */
    /* j=blockIdx.y*blockDim.y+threadIdx.y+1; */
    if((k>=nz-1) || (j>=ny-1)){return;}
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * computes the left hand side for the three x-factors
     * ---------------------------------------------------------------------
     * first fill the lhs for the u-eigenvalue
     * ---------------------------------------------------------------------
     */
    _lhs[0][0]=lhsp(0,0,j,k)=0.0;
    _lhs[0][1]=lhsp(1,0,j,k)=0.0;
    _lhs[0][2]=lhsp(2,0,j,k)=1.0;
    _lhs[0][3]=lhsp(3,0,j,k)=0.0;
    _lhs[0][4]=lhsp(4,0,j,k)=0.0;
    for(i=0; i<3; i++){
        fac1=c3c4*rho_i(i,j,k);
        rhon[i]=max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), dx1);
        cv[i]=us(i,j,k);
    }
    _lhs[1][0]=0.0;
    _lhs[1][1]=-dttx2*cv[0]-dttx1*rhon[0];
    _lhs[1][2]=1.0+c2dttx1*rhon[1];
    _lhs[1][3]=dttx2*cv[2]-dttx1*rhon[2];
    _lhs[1][4]=0.0;
    _lhs[1][2]+=comz5;
    _lhs[1][3]-=comz4;
    _lhs[1][4]+=comz1;
    for(m=0; m<5; m++){lhsp(m,1,j,k)=_lhs[1][m];}
    /* slide the 3-point windows forward by one */
    rhon[0]=rhon[1];
    rhon[1]=rhon[2];
    cv[0]=cv[1];
    cv[1]=cv[2];
    for(m=0; m<3; m++){
        _rhs[0][m]=rhs(m,0,j,k);
        _rhs[1][m]=rhs(m,1,j,k);
    }
    /*
     * ---------------------------------------------------------------------
     * FORWARD ELIMINATION
     * ---------------------------------------------------------------------
     * perform the thomas algorithm; first, FORWARD ELIMINATION
     * ---------------------------------------------------------------------
     */
    for(i=0; i<nx-2; i++){
        /*
         * ---------------------------------------------------------------------
         * first fill the lhs for the u-eigenvalue
         * ---------------------------------------------------------------------
         */
        if((i+2)==(nx-1)){
            /* last grid row: identity row (non-periodic boundary) */
            _lhs[2][0]=lhsp(0,i+2,j,k)=0.0;
            _lhs[2][1]=lhsp(1,i+2,j,k)=0.0;
            _lhs[2][2]=lhsp(2,i+2,j,k)=1.0;
            _lhs[2][3]=lhsp(3,i+2,j,k)=0.0;
            _lhs[2][4]=lhsp(4,i+2,j,k)=0.0;
        }else{
            fac1=c3c4*rho_i(i+3,j,k);
            rhon[2]=max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), dx1);
            cv[2]=us(i+3,j,k);
            _lhs[2][0]=0.0;
            _lhs[2][1]=-dttx2*cv[0]-dttx1*rhon[0];
            _lhs[2][2]=1.0+c2dttx1*rhon[1];
            _lhs[2][3]=dttx2*cv[2]-dttx1*rhon[2];
            _lhs[2][4]=0.0;
            /*
             * ---------------------------------------------------------------------
             * add fourth order dissipation
             * ---------------------------------------------------------------------
             */
            if((i+2)==(2)){
                _lhs[2][1]-=comz4;
                _lhs[2][2]+=comz6;
                _lhs[2][3]-=comz4;
                _lhs[2][4]+=comz1;
            }else if((i+2>=3) && (i+2<nx-3)){
                _lhs[2][0]+=comz1;
                _lhs[2][1]-=comz4;
                _lhs[2][2]+=comz6;
                _lhs[2][3]-=comz4;
                _lhs[2][4]+=comz1;
            }else if((i+2)==(nx-3)){
                _lhs[2][0]+=comz1;
                _lhs[2][1]-=comz4;
                _lhs[2][2]+=comz6;
                _lhs[2][3]-=comz4;
            }else if((i+2)==(nx-2)){
                _lhs[2][0]+=comz1;
                _lhs[2][1]-=comz4;
                _lhs[2][2]+=comz5;
            }
            /*
             * ---------------------------------------------------------------------
             * store computed lhs for later reuse
             * ---------------------------------------------------------------------
             */
            for(m=0;m<5;m++){lhsp(m,i+2,j,k)=_lhs[2][m];}
            rhon[0]=rhon[1];
            rhon[1]=rhon[2];
            cv[0]=cv[1];
            cv[1]=cv[2];
        }
        /*
         * ---------------------------------------------------------------------
         * load rhs values for current iteration
         * ---------------------------------------------------------------------
         */
        for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i+2,j,k);}
        /*
         * ---------------------------------------------------------------------
         * perform current iteration
         * ---------------------------------------------------------------------
         */
        fac1=1.0/_lhs[0][2];
        _lhs[0][3]*=fac1;
        _lhs[0][4]*=fac1;
        for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
        _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
        _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
        for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
        _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
        _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
        for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];}
        /*
         * ---------------------------------------------------------------------
         * store computed lhs and prepare data for next iteration
         * rhs is stored in a temp array such that write accesses are coalesced
         * ---------------------------------------------------------------------
         */
        lhs(3,i,j,k)=_lhs[0][3];
        lhs(4,i,j,k)=_lhs[0][4];
        for(m=0; m<5; m++){
            _lhs[0][m]=_lhs[1][m];
            _lhs[1][m]=_lhs[2][m];
        }
        for(m=0; m<3; m++){
            rtmp(m,i,j,k)=_rhs[0][m];
            _rhs[0][m]=_rhs[1][m];
            _rhs[1][m]=_rhs[2][m];
        }
    }
    /*
     * ---------------------------------------------------------------------
     * the last two rows in this zone are a bit different,
     * since they do not have two more rows available for the
     * elimination of off-diagonal entries
     * ---------------------------------------------------------------------
     */
    i=nx-2;
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
    /*
     * ---------------------------------------------------------------------
     * scale the last row immediately
     * ---------------------------------------------------------------------
     */
    fac1=1.0/_lhs[1][2];
    for(m=0;m<3;m++){_rhs[1][m]*=fac1;}
    lhs(3,nx-2,j,k)=_lhs[0][3];
    lhs(4,nx-2,j,k)=_lhs[0][4];
    /*
     * ---------------------------------------------------------------------
     * subsequently, fill the other factors (u+c), (u-c)
     * ---------------------------------------------------------------------
     */
    for(i=0;i<3;i++){cv[i]=speed(i,j,k);}
    for(m=0; m<5; m++){
        _lhsp[0][m]=_lhs[0][m]=lhsp(m,0,j,k);
        _lhsp[1][m]=_lhs[1][m]=lhsp(m,1,j,k);
    }
    _lhsp[1][1]-= dttx2*cv[0];
    _lhsp[1][3]+=dttx2*cv[2];
    _lhs[1][1]+=dttx2*cv[0];
    _lhs[1][3]-=dttx2*cv[2];
    cv[0]=cv[1];
    cv[1]=cv[2];
    _rhs[0][3]=rhs(3,0,j,k);
    _rhs[0][4]=rhs(4,0,j,k);
    _rhs[1][3]=rhs(3,1,j,k);
    _rhs[1][4]=rhs(4,1,j,k);
    /*
     * ---------------------------------------------------------------------
     * do the u+c and the u-c factors
     * ---------------------------------------------------------------------
     */
    for(i=0; i<nx-2; i++){
        /*
         * first, fill the other factors (u+c), (u-c)
         * ---------------------------------------------------------------------
         */
        for(m=0; m<5; m++){
            _lhsp[2][m]=_lhs[2][m]=lhsp(m,i+2,j,k);
        }
        _rhs[2][3]=rhs(3,i+2,j,k);
        _rhs[2][4]=rhs(4,i+2,j,k);
        if((i+2)<(nx-1)){
            cv[2]=speed(i+3,j,k);
            _lhsp[2][1]-=dttx2*cv[0];
            _lhsp[2][3]+=dttx2*cv[2];
            _lhs[2][1]+=dttx2*cv[0];
            _lhs[2][3]-=dttx2*cv[2];
            cv[0]=cv[1];
            cv[1]=cv[2];
        }
        /* component m=3 uses the (u+c) factor _lhsp */
        m=3;
        fac1=1.0/_lhsp[0][2];
        _lhsp[0][3]*=fac1;
        _lhsp[0][4]*=fac1;
        _rhs[0][m]*=fac1;
        _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
        _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
        _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
        _lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3];
        _lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4];
        _rhs[2][m]-=_lhsp[2][0]*_rhs[0][m];
        /* component m=4 uses the (u-c) factor _lhs */
        m=4;
        fac1=1.0/_lhs[0][2];
        _lhs[0][3]*=fac1;
        _lhs[0][4]*=fac1;
        _rhs[0][m]*=fac1;
        _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
        _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
        _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
        _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
        _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
        _rhs[2][m]-=_lhs[2][0]*_rhs[0][m];
        /*
         * ---------------------------------------------------------------------
         * store computed lhs and prepare data for next iteration
         * rhs is stored in a temp array such that write accesses are coalesced
         * ---------------------------------------------------------------------
         */
        for(m=3; m<5; m++){
            lhsp(m,i,j,k)=_lhsp[0][m];
            lhsm(m,i,j,k)=_lhs[0][m];
            rtmp(m,i,j,k)=_rhs[0][m];
            _rhs[0][m]=_rhs[1][m];
            _rhs[1][m]=_rhs[2][m];
        }
        for(m=0; m<5; m++){
            _lhsp[0][m]=_lhsp[1][m];
            _lhsp[1][m]=_lhsp[2][m];
            _lhs[0][m]=_lhs[1][m];
            _lhs[1][m]=_lhs[2][m];
        }
    }
    /*
     * ---------------------------------------------------------------------
     * and again the last two rows separately
     * ---------------------------------------------------------------------
     */
    i=nx-2;
    m=3;
    fac1=1.0/_lhsp[0][2];
    _lhsp[0][3]*=fac1;
    _lhsp[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
    _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
    _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
    m=4;
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
    /*
     * ---------------------------------------------------------------------
     * scale the last row immediately
     * ---------------------------------------------------------------------
     */
    _rhs[1][3]/=_lhsp[1][2];
    _rhs[1][4]/=_lhs[1][2];
    /*
     * ---------------------------------------------------------------------
     * BACKSUBSTITUTION
     * ---------------------------------------------------------------------
     */
    for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,nx-2,j,k)*_rhs[1][m];}
    _rhs[0][3]-=_lhsp[0][3]*_rhs[1][3];
    _rhs[0][4]-=_lhs[0][3]*_rhs[1][4];
    for(m=0; m<5; m++){
        _rhs[2][m]=_rhs[1][m];
        _rhs[1][m]=_rhs[0][m];
    }
    for(i=nx-3; i>=0; i--){
        /*
         * ---------------------------------------------------------------------
         * the first three factors
         * ---------------------------------------------------------------------
         */
        for(m=0; m<3; m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];}
        /*
         * ---------------------------------------------------------------------
         * and the remaining two
         * ---------------------------------------------------------------------
         */
        _rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3];
        _rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4];
        if(i+2<nx-1){
            /*
             * ---------------------------------------------------------------------
             * do the block-diagonal inversion
             * ---------------------------------------------------------------------
             */
            double r1=_rhs[2][0];
            double r2=_rhs[2][1];
            double r3=_rhs[2][2];
            double r4=_rhs[2][3];
            double r5=_rhs[2][4];
            double t1=bt*r3;
            double t2=0.5*(r4+r5);
            _rhs[2][0]=-r2;
            _rhs[2][1]=r1;
            _rhs[2][2]=bt*(r4-r5);
            _rhs[2][3]=-t1+t2;
            _rhs[2][4]=t1+t2;
        }
        for(m=0; m<5; m++){
            rhs(m,i+2,j,k)=_rhs[2][m];
            _rhs[2][m]=_rhs[1][m];
            _rhs[1][m]=_rhs[0][m];
        }
    }
    /*
     * ---------------------------------------------------------------------
     * do the block-diagonal inversion
     * ---------------------------------------------------------------------
     */
    double t1=bt*_rhs[2][2];
    double t2=0.5*(_rhs[2][3]+_rhs[2][4]);
    rhs(0,1,j,k)=-_rhs[2][1];
    rhs(1,1,j,k)=_rhs[2][0];
    rhs(2,1,j,k)=bt*(_rhs[2][3]-_rhs[2][4]);
    rhs(3,1,j,k)=-t1+t2;
    rhs(4,1,j,k)=t1+t2;
    for(m=0;m<5;m++){rhs(m,0,j,k)=_rhs[1][m];}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
}

/*
 * ---------------------------------------------------------------------
 * this function performs the solution of the approximate factorization
 * step in the y-direction for all five matrix components
 * simultaneously. the thomas algorithm is employed to solve the
 * systems for the y-lines. boundary conditions are non-periodic
 * ---------------------------------------------------------------------
 * Launch shape: grid (1, nz) with nx threads per block, i.e. one
 * thread per (i,k) pair; the thread count is forced to nx so the
 * kernel's i index covers the whole x extent in a single block.
 */
static void y_solve_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_Y_SOLVE);
#endif
    /* #KERNEL Y SOLVE */
    int y_solve_threads_per_block;
    dim3 y_solve_blocks_per_grid(1, nz);
    if(THREADS_PER_BLOCK_ON_Y_SOLVE != nx){
        y_solve_threads_per_block = nx;
    }
    else{
        y_solve_threads_per_block = THREADS_PER_BLOCK_ON_Y_SOLVE;
    }
    hipLaunchKernelGGL(( y_solve_gpu_kernel), dim3( y_solve_blocks_per_grid), dim3( y_solve_threads_per_block), 0, 0, rho_i_device,
        vs_device,
        speed_device,
        rhs_device,
        lhs_device,
        rhs_buffer_device,
        nx,
        ny,
        nz);
#if defined(PROFILING)
    timer_stop(PROFILING_Y_SOLVE);
#endif
}

/*
 * Y-solve kernel: each thread owns one y-line (fixed i,k) and runs the
 * Thomas algorithm along j; same structure as x_solve_gpu_kernel with
 * the roles of i and j exchanged and the y-direction constants.
 */
__global__ static void y_solve_gpu_kernel(const double* rho_i,
    const double* vs,
    const double* speed,
    double* rhs,
    double* lhs,
    double* rhstmp,
    const int nx,
    const int ny,
    const int nz){
#define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3)))]
#define lhsp(m,i,j,k)
lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m+4)))] #define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3+2)))] #define rtmp(m,i,j,k) rhstmp[(i)+nx*((k)+nz*((j)+ny*(m)))] int i, j, k, m; double rhoq[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1; /* coalesced */ i=blockIdx.x*blockDim.x+threadIdx.x+1; k=blockIdx.y*blockDim.y+threadIdx.y+1; /* uncoalesced */ /* k=blockIdx.x*blockDim.x+threadIdx.x+1; */ /* i=blockIdx.y*blockDim.y+threadIdx.y+1; */ if((k>=(nz-1))||(i>=(nx-1))){return;} using namespace constants_device; /* * --------------------------------------------------------------------- * computes the left hand side for the three y-factors * --------------------------------------------------------------------- * first fill the lhs for the u-eigenvalue * --------------------------------------------------------------------- */ _lhs[0][0]=lhsp(0,i,0,k)=0.0; _lhs[0][1]=lhsp(1,i,0,k)=0.0; _lhs[0][2]=lhsp(2,i,0,k)=1.0; _lhs[0][3]=lhsp(3,i,0,k)=0.0; _lhs[0][4]=lhsp(4,i,0,k)=0.0; for(j=0; j<3; j++){ fac1=c3c4*rho_i(i,j,k); rhoq[j]=max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), dy1); cv[j]=vs(i,j,k); } _lhs[1][0]=0.0; _lhs[1][1]=-dtty2*cv[0]-dtty1*rhoq[0]; _lhs[1][2]=1.0+c2dtty1*rhoq[1]; _lhs[1][3]=dtty2*cv[2]-dtty1*rhoq[2]; _lhs[1][4]=0.0; _lhs[1][2]+=comz5; _lhs[1][3]-=comz4; _lhs[1][4]+=comz1; for(m=0;m<5;m++){lhsp(m,i,1,k)=_lhs[1][m];} rhoq[0]=rhoq[1]; rhoq[1]=rhoq[2]; cv[0]=cv[1]; cv[1]=cv[2]; for(m=0; m<3; m++){ _rhs[0][m]=rhs(m,i,0,k); _rhs[1][m]=rhs(m,i,1,k); } /* * --------------------------------------------------------------------- * FORWARD ELIMINATION * --------------------------------------------------------------------- */ for(j=0; j<ny-2; j++){ /* * --------------------------------------------------------------------- * first fill the lhs for the u-eigenvalue * --------------------------------------------------------------------- */ if((j+2)==(ny-1)){ _lhs[2][0]=lhsp(0,i,j+2,k)=0.0; _lhs[2][1]=lhsp(1,i,j+2,k)=0.0; 
_lhs[2][2]=lhsp(2,i,j+2,k)=1.0; _lhs[2][3]=lhsp(3,i,j+2,k)=0.0; _lhs[2][4]=lhsp(4,i,j+2,k)=0.0; }else{ fac1=c3c4*rho_i(i,j+3,k); rhoq[2]=max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), dy1); cv[2]=vs(i,j+3,k); _lhs[2][0]=0.0; _lhs[2][1]=-dtty2*cv[0]-dtty1*rhoq[0]; _lhs[2][2]=1.0+c2dtty1*rhoq[1]; _lhs[2][3]=dtty2*cv[2]-dtty1*rhoq[2]; _lhs[2][4]=0.0; /* * --------------------------------------------------------------------- * add fourth order dissipation * --------------------------------------------------------------------- */ if((j+2)==(2)){ _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; _lhs[2][4]+=comz1; }else if(((j+2)>=(3))&&((j+2)<(ny-3))){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; _lhs[2][4]+=comz1; }else if((j+2)==(ny-3)){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; }else if((j+2)==(ny-2)){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz5; } /* * --------------------------------------------------------------------- * store computed lhs for later reuse * --------------------------------------------------------------------- */ for(m=0;m<5;m++){lhsp(m,i,j+2,k)=_lhs[2][m];} rhoq[0]=rhoq[1]; rhoq[1]=rhoq[2]; cv[0]=cv[1]; cv[1]=cv[2]; } /* * --------------------------------------------------------------------- * load rhs values for current iteration * --------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i,j+2,k);} /* * --------------------------------------------------------------------- * perform current iteration * --------------------------------------------------------------------- */ fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; for(m=0;m<3;m++){_rhs[0][m]*=fac1;} _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];} _lhs[2][1]-=_lhs[2][0]*_lhs[0][3]; _lhs[2][2]-=_lhs[2][0]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];} 
/* * --------------------------------------------------------------------- * store computed lhs and prepare data for next iteration * rhs is stored in a temp array such that write accesses are coalesced * --------------------------------------------------------------------- */ lhs(3,i,j,k)=_lhs[0][3]; lhs(4,i,j,k)=_lhs[0][4]; for(m=0; m<5; m++){ _lhs[0][m]=_lhs[1][m]; _lhs[1][m]=_lhs[2][m]; } for(m=0; m<3; m++){ rtmp(m,i,j,k)=_rhs[0][m]; _rhs[0][m]=_rhs[1][m]; _rhs[1][m]=_rhs[2][m]; } } /* * --------------------------------------------------------------------- * the last two rows in this zone are a bit different, * since they do not have two more rows available for the * elimination of off-diagonal entries * --------------------------------------------------------------------- */ j=ny-2; fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; for(m=0;m<3;m++){_rhs[0][m]*=fac1;} _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];} /* * --------------------------------------------------------------------- * scale the last row immediately * --------------------------------------------------------------------- */ fac1=1.0/_lhs[1][2]; for(m=0;m<3;m++){_rhs[1][m]*=fac1;} lhs(3,i,ny-2,k)=_lhs[0][3]; lhs(4,i,ny-2,k)=_lhs[0][4]; /* * --------------------------------------------------------------------- * do the u+c and the u-c factors * --------------------------------------------------------------------- */ for(j=0;j<3;j++){cv[j]=speed(i,j,k);} for(m=0; m<5; m++){ _lhsp[0][m]=_lhs[0][m]=lhsp(m,i,0,k); _lhsp[1][m]=_lhs[1][m]=lhsp(m,i,1,k); } _lhsp[1][1]-=dtty2*cv[0]; _lhsp[1][3]+=dtty2*cv[2]; _lhs[1][1]+=dtty2*cv[0]; _lhs[1][3]-=dtty2*cv[2]; cv[0]=cv[1]; cv[1]=cv[2]; _rhs[0][3]=rhs(3,i,0,k); _rhs[0][4]=rhs(4,i,0,k); _rhs[1][3]=rhs(3,i,1,k); _rhs[1][4]=rhs(4,i,1,k); for(j=0; j<ny-2; j++){ for(m=0; m<5; m++){ _lhsp[2][m]=_lhs[2][m]=lhsp(m,i,j+2,k); } _rhs[2][3]=rhs(3,i,j+2,k); _rhs[2][4]=rhs(4,i,j+2,k); 
if((j+2)<(ny-1)){ cv[2]=speed(i,j+3,k); _lhsp[2][1]-=dtty2*cv[0]; _lhsp[2][3]+=dtty2*cv[2]; _lhs[2][1]+=dtty2*cv[0]; _lhs[2][3]-=dtty2*cv[2]; cv[0]=cv[1]; cv[1]=cv[2]; } fac1=1.0/_lhsp[0][2]; m=3; _lhsp[0][3]*=fac1; _lhsp[0][4]*=fac1; _rhs[0][m]*=fac1; _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3]; _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4]; _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m]; _lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3]; _lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4]; _rhs[2][m]-=_lhsp[2][0]*_rhs[0][m]; m=4; fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; _rhs[0][m]*=fac1; _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; _rhs[1][m]-=_lhs[1][1]*_rhs[0][m]; _lhs[2][1]-=_lhs[2][0]*_lhs[0][3]; _lhs[2][2]-=_lhs[2][0]*_lhs[0][4]; _rhs[2][m]-=_lhs[2][0]*_rhs[0][m]; /* * --------------------------------------------------------------------- * store computed lhs and prepare data for next iteration * rhs is stored in a temp array such that write accesses are coalesced * --------------------------------------------------------------------- */ for(m=3; m<5; m++){ lhsp(m,i,j,k)=_lhsp[0][m]; lhsm(m,i,j,k)=_lhs[0][m]; rtmp(m,i,j,k)=_rhs[0][m]; _rhs[0][m]=_rhs[1][m]; _rhs[1][m]=_rhs[2][m]; } for(m=0; m<5; m++){ _lhsp[0][m]=_lhsp[1][m]; _lhsp[1][m]=_lhsp[2][m]; _lhs[0][m]=_lhs[1][m]; _lhs[1][m]=_lhs[2][m]; } } /* * --------------------------------------------------------------------- * and again the last two rows separately * --------------------------------------------------------------------- */ j=ny-2; m=3; fac1=1.0/_lhsp[0][2]; _lhsp[0][3]*=fac1; _lhsp[0][4]*=fac1; _rhs[0][m]*=fac1; _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3]; _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4]; _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m]; m=4; fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; _rhs[0][m]*=fac1; _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; _rhs[1][m]-=_lhs[1][1]*_rhs[0][m]; /* * --------------------------------------------------------------------- * scale the last row immediately 
* --------------------------------------------------------------------- */ _rhs[1][3]/=_lhsp[1][2]; _rhs[1][4]/=_lhs[1][2]; /* * --------------------------------------------------------------------- * BACKSUBSTITUTION * --------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,i,ny-2,k)*_rhs[1][m];} _rhs[0][3]-=_lhsp[0][3]*_rhs[1][3]; _rhs[0][4]-=_lhs[0][3]*_rhs[1][4]; for(m=0; m<5; m++){ _rhs[2][m]=_rhs[1][m]; _rhs[1][m]=_rhs[0][m]; } for(j=ny-3; j>=0; j--){ /* * --------------------------------------------------------------------- * the first three factors * --------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];} /* * --------------------------------------------------------------------- * and the remaining two * --------------------------------------------------------------------- */ _rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3]; _rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4]; if((j+2)<(ny-1)){ /* * --------------------------------------------------------------------- * do the block-diagonal inversion * --------------------------------------------------------------------- */ double r1=_rhs[2][0]; double r2=_rhs[2][1]; double r3=_rhs[2][2]; double r4=_rhs[2][3]; double r5=_rhs[2][4]; double t1=bt*r1; double t2=0.5*(r4+r5); _rhs[2][0]=bt*(r4-r5); _rhs[2][1]=-r3; _rhs[2][2]=r2; _rhs[2][3]=-t1+t2; _rhs[2][4]=t1+t2; } for(m=0; m<5; m++){ rhs(m,i,j+2,k)=_rhs[2][m]; _rhs[2][m]=_rhs[1][m]; _rhs[1][m]=_rhs[0][m]; } } /* * --------------------------------------------------------------------- * do the block-diagonal inversion * --------------------------------------------------------------------- */ double t1=bt*_rhs[2][0]; double t2=0.5*(_rhs[2][3]+_rhs[2][4]); rhs(0,i,1,k)=bt*(_rhs[2][3]-_rhs[2][4]); rhs(1,i,1,k)=-_rhs[2][2]; rhs(2,i,1,k)=_rhs[2][1]; 
/* (tail of y_solve kernel, which begins above this chunk) finish the
 * block-diagonal inversion for the j==1 plane, then write back the j==0
 * boundary row that was carried in _rhs[1] */
rhs(3,i,1,k)=-t1+t2; rhs(4,i,1,k)=t1+t2;
for(m=0;m<5;m++){rhs(m,i,0,k)=_rhs[1][m];}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
}
/*
 * ---------------------------------------------------------------------
 * this function performs the solution of the approximate factorization
 * step in the z-direction for all five matrix components
 * simultaneously. The Thomas algorithm is employed to solve the
 * systems for the z-lines. Boundary conditions are non-periodic
 * ---------------------------------------------------------------------
 */
static void z_solve_gpu(){
#if defined(PROFILING)
	timer_start(PROFILING_Z_SOLVE);
#endif
	/* #KERNEL Z SOLVE */
	int z_solve_threads_per_block;
	/* grid is (1, ny) blocks with nx threads each, so in the kernel
	 * i = threadIdx.x+1 and j = blockIdx.y+1: one thread per (i,j) z-line */
	dim3 z_solve_blocks_per_grid(1, ny);
	/* NOTE(review): both branches assign nx (the else only runs when
	 * THREADS_PER_BLOCK_ON_Z_SOLVE == nx), so the launch always uses nx
	 * threads per block — the configured value never takes effect here */
	if(THREADS_PER_BLOCK_ON_Z_SOLVE != nx){
		z_solve_threads_per_block = nx;
	}
	else{
		z_solve_threads_per_block = THREADS_PER_BLOCK_ON_Z_SOLVE;
	}
	hipLaunchKernelGGL(( z_solve_gpu_kernel), dim3( z_solve_blocks_per_grid), dim3( z_solve_threads_per_block), 0, 0,
			rho_i_device,
			us_device,
			vs_device,
			ws_device,
			speed_device,
			qs_device,
			u_device,
			rhs_device,
			lhs_device,
			rhs_buffer_device,
			nx,
			ny,
			nz);
#if defined(PROFILING)
	timer_stop(PROFILING_Z_SOLVE);
#endif
}

/* z-direction Thomas-algorithm sweep; one thread handles the whole z-line at
 * its (i,j). lhs/lhsp/lhsm all alias regions of the single `lhs` buffer at
 * different m offsets (see macros); rtmp is the coalesced temp for rhs. */
__global__ static void z_solve_gpu_kernel(const double* rho_i,
		const double* us,
		const double* vs,
		const double* ws,
		const double* speed,
		const double* qs,
		const double* u,
		double* rhs,
		double* lhs,
		double* rhstmp,
		const int nx,
		const int ny,
		const int nz){
#define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3)))]
#define lhsp(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m+4)))]
#define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3+2)))]
#define rtmp(m,i,j,k) rhstmp[(i)+nx*((j)+ny*((k)+nz*(m)))]
	int i, j, k, m;
	/* rhos/cv/_lhs/_lhsp/_rhs hold a 3-row sliding window over k */
	double rhos[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1;
	/* coalesced */
	i=blockIdx.x*blockDim.x+threadIdx.x+1;
	j=blockIdx.y*blockDim.y+threadIdx.y+1;
	/* uncoalesced */
	/* j=blockIdx.x*blockDim.x+threadIdx.x+1; */
	/* i=blockIdx.y*blockDim.y+threadIdx.y+1; */
	/* guard: interior points only (1..nx-2, 1..ny-2) */
	if((j>=(ny-1))||(i>=(nx-1))){return;}
	using namespace constants_device;
	/*
	 * ---------------------------------------------------------------------
	 * computes the left hand side for the three z-factors
	 * ---------------------------------------------------------------------
	 * first fill the lhs for the u-eigenvalue (k=0 boundary row is identity)
	 * ---------------------------------------------------------------------
	 */
	_lhs[0][0]=lhsp(0,i,j,0)=0.0;
	_lhs[0][1]=lhsp(1,i,j,0)=0.0;
	_lhs[0][2]=lhsp(2,i,j,0)=1.0;
	_lhs[0][3]=lhsp(3,i,j,0)=0.0;
	_lhs[0][4]=lhsp(4,i,j,0)=0.0;
	for(k=0; k<3; k++){
		fac1=c3c4*rho_i(i,j,k);
		rhos[k]=max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), dz1);
		cv[k]=ws(i,j,k);
	}
	_lhs[1][0]=0.0;
	_lhs[1][1]=-dttz2*cv[0]-dttz1*rhos[0];
	_lhs[1][2]=1.0+c2dttz1*rhos[1];
	_lhs[1][3]=dttz2*cv[2]-dttz1*rhos[2];
	_lhs[1][4]=0.0;
	/* second-order dissipation terms for the k=1 row */
	_lhs[1][2]+=comz5;
	_lhs[1][3]-=comz4;
	_lhs[1][4]+=comz1;
	for(m=0; m<5; m++){lhsp(m,i,j,1)=_lhs[1][m];}
	rhos[0]=rhos[1];
	rhos[1]=rhos[2];
	cv[0]=cv[1];
	cv[1]=cv[2];
	for(m=0; m<3; m++){
		_rhs[0][m]=rhs(m,i,j,0);
		_rhs[1][m]=rhs(m,i,j,1);
	}
	/*
	 * ---------------------------------------------------------------------
	 * FORWARD ELIMINATION
	 * ---------------------------------------------------------------------
	 */
	for(k=0; k<nz-2; k++){
		/*
		 * ---------------------------------------------------------------------
		 * first fill the lhs for the u-eigenvalue
		 * ---------------------------------------------------------------------
		 */
		if((k+2)==(nz-1)){
			/* k=nz-1 boundary row is identity */
			_lhs[2][0]=lhsp(0,i,j,k+2)=0.0;
			_lhs[2][1]=lhsp(1,i,j,k+2)=0.0;
			_lhs[2][2]=lhsp(2,i,j,k+2)=1.0;
			_lhs[2][3]=lhsp(3,i,j,k+2)=0.0;
			_lhs[2][4]=lhsp(4,i,j,k+2)=0.0;
		}else{
			fac1=c3c4*rho_i(i,j,k+3);
			rhos[2]=max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), dz1);
			cv[2]=ws(i,j,k+3);
			_lhs[2][0]=0.0;
			_lhs[2][1]=-dttz2*cv[0]-dttz1*rhos[0];
			_lhs[2][2]=1.0+c2dttz1*rhos[1];
			_lhs[2][3]=dttz2*cv[2]-dttz1*rhos[2];
			_lhs[2][4]=0.0;
			/*
			 * ---------------------------------------------------------------------
			 * add fourth order dissipation (stencil narrows near both boundaries)
			 * ---------------------------------------------------------------------
			 */
			if((k+2)==(2)){
				_lhs[2][1]-=comz4;
				_lhs[2][2]+=comz6;
				_lhs[2][3]-=comz4;
				_lhs[2][4]+=comz1;
			}else if(((k+2)>=(3))&&((k+2)<(nz-3))){
				_lhs[2][0]+=comz1;
				_lhs[2][1]-=comz4;
				_lhs[2][2]+=comz6;
				_lhs[2][3]-=comz4;
				_lhs[2][4]+=comz1;
			}else if((k+2)==(nz-3)){
				_lhs[2][0]+=comz1;
				_lhs[2][1]-=comz4;
				_lhs[2][2]+=comz6;
				_lhs[2][3]-=comz4;
			}else if((k+2)==(nz-2)){
				_lhs[2][0]+=comz1;
				_lhs[2][1]-=comz4;
				_lhs[2][2]+=comz5;
			}
			/*
			 * ---------------------------------------------------------------------
			 * store computed lhs for later reuse (by the u+c / u-c pass below)
			 * ---------------------------------------------------------------------
			 */
			for(m=0;m<5;m++){lhsp(m,i,j,k+2)=_lhs[2][m];}
			rhos[0]=rhos[1];
			rhos[1]=rhos[2];
			cv[0]=cv[1];
			cv[1]=cv[2];
		}
		/*
		 * ---------------------------------------------------------------------
		 * load rhs values for current iteration
		 * ---------------------------------------------------------------------
		 */
		for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i,j,k+2);}
		/*
		 * ---------------------------------------------------------------------
		 * perform current iteration: eliminate sub-diagonals of rows k+1, k+2
		 * ---------------------------------------------------------------------
		 */
		fac1=1.0/_lhs[0][2];
		_lhs[0][3]*=fac1;
		_lhs[0][4]*=fac1;
		for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
		_lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
		_lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
		for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
		_lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
		_lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
		for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];}
		/*
		 * ---------------------------------------------------------------------
		 * store computed lhs and prepare data for next iteration
		 * rhs is stored in a temp array such that write accesses are coalesced
		 * ---------------------------------------------------------------------
		 */
		lhs(3,i,j,k)=_lhs[0][3];
		lhs(4,i,j,k)=_lhs[0][4];
		for(m=0; m<5; m++){
			_lhs[0][m]=_lhs[1][m];
			_lhs[1][m]=_lhs[2][m];
		}
		for(m=0; m<3; m++){
			rtmp(m,i,j,k)=_rhs[0][m];
			_rhs[0][m]=_rhs[1][m];
			_rhs[1][m]=_rhs[2][m];
		}
	}
	/*
	 * ---------------------------------------------------------------------
	 * the last two rows in this zone are a bit different,
	 * since they do not have two more rows available for the
	 * elimination of off-diagonal entries
	 * ---------------------------------------------------------------------
	 */
	k=nz-2;
	fac1=1.0/_lhs[0][2];
	_lhs[0][3]*=fac1;
	_lhs[0][4]*=fac1;
	for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
	_lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
	_lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
	for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
	/*
	 * ---------------------------------------------------------------------
	 * scale the last row immediately
	 * ---------------------------------------------------------------------
	 */
	fac1=1.0/_lhs[1][2];
	for(m=0;m<3;m++){_rhs[1][m]*=fac1;}
	lhs(3,i,j,k)=_lhs[0][3];
	lhs(4,i,j,k)=_lhs[0][4];
	/*
	 * ---------------------------------------------------------------------
	 * subsequently, fill the other factors (u+c), (u-c)
	 * starting from the u-eigenvalue lhs cached in lhsp above
	 * ---------------------------------------------------------------------
	 */
	for(k=0;k<3;k++){cv[k]=speed(i,j,k);}
	for(m=0;m<5;m++){
		_lhsp[0][m]=_lhs[0][m]=lhsp(m,i,j,0);
		_lhsp[1][m]=_lhs[1][m]=lhsp(m,i,j,1);
	}
	_lhsp[1][1]-=dttz2*cv[0];
	_lhsp[1][3]+=dttz2*cv[2];
	_lhs[1][1]+=dttz2*cv[0];
	_lhs[1][3]-=dttz2*cv[2];
	cv[0]=cv[1];
	cv[1]=cv[2];
	_rhs[0][3]=rhs(3,i,j,0);
	_rhs[0][4]=rhs(4,i,j,0);
	_rhs[1][3]=rhs(3,i,j,1);
	_rhs[1][4]=rhs(4,i,j,1);
	/*
	 * ---------------------------------------------------------------------
	 * do the u+c and the u-c factors (forward elimination for m=3 and m=4)
	 * ---------------------------------------------------------------------
	 */
	for(k=0; k<nz-2; k++){
		/*
		 * first, fill the other factors (u+c), (u-c)
		 * ---------------------------------------------------------------------
		 */
		for(m=0; m<5; m++){
			_lhsp[2][m]=_lhs[2][m]=lhsp(m,i,j,k+2);
		}
		_rhs[2][3]=rhs(3,i,j,k+2);
		_rhs[2][4]=rhs(4,i,j,k+2);
		if((k+2)<(nz-1)){
			cv[2]=speed(i,j,k+3);
			_lhsp[2][1]-=dttz2*cv[0];
			_lhsp[2][3]+=dttz2*cv[2];
			_lhs[2][1]+=dttz2*cv[0];
			_lhs[2][3]-=dttz2*cv[2];
			cv[0]=cv[1];
			cv[1]=cv[2];
		}
		/* m=3: u+c factor uses _lhsp */
		m=3;
		fac1=1.0/_lhsp[0][2];
		_lhsp[0][3]*=fac1;
		_lhsp[0][4]*=fac1;
		_rhs[0][m]*=fac1;
		_lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
		_lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
		_rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
		_lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3];
		_lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4];
		_rhs[2][m]-=_lhsp[2][0]*_rhs[0][m];
		/* m=4: u-c factor uses _lhs */
		m=4;
		fac1=1.0/_lhs[0][2];
		_lhs[0][3]*= fac1;
		_lhs[0][4]*= fac1;
		_rhs[0][m]*= fac1;
		_lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
		_lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
		_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
		_lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
		_lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
		_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];
		/*
		 * ---------------------------------------------------------------------
		 * store computed lhs and prepare data for next iteration
		 * rhs is stored in a temp array such that write accesses are coalesced
		 * ---------------------------------------------------------------------
		 */
		for(m=3; m<5; m++){
			lhsp(m,i,j,k)=_lhsp[0][m];
			lhsm(m,i,j,k)=_lhs[0][m];
			rtmp(m,i,j,k)=_rhs[0][m];
			_rhs[0][m]=_rhs[1][m];
			_rhs[1][m]=_rhs[2][m];
		}
		for(m=0; m<5; m++){
			_lhsp[0][m]=_lhsp[1][m];
			_lhsp[1][m]=_lhsp[2][m];
			_lhs[0][m]=_lhs[1][m];
			_lhs[1][m]=_lhs[2][m];
		}
	}
	/*
	 * ---------------------------------------------------------------------
	 * and again the last two rows separately
	 * ---------------------------------------------------------------------
	 */
	k=nz-2;
	m=3;
	fac1=1.0/_lhsp[0][2];
	_lhsp[0][3]*=fac1;
	_lhsp[0][4]*=fac1;
	_rhs[0][m]*=fac1;
	_lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
	_lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
	_rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
	m=4;
	fac1=1.0/_lhs[0][2];
	_lhs[0][3]*=fac1;
	_lhs[0][4]*=fac1;
	_rhs[0][m]*=fac1;
	_lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
	_lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
	_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
	/*
	 * ---------------------------------------------------------------------
	 * scale the last row immediately
	 * ---------------------------------------------------------------------
	 */
	_rhs[1][3]/=_lhsp[1][2];
	_rhs[1][4]/=_lhs[1][2];
	/*
	 * ---------------------------------------------------------------------
	 * BACKSUBSTITUTION
	 * ---------------------------------------------------------------------
	 */
	for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,i,j,nz-2)*_rhs[1][m];}
	_rhs[0][3]-=_lhsp[0][3]*_rhs[1][3];
	_rhs[0][4]-=_lhs[0][3]*_rhs[1][4];
	for(m=0; m<5; m++){
		_rhs[2][m]=_rhs[1][m];
		_rhs[1][m]=_rhs[0][m];
	}
	for(k=nz-3; k>=0; k--){
		/*
		 * ---------------------------------------------------------------------
		 * the first three factors
		 * ---------------------------------------------------------------------
		 */
		for(m=0;m<3;m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];}
		/*
		 * ---------------------------------------------------------------------
		 * and the remaining two
		 * ---------------------------------------------------------------------
		 */
		_rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3];
		_rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4];
		if((k+2)<(nz-1)){
			/*
			 * ---------------------------------------------------------------------
			 * do the block-diagonal inversion (fused into the back-sweep; applied
			 * to row k+2 as it leaves the 3-row window)
			 * ---------------------------------------------------------------------
			 */
			double xvel=us(i,j,k+2);
			double yvel=vs(i,j,k+2);
			double zvel=ws(i,j,k+2);
			double ac=speed(i,j,k+2);
			double uzik1=u(0,i,j,k+2);
			double t1=(bt*uzik1)/ac*(_rhs[2][3]+_rhs[2][4]);
			double t2=_rhs[2][2]+t1;
			double t3=bt*uzik1*(_rhs[2][3]-_rhs[2][4]);
			/* _rhs[2][4] computed first: it reads [0] and [1] before they
			 * are overwritten below */
			_rhs[2][4]=uzik1*(-xvel*_rhs[2][1]+yvel*_rhs[2][0])+qs(i,j,k+2)*t2+c2iv*(ac*ac)*t1+zvel*t3;
			_rhs[2][3]=zvel*t2+t3;
			_rhs[2][2]=uzik1*_rhs[2][0]+yvel*t2;
			_rhs[2][1]=-uzik1*_rhs[2][1]+xvel*t2;
			_rhs[2][0]=t2;
		}
		for(m=0; m<5; m++){
			rhs(m,i,j,k+2)=_rhs[2][m];
			_rhs[2][m]=_rhs[1][m];
			_rhs[1][m]=_rhs[0][m];
		}
	}
	/*
	 * ---------------------------------------------------------------------
	 * do the block-diagonal inversion for the remaining k=1 row,
	 * writing directly to rhs; then copy the k=0 boundary row through
	 * ---------------------------------------------------------------------
	 */
	double xvel=us(i,j,1);
	double yvel=vs(i,j,1);
	double zvel=ws(i,j,1);
	double ac=speed(i,j,1);
	double uzik1=u(0,i,j,1);
	double t1=(bt*uzik1)/ac*(_rhs[2][3]+_rhs[2][4]);
	double t2=_rhs[2][2]+t1;
	double t3=bt*uzik1*(_rhs[2][3]-_rhs[2][4]);
	rhs(4,i,j,1)=uzik1*(-xvel*_rhs[2][1]+yvel*_rhs[2][0])+qs(i,j,1)*t2+c2iv*(ac*ac)*t1+zvel*t3;
	rhs(3,i,j,1)=zvel*t2+t3;
	rhs(2,i,j,1)=uzik1*_rhs[2][0]+yvel*t2;
	rhs(1,i,j,1)=-uzik1*_rhs[2][1]+xvel*t2;
	rhs(0,i,j,1)=t2;
	for(m=0;m<5;m++){rhs(m,i,j,0)=_rhs[1][m];}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
}
f948739511693b9d6ab1eea038be99505455f227.cu
/* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * R. Van der Wijngaart * W. Saphir * H. 
Jin * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <dalvangriebler@gmail.com> * Gabriell Araujo <hexenoften@gmail.com> * Júnior Löff <loffjh@gmail.com> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <hexenoften@gmail.com> * * ------------------------------------------------------------------------------ */ #include <cuda.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define IMAX (PROBLEM_SIZE) #define JMAX (PROBLEM_SIZE) #define KMAX (PROBLEM_SIZE) #define IMAXP (IMAX/2*2) #define JMAXP (JMAX/2*2) #define PROFILING_TOTAL_TIME (0) #define PROFILING_ADD (1) #define PROFILING_COMPUTE_RHS_1 (2) #define PROFILING_COMPUTE_RHS_2 (3) #define PROFILING_ERROR_NORM_1 (4) #define PROFILING_ERROR_NORM_2 (5) #define PROFILING_EXACT_RHS_1 (6) #define PROFILING_EXACT_RHS_2 (7) #define PROFILING_EXACT_RHS_3 (8) #define PROFILING_EXACT_RHS_4 (9) #define PROFILING_INITIALIZE (10) #define PROFILING_RHS_NORM_1 (11) #define PROFILING_RHS_NORM_2 (12) #define PROFILING_TXINVR (13) #define PROFILING_X_SOLVE (14) #define PROFILING_Y_SOLVE (15) #define PROFILING_Z_SOLVE (16) /* gpu linear pattern */ #define u(m,i,j,k) u[(i)+nx*((j)+ny*((k)+nz*(m)))] #define forcing(m,i,j,k) forcing[(i)+nx*((j)+ny*((k)+nz*(m)))] #define rhs(m,i,j,k) rhs[m+(i)*5+(j)*5*nx+(k)*5*nx*ny] #define rho_i(i,j,k) rho_i[i+(j)*nx+(k)*nx*ny] #define us(i,j,k) us[i+(j)*nx+(k)*nx*ny] #define vs(i,j,k) vs[i+(j)*nx+(k)*nx*ny] #define ws(i,j,k) ws[i+(j)*nx+(k)*nx*ny] #define square(i,j,k) square[i+(j)*nx+(k)*nx*ny] #define qs(i,j,k) qs[i+(j)*nx+(k)*nx*ny] #define 
speed(i,j,k) speed[i+(j)*nx+(k)*nx*ny] /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static double u_host[KMAX][JMAXP+1][IMAXP+1][5]; static double us_host[KMAX][JMAXP+1][IMAXP+1]; static double vs_host[KMAX][JMAXP+1][IMAXP+1]; static double ws_host[KMAX][JMAXP+1][IMAXP+1]; static double qs_host[KMAX][JMAXP+1][IMAXP+1]; static double rho_i_host[KMAX][JMAXP+1][IMAXP+1]; static double speed_host[KMAX][JMAXP+1][IMAXP+1]; static double square_host[KMAX][JMAXP+1][IMAXP+1]; static double rhs_host[KMAX][JMAXP+1][IMAXP+1][5]; static double forcing_host[KMAX][JMAXP+1][IMAXP+1][5]; static double cv_host[PROBLEM_SIZE]; static double rhon_host[PROBLEM_SIZE]; static double rhos_host[PROBLEM_SIZE]; static double rhoq_host[PROBLEM_SIZE]; static double cuf_host[PROBLEM_SIZE]; static double q_host[PROBLEM_SIZE]; static double ue_host[5][PROBLEM_SIZE]; static double buf_host[5][PROBLEM_SIZE]; static double lhs_host[IMAXP+1][IMAXP+1][5]; static double lhsp_host[IMAXP+1][IMAXP+1][5]; static double lhsm_host[IMAXP+1][IMAXP+1][5]; static double ce_host[13][5]; #else static double (*u_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*us_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*vs_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*ws_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*qs_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*rho_i_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double 
(*speed_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*square_host)[JMAXP+1][IMAXP+1]=(double(*)[JMAXP+1][IMAXP+1])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1))); static double (*rhs_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*forcing_host)[JMAXP+1][IMAXP+1][5]=(double(*)[JMAXP+1][IMAXP+1][5])malloc(sizeof(double)*((KMAX)*(JMAXP+1)*(IMAXP+1)*(5))); static double (*cv_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhon_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhos_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*rhoq_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*cuf_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*q_host)=(double*)malloc(sizeof(double)*(PROBLEM_SIZE)); static double (*ue_host)[PROBLEM_SIZE]=(double(*)[PROBLEM_SIZE])malloc(sizeof(double)*((PROBLEM_SIZE)*(5))); static double (*buf_host)[PROBLEM_SIZE]=(double(*)[PROBLEM_SIZE])malloc(sizeof(double)*((PROBLEM_SIZE)*(5))); static double (*lhs_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*lhsp_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*lhsm_host)[IMAXP+1][5]=(double(*)[IMAXP+1][5])malloc(sizeof(double)*((IMAXP+1)*(IMAXP+1)*(5))); static double (*ce_host)[5]=(double(*)[5])malloc(sizeof(double)*((13)*(5))); #endif static int grid_points[3]; static double dt_host; /* gpu variables */ static double* u_device; static double* forcing_device; static double* rhs_device; static double* rho_i_device; static double* us_device; static double* vs_device; static double* ws_device; static double* qs_device; static double* speed_device; static double* square_device; static double* lhs_device; static double* 
rhs_buffer_device; static double* rms_buffer_device; static size_t size_u_device; static size_t size_forcing_device; static size_t size_rhs_device; static size_t size_rho_i_device; static size_t size_us_device; static size_t size_vs_device; static size_t size_ws_device; static size_t size_qs_device; static size_t size_speed_device; static size_t size_square_device; static size_t size_lhs_device; static size_t size_rhs_buffer_device; static size_t size_rms_buffer_device; static int nx; static int ny; static int nz; static int THREADS_PER_BLOCK_ON_ADD; static int THREADS_PER_BLOCK_ON_COMPUTE_RHS_1; static int THREADS_PER_BLOCK_ON_COMPUTE_RHS_2; static int THREADS_PER_BLOCK_ON_ERROR_NORM_1; static int THREADS_PER_BLOCK_ON_ERROR_NORM_2; static int THREADS_PER_BLOCK_ON_EXACT_RHS_1; static int THREADS_PER_BLOCK_ON_EXACT_RHS_2; static int THREADS_PER_BLOCK_ON_EXACT_RHS_3; static int THREADS_PER_BLOCK_ON_EXACT_RHS_4; static int THREADS_PER_BLOCK_ON_INITIALIZE; static int THREADS_PER_BLOCK_ON_RHS_NORM_1; static int THREADS_PER_BLOCK_ON_RHS_NORM_2; static int THREADS_PER_BLOCK_ON_TXINVR; static int THREADS_PER_BLOCK_ON_X_SOLVE; static int THREADS_PER_BLOCK_ON_Y_SOLVE; static int THREADS_PER_BLOCK_ON_Z_SOLVE; int gpu_device_id; int total_devices; cudaDeviceProp gpu_device_properties; extern __shared__ double extern_share_data[]; namespace constants_device{ __constant__ double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3, dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt, dxmax, dymax, dzmax, xxcon1, xxcon2, xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6, c3c4tx3, c3c4ty3, 
c3c4tz3, c2iv, con43, con16, ce[13][5]; } /* function prototypes */ static void add_gpu(); __global__ static void add_gpu_kernel(double* u, const double* rhs, const int nx, const int ny, const int nz); static void adi_gpu(); static void compute_rhs_gpu(); __global__ static void compute_rhs_gpu_kernel_1(double* rho_i, double* us, double* vs, double* ws, double* speed, double* qs, double* square, const double* u, const int nx, const int ny, const int nz); __global__ static void compute_rhs_gpu_kernel_2(const double* rho_i, const double* us, const double* vs, const double* ws, const double* qs, const double* square, double* rhs, const double* forcing, const double* u, const int nx, const int ny, const int nz); static void error_norm_gpu(double rms[]); __global__ static void error_norm_gpu_kernel_1(double* rms, const double* u, const int nx, const int ny, const int nz); __global__ static void error_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz); static void exact_rhs_gpu(); __global__ static void exact_rhs_gpu_kernel_1(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_2(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_3(double* forcing, const int nx, const int ny, const int nz); __global__ static void exact_rhs_gpu_kernel_4(double* forcing, const int nx, const int ny, const int nz); __device__ static void exact_solution_gpu_device(const double xi, const double eta, const double zeta, double* dtemp); static void initialize_gpu(); __global__ static void initialize_gpu_kernel(double* u, const int nx, const int ny, const int nz); static void release_gpu(); static void rhs_norm_gpu(double rms[]); __global__ static void rhs_norm_gpu_kernel_1(double* rms, const double* rhs, const int nx, const int ny, const int nz); __global__ static void rhs_norm_gpu_kernel_2(double* rms, const int nx, const int ny, const int nz); static void set_constants(); 
/* forward declarations for the remaining solver stages (defined later in this file) */
static void setup_gpu();
static void txinvr_gpu();
__global__ static void txinvr_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, double* rhs, const int nx, const int ny, const int nz);
static void verify_gpu(int no_time_steps, char* class_npb, boolean* verified);
static void x_solve_gpu();
__global__ static void x_solve_gpu_kernel(const double* rho_i, const double* us, const double* speed, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
static void y_solve_gpu();
__global__ static void y_solve_gpu_kernel(const double* rho_i, const double* vs, const double* speed, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
static void z_solve_gpu();
__global__ static void z_solve_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, const double* u, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz);
/* sp */
/*
 * benchmark driver: read optional "inputsp.data" (else compiled defaults),
 * set up device buffers and constants, run one untimed warm-up ADI step,
 * then time 'niter' ADI iterations, verify, and print the NPB report
 */
int main(int argc, char** argv){
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
    printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n");
#endif
#if defined(PROFILING)
    printf(" PROFILING mode on\n");
#endif
    int niter, step, n3;
    double mflops, t, tmax;
    boolean verified;
    char class_npb;
    /*
     * ---------------------------------------------------------------------
     * read input file (if it exists), else take
     * defaults from parameters
     * ---------------------------------------------------------------------
     */
    FILE* fp;
    if((fp=fopen("inputsp.data","r"))!=NULL){
        int result;
        printf(" Reading from input file inputsp.data\n");
        /* each scan is followed by a skip to end-of-line so extra text on the line is ignored */
        result=fscanf(fp,"%d", &niter);
        while(fgetc(fp)!='\n');
        result=fscanf(fp,"%lf",&dt_host);
        while(fgetc(fp)!='\n');
        result=fscanf(fp,"%d%d%d",&grid_points[0],&grid_points[1],&grid_points[2]);
        result++; /* silences unused-result warnings for the fscanf calls above */
        fclose(fp);
    }else{
        printf(" No input file inputsp.data. Using compiled defaults\n");
        niter=NITER_DEFAULT;
        dt_host=DT_DEFAULT;
        grid_points[0]=PROBLEM_SIZE;
        grid_points[1]=PROBLEM_SIZE;
        grid_points[2]=PROBLEM_SIZE;
    }
    printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - SP Benchmark\n\n");
    printf(" Size: %4dx%4dx%4d\n",grid_points[0],grid_points[1],grid_points[2]);
    printf(" Iterations: %4d dt: %10.6f\n",niter,dt_host);
    printf("\n");
    /* refuse sizes larger than the statically compiled array bounds */
    if((grid_points[0]>IMAX)||(grid_points[1]>JMAX)||(grid_points[2]>KMAX)){
        printf(" %d, %d, %d\n",grid_points[0],grid_points[1],grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        return 0;
    }
    nx=grid_points[0];
    ny=grid_points[1];
    nz=grid_points[2];
    setup_gpu();
    set_constants();
    timer_clear(PROFILING_TOTAL_TIME);
#if defined(PROFILING)
    timer_clear(PROFILING_ADD);
    timer_clear(PROFILING_COMPUTE_RHS_1);
    timer_clear(PROFILING_COMPUTE_RHS_2);
    timer_clear(PROFILING_ERROR_NORM_1);
    timer_clear(PROFILING_ERROR_NORM_2);
    timer_clear(PROFILING_EXACT_RHS_1);
    timer_clear(PROFILING_EXACT_RHS_2);
    timer_clear(PROFILING_EXACT_RHS_3);
    timer_clear(PROFILING_EXACT_RHS_4);
    timer_clear(PROFILING_INITIALIZE);
    timer_clear(PROFILING_RHS_NORM_1);
    timer_clear(PROFILING_RHS_NORM_2);
    timer_clear(PROFILING_TXINVR);
    timer_clear(PROFILING_X_SOLVE);
    timer_clear(PROFILING_Y_SOLVE);
    timer_clear(PROFILING_Z_SOLVE);
#endif
    exact_rhs_gpu();
    initialize_gpu();
    /*
     * ---------------------------------------------------------------------
     * do one time step to touch all code, and reinitialize
     * ---------------------------------------------------------------------
     */
    adi_gpu();
    initialize_gpu();
    /* reset all timers so the warm-up step is excluded from the measurement */
    timer_clear(PROFILING_TOTAL_TIME);
#if defined(PROFILING)
    timer_clear(PROFILING_ADD);
    timer_clear(PROFILING_COMPUTE_RHS_1);
    timer_clear(PROFILING_COMPUTE_RHS_2);
    timer_clear(PROFILING_ERROR_NORM_1);
    timer_clear(PROFILING_ERROR_NORM_2);
    timer_clear(PROFILING_EXACT_RHS_1);
    timer_clear(PROFILING_EXACT_RHS_2);
    timer_clear(PROFILING_EXACT_RHS_3);
    timer_clear(PROFILING_EXACT_RHS_4);
    timer_clear(PROFILING_INITIALIZE);
    timer_clear(PROFILING_RHS_NORM_1);
    timer_clear(PROFILING_RHS_NORM_2);
    timer_clear(PROFILING_TXINVR);
    timer_clear(PROFILING_X_SOLVE);
    timer_clear(PROFILING_Y_SOLVE);
    timer_clear(PROFILING_Z_SOLVE);
#endif
    timer_start(PROFILING_TOTAL_TIME);/*#start_timer*/
    /* timed main iteration loop */
    for(step=1;step<=niter;step++){
        if((step%20)==0||step==1){printf(" Time step %4d\n",step);}
        adi_gpu();
    }
    timer_stop(PROFILING_TOTAL_TIME);/*#stop_timer*/
    tmax=timer_read(PROFILING_TOTAL_TIME);
    verify_gpu(niter, &class_npb, &verified);
    /* MFLOP/s estimate from the standard SP operation-count polynomial */
    if(tmax!=0.0){
        n3=grid_points[0]*grid_points[1]*grid_points[2];
        t=(grid_points[0]+grid_points[1]+grid_points[2])/3.0;
        mflops=(881.174*(double)n3-
                4683.91*(t*t)+
                11484.5*t-
                19272.4)*(double)niter/(tmax*1000000.0);
    }else{
        mflops=0.0;
    }
    /* build a per-kernel configuration (and, when PROFILING, timing) table for the report */
    char gpu_config[256];
    char gpu_config_string[2048];
#if defined(PROFILING)
    sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage");
    strcpy(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-add", THREADS_PER_BLOCK_ON_ADD, timer_read(PROFILING_ADD), (timer_read(PROFILING_ADD)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-compute-rhs-1", THREADS_PER_BLOCK_ON_COMPUTE_RHS_1, timer_read(PROFILING_COMPUTE_RHS_1), (timer_read(PROFILING_COMPUTE_RHS_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-compute-rhs-2", THREADS_PER_BLOCK_ON_COMPUTE_RHS_2, timer_read(PROFILING_COMPUTE_RHS_2), (timer_read(PROFILING_COMPUTE_RHS_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-error-norm-1", THREADS_PER_BLOCK_ON_ERROR_NORM_1, timer_read(PROFILING_ERROR_NORM_1), (timer_read(PROFILING_ERROR_NORM_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-error-norm-2", THREADS_PER_BLOCK_ON_ERROR_NORM_2, timer_read(PROFILING_ERROR_NORM_2), (timer_read(PROFILING_ERROR_NORM_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-1", THREADS_PER_BLOCK_ON_EXACT_RHS_1, timer_read(PROFILING_EXACT_RHS_1), (timer_read(PROFILING_EXACT_RHS_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-2", THREADS_PER_BLOCK_ON_EXACT_RHS_2, timer_read(PROFILING_EXACT_RHS_2), (timer_read(PROFILING_EXACT_RHS_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-3", THREADS_PER_BLOCK_ON_EXACT_RHS_3, timer_read(PROFILING_EXACT_RHS_3), (timer_read(PROFILING_EXACT_RHS_3)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-exact-rhs-4", THREADS_PER_BLOCK_ON_EXACT_RHS_4, timer_read(PROFILING_EXACT_RHS_4), (timer_read(PROFILING_EXACT_RHS_4)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-initialize", THREADS_PER_BLOCK_ON_INITIALIZE, timer_read(PROFILING_INITIALIZE), (timer_read(PROFILING_INITIALIZE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-rhs-norm-1", THREADS_PER_BLOCK_ON_RHS_NORM_1, timer_read(PROFILING_RHS_NORM_1), (timer_read(PROFILING_RHS_NORM_1)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-rhs-norm-2", THREADS_PER_BLOCK_ON_RHS_NORM_2, timer_read(PROFILING_RHS_NORM_2), (timer_read(PROFILING_RHS_NORM_2)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-txinvr", THREADS_PER_BLOCK_ON_TXINVR, timer_read(PROFILING_TXINVR), (timer_read(PROFILING_TXINVR)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-x-solve", THREADS_PER_BLOCK_ON_X_SOLVE, timer_read(PROFILING_X_SOLVE), (timer_read(PROFILING_X_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-y-solve", THREADS_PER_BLOCK_ON_Y_SOLVE, timer_read(PROFILING_Y_SOLVE), (timer_read(PROFILING_Y_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " sp-z-solve", THREADS_PER_BLOCK_ON_Z_SOLVE, timer_read(PROFILING_Z_SOLVE), (timer_read(PROFILING_Z_SOLVE)*100/timer_read(PROFILING_TOTAL_TIME)));
    strcat(gpu_config_string, gpu_config);
#else
    sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block");
    strcpy(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-add", THREADS_PER_BLOCK_ON_ADD);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-compute-rhs-1", THREADS_PER_BLOCK_ON_COMPUTE_RHS_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-compute-rhs-2", THREADS_PER_BLOCK_ON_COMPUTE_RHS_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-error-norm-1", THREADS_PER_BLOCK_ON_ERROR_NORM_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-error-norm-2", THREADS_PER_BLOCK_ON_ERROR_NORM_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-1", THREADS_PER_BLOCK_ON_EXACT_RHS_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-2", THREADS_PER_BLOCK_ON_EXACT_RHS_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-3", THREADS_PER_BLOCK_ON_EXACT_RHS_3);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-exact-rhs-4", THREADS_PER_BLOCK_ON_EXACT_RHS_4);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-initialize", THREADS_PER_BLOCK_ON_INITIALIZE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-rhs-norm-1", THREADS_PER_BLOCK_ON_RHS_NORM_1);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-rhs-norm-2", THREADS_PER_BLOCK_ON_RHS_NORM_2);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-txinvr", THREADS_PER_BLOCK_ON_TXINVR);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-x-solve", THREADS_PER_BLOCK_ON_X_SOLVE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-y-solve", THREADS_PER_BLOCK_ON_Y_SOLVE);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " sp-z-solve", THREADS_PER_BLOCK_ON_Z_SOLVE);
    strcat(gpu_config_string, gpu_config);
#endif
    /* standard NPB result banner */
    c_print_results((char*)"SP",
            class_npb,
            grid_points[0],
            grid_points[1],
            grid_points[2],
            niter,
            tmax,
            mflops,
            (char*)" floating point",
            verified,
            (char*)NPBVERSION,
            (char*)COMPILETIME,
            (char*)COMPILERVERSION,
            (char*)LIBVERSION,
            (char*)CPU_MODEL,
            (char*)gpu_device_properties.name,
            gpu_config_string,
            (char*)CS1,
            (char*)CS2,
            (char*)CS3,
            (char*)CS4,
            (char*)CS5,
            (char*)CS6,
            (char*)"(none)");
    release_gpu();
    return 0;
}
/*
 * ---------------------------------------------------------------------
 * addition of update to the vector u
 * ---------------------------------------------------------------------
 */
static void add_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_ADD);
#endif
    /* #KERNEL ADD */
    /* flat 1-D launch: one thread per (i,j,k) grid point */
    int add_workload = nx * ny * nz;
    int add_threads_per_block = THREADS_PER_BLOCK_ON_ADD;
    int add_blocks_per_grid =
        (ceil((double)add_workload/(double)add_threads_per_block));
    add_gpu_kernel<<<
        add_blocks_per_grid,
        add_threads_per_block>>>(
                u_device,
                rhs_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ADD);
#endif
}
/*
 * ---------------------------------------------------------------------
 * addition of update to the vector u
 * ---------------------------------------------------------------------
 */
/* one thread per cell: u(m,i,j,k) += rhs(m,i,j,k) for all five components */
__global__ static void add_gpu_kernel(double* u,
        const double* rhs,
        const int nx,
        const int ny,
        const int nz){
    int i_j_k, i, j, k;
    /* decode the flat thread index into (i,j,k); i varies fastest */
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    /* tail guard: the grid may overshoot nx*ny*nz */
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    /* array(m,i,j,k) */
    u(0,i,j,k)+=rhs(0,i,j,k);
    u(1,i,j,k)+=rhs(1,i,j,k);
    u(2,i,j,k)+=rhs(2,i,j,k);
    u(3,i,j,k)+=rhs(3,i,j,k);
    u(4,i,j,k)+=rhs(4,i,j,k);
}
/* one ADI time step: rhs evaluation, txinvr, sweeps in x, y, z, then the u update */
static void adi_gpu(){
    compute_rhs_gpu();
    txinvr_gpu();
    x_solve_gpu();
    y_solve_gpu();
    z_solve_gpu();
    add_gpu();
}
/* launch the two rhs kernels: pointwise quantities first, then the flux stencil */
static void compute_rhs_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_COMPUTE_RHS_1);
#endif
    /* #KERNEL COMPUTE RHS 1 */
    int compute_rhs_1_workload = nx * ny * nz;
    int compute_rhs_1_threads_per_block = THREADS_PER_BLOCK_ON_COMPUTE_RHS_1;
    int compute_rhs_1_blocks_per_grid = (ceil((double)compute_rhs_1_workload/(double)compute_rhs_1_threads_per_block));
    compute_rhs_gpu_kernel_1<<<
        compute_rhs_1_blocks_per_grid,
        compute_rhs_1_threads_per_block>>>(
                rho_i_device,
                us_device,
                vs_device,
                ws_device,
                speed_device,
                qs_device,
                square_device,
                u_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_COMPUTE_RHS_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_COMPUTE_RHS_2);
#endif
    /* #KERNEL COMPUTE RHS 2 */
    /*
     * kernel 2 uses threadIdx.x directly as the i index (no stride loop),
     * so blockDim.x must be exactly nx; force it when the compiled
     * constant does not match
     */
    int compute_rhs_2_threads_per_block;
    dim3 compute_rhs_2_blocks_per_grid(ny, nz);
    if(THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 != nx){
        compute_rhs_2_threads_per_block = nx;
    }
    else{
        compute_rhs_2_threads_per_block = THREADS_PER_BLOCK_ON_COMPUTE_RHS_2;
    }
    compute_rhs_gpu_kernel_2<<<
        compute_rhs_2_blocks_per_grid,
        compute_rhs_2_threads_per_block>>>(
                rho_i_device,
                us_device,
                vs_device,
                ws_device,
                qs_device,
                square_device,
                rhs_device,
                forcing_device,
                u_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_COMPUTE_RHS_2);
#endif
}
/* pointwise auxiliaries derived from u: 1/rho, velocities, q, square, speed of sound */
__global__ static void compute_rhs_gpu_kernel_1(double* rho_i,
        double* us,
        double* vs,
        double* ws,
        double* speed,
        double* qs,
        double* square,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i_j_k, i, j, k;
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * compute the reciprocal of density, and the kinetic energy,
     * and the speed of sound.
     * ---------------------------------------------------------------------
     */
    double rho_inv=1.0/u(0,i,j,k);
    double square_ijk;
    rho_i(i,j,k)=rho_inv;
    us(i,j,k)=u(1,i,j,k)*rho_inv;
    vs(i,j,k)=u(2,i,j,k)*rho_inv;
    ws(i,j,k)=u(3,i,j,k)*rho_inv;
    square(i,j,k)=square_ijk=0.5*(u(1,i,j,k)*u(1,i,j,k)+u(2,i,j,k)*u(2,i,j,k)+u(3,i,j,k)*u(3,i,j,k))*rho_inv;
    qs(i,j,k)=square_ijk*rho_inv;
    /*
     * ---------------------------------------------------------------------
     * (don't need speed and ainx until the lhs computation)
     * ---------------------------------------------------------------------
     */
    speed(i,j,k)=sqrt(c1c2*rho_inv*(u(4,i,j,k)-square_ijk));
}
/*
 * flux/dissipation stencil kernel: grid is (ny, nz) blocks with nx threads,
 * so each thread owns one pencil point (i = threadIdx.x, j = blockIdx.x,
 * k = blockIdx.y) and accumulates rhs in the local rtmp[5]
 */
__global__ static void compute_rhs_gpu_kernel_2(const double* rho_i,
        const double* us,
        const double* vs,
        const double* ws,
        const double* qs,
        const double* square,
        double* rhs,
        const double* forcing,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    k=blockIdx.y;
    j=blockIdx.x;
    i=threadIdx.x;
    double rtmp[5];
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * copy the exact forcing term to the right hand side; because
     * this forcing term is known, we can store it on the whole grid
     *
     * including the boundary
     * ---------------------------------------------------------------------
     */
    for(m=0;m<5;m++){rtmp[m]=forcing(m,i,j,k);}
    /*
     * ---------------------------------------------------------------------
     * compute xi-direction fluxes
     * ---------------------------------------------------------------------
     */
    /* interior points only; boundary threads just write the forcing copy below */
    if(k>=1 && k<nz-1 && j>=1 && j<ny-1 && i>=1 && i<nx-1){
        double uijk=us(i,j,k);
        double up1=us(i+1,j,k);
        double um1=us(i-1,j,k);
        rtmp[0]=rtmp[0]+dx1tx1*(u(0,i+1,j,k)-2.0*u(0,i,j,k)+u(0,i-1,j,k))-tx2*(u(1,i+1,j,k)-u(1,i-1,j,k));
        rtmp[1]=rtmp[1]+dx2tx1*(u(1,i+1,j,k)-2.0*u(1,i,j,k)+u(1,i-1,j,k))+xxcon2*con43*(up1-2.0*uijk+um1)-tx2*(u(1,i+1,j,k)*up1-u(1,i-1,j,k)*um1+(u(4,i+1,j,k)-square(i+1,j,k)-u(4,i-1,j,k)+square(i-1,j,k))*c2);
        rtmp[2]=rtmp[2]+dx3tx1*(u(2,i+1,j,k)-2.0*u(2,i,j,k)+u(2,i-1,j,k))+xxcon2*(vs(i+1,j,k)-2.0*vs(i,j,k)+vs(i-1,j,k))-tx2*(u(2,i+1,j,k)*up1-u(2,i-1,j,k)*um1);
        rtmp[3]=rtmp[3]+dx4tx1*(u(3,i+1,j,k)-2.0*u(3,i,j,k)+u(3,i-1,j,k))+xxcon2*(ws(i+1,j,k)-2.0*ws(i,j,k)+ws(i-1,j,k))-tx2*(u(3,i+1,j,k)*up1-u(3,i-1,j,k)*um1);
        rtmp[4]=rtmp[4]+dx5tx1*(u(4,i+1,j,k)-2.0*u(4,i,j,k)+u(4,i-1,j,k))+xxcon3*(qs(i+1,j,k)-2.0*qs(i,j,k)+qs(i-1,j,k))+ xxcon4*(up1*up1-2.0*uijk*uijk+um1*um1)+xxcon5*(u(4,i+1,j,k)*rho_i(i+1,j,k)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i-1,j,k)*rho_i(i-1,j,k))-tx2*((c1*u(4,i+1,j,k)-c2*square(i+1,j,k))*up1-(c1*u(4,i-1,j,k)-c2*square(i-1,j,k))*um1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order xi-direction dissipation
         * ---------------------------------------------------------------------
         */
        /* one-sided stencils near the i boundaries, centered 5-point stencil inside */
        if(i==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i>=3 && i<nx-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k)+u(m,i+2,j,k));}
        }else if(i==nx-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k)+6.0*u(m,i,j,k)-4.0*u(m,i+1,j,k));}
        }else if(i==nx-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i-2,j,k)-4.0*u(m,i-1,j,k) + 5.0*u(m,i,j,k));}
        }
        /*
         * ---------------------------------------------------------------------
         * compute eta-direction fluxes
         * ---------------------------------------------------------------------
         */
        double vijk=vs(i,j,k);
        double vp1=vs(i,j+1,k);
        double vm1=vs(i,j-1,k);
        rtmp[0]=rtmp[0]+dy1ty1*(u(0,i,j+1,k)-2.0*u(0,i,j,k)+u(0,i,j-1,k))-ty2*(u(2,i,j+1,k)-u(2,i,j-1,k));
        rtmp[1]=rtmp[1]+dy2ty1*(u(1,i,j+1,k)-2.0*u(1,i,j,k)+u(1,i,j-1,k))+yycon2*(us(i,j+1,k)-2.0*us(i,j,k)+us(i,j-1,k))-ty2*(u(1,i,j+1,k)*vp1-u(1,i,j-1,k)*vm1);
        rtmp[2]=rtmp[2]+dy3ty1*(u(2,i,j+1,k)-2.0*u(2,i,j,k)+u(2,i,j-1,k))+yycon2*con43*(vp1-2.0*vijk+vm1)-ty2*(u(2,i,j+1,k)*vp1-u(2,i,j-1,k)*vm1+(u(4,i,j+1,k)-square(i,j+1,k)-u(4,i,j-1,k)+square(i,j-1,k))*c2);
        rtmp[3]=rtmp[3]+dy4ty1*(u(3,i,j+1,k)-2.0*u(3,i,j,k)+u(3,i,j-1,k))+yycon2*(ws(i,j+1,k)-2.0*ws(i,j,k)+ws(i,j-1,k))-ty2*(u(3,i,j+1,k)*vp1-u(3,i,j-1,k)*vm1);
        rtmp[4]=rtmp[4]+dy5ty1*(u(4,i,j+1,k)-2.0*u(4,i,j,k)+u(4,i,j-1,k))+yycon3*(qs(i,j+1,k)-2.0*qs(i,j,k)+qs(i,j-1,k))+yycon4*(vp1*vp1-2.0*vijk*vijk+vm1*vm1)+yycon5*(u(4,i,j+1,k)*rho_i(i,j+1,k)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i,j-1,k)*rho_i(i,j-1,k))-ty2*((c1*u(4,i,j+1,k)-c2*square(i,j+1,k))*vp1-(c1*u(4,i,j-1,k)-c2*square(i,j-1,k))*vm1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order eta-direction dissipation
         * ---------------------------------------------------------------------
         */
        if(j==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j>=3 && j<ny-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k)+u(m,i,j+2,k));}
        }else if(j==ny-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+6.0*u(m,i,j,k)-4.0*u(m,i,j+1,k));}
        }else if(j==ny-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j-2,k)-4.0*u(m,i,j-1,k)+5.0*u(m,i,j,k));}
        }
        /*
         * ---------------------------------------------------------------------
         * compute zeta-direction fluxes
         * ---------------------------------------------------------------------
         */
        double wijk=ws(i,j,k);
        double wp1=ws(i,j,k+1);
        double wm1=ws(i,j,k-1);
        rtmp[0]=rtmp[0]+dz1tz1*(u(0,i,j,k+1)-2.0*u(0,i,j,k)+u(0,i,j,k-1))-tz2*(u(3,i,j,k+1)-u(3,i,j,k-1));
        rtmp[1]=rtmp[1]+dz2tz1*(u(1,i,j,k+1)-2.0*u(1,i,j,k)+u(1,i,j,k-1))+zzcon2*(us(i,j,k+1)-2.0*us(i,j,k)+us(i,j,k-1))-tz2*(u(1,i,j,k+1)*wp1-u(1,i,j,k-1)*wm1);
        rtmp[2]=rtmp[2]+dz3tz1*(u(2,i,j,k+1)-2.0*u(2,i,j,k)+u(2,i,j,k-1))+zzcon2*(vs(i,j,k+1)-2.0*vs(i,j,k)+vs(i,j,k-1))-tz2*(u(2,i,j,k+1)*wp1-u(2,i,j,k-1)*wm1);
        rtmp[3]=rtmp[3]+dz4tz1*(u(3,i,j,k+1)-2.0*u(3,i,j,k)+u(3,i,j,k-1))+zzcon2*con43*(wp1-2.0*wijk+wm1)-tz2*(u(3,i,j,k+1)*wp1-u(3,i,j,k-1)*wm1+(u(4,i,j,k+1)-square(i,j,k+1)-u(4,i,j,k-1)+square(i,j,k-1))*c2);
        rtmp[4]=rtmp[4]+dz5tz1*(u(4,i,j,k+1)-2.0*u(4,i,j,k)+u(4,i,j,k-1))+zzcon3*(qs(i,j,k+1)-2.0*qs(i,j,k)+qs(i,j,k-1))+zzcon4*(wp1*wp1-2.0*wijk*wijk+wm1*wm1)+zzcon5*(u(4,i,j,k+1)*rho_i(i,j,k+1)-2.0*u(4,i,j,k)*rho_i(i,j,k)+u(4,i,j,k-1)*rho_i(i,j,k-1))-tz2*((c1*u(4,i,j,k+1)-c2*square(i,j,k+1))*wp1-(c1*u(4,i,j,k-1)-c2*square(i,j,k-1))*wm1);
        /*
         * ---------------------------------------------------------------------
         * add fourth order zeta-direction dissipation
         * ---------------------------------------------------------------------
         */
        if(k==1){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(5.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k==2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k>=3 && k<nz-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1)+u(m,i,j,k+2));}
        }else if(k==nz-3){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+6.0*u(m,i,j,k)-4.0*u(m,i,j,k+1));}
        }else if(k==nz-2){
            for(m=0;m<5;m++){rtmp[m]=rtmp[m]-dssp*(u(m,i,j,k-2)-4.0*u(m,i,j,k-1)+5.0*u(m,i,j,k));}
        }
        /* scale by the time step (interior points only) */
        for(m=0;m<5;m++){rtmp[m]*=dt;}
    }
    /* single write-back of all five components */
    for(m=0;m<5;m++){rhs(m,i,j,k)=rtmp[m];}
}
/*
 * ---------------------------------------------------------------------
 * this function computes the norm of the difference between the
 * computed solution and the exact solution
 * ---------------------------------------------------------------------
 */
/* two-phase reduction: per-(i,j) partial sums, then a single-block reduce; result copied into rms[5] */
static void error_norm_gpu(double rms[]){
#if defined(PROFILING)
    timer_start(PROFILING_ERROR_NORM_1);
#endif
    /* #KERNEL ERROR NORM 1 */
    int error_norm_1_threads_per_block = THREADS_PER_BLOCK_ON_ERROR_NORM_1;
    dim3 error_norm_1_blocks_per_grid(ny, nx);
    error_norm_gpu_kernel_1<<<
        error_norm_1_blocks_per_grid,
        error_norm_1_threads_per_block>>>(
                rms_buffer_device,
                u_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ERROR_NORM_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_ERROR_NORM_2);
#endif
    /* #KERNEL ERROR NORM 2 */
    /* single block; dynamic shared memory holds 5 doubles per thread */
    int error_norm_2_threads_per_block = THREADS_PER_BLOCK_ON_ERROR_NORM_2;
    int error_norm_2_blocks_per_grid = 1;
    error_norm_gpu_kernel_2<<<
        error_norm_2_blocks_per_grid,
        error_norm_2_threads_per_block,
        sizeof(double)*error_norm_2_threads_per_block*5>>>(
                rms_buffer_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_ERROR_NORM_2);
#endif
    cudaMemcpy(rms, rms_buffer_device, 5*sizeof(double), cudaMemcpyDeviceToHost);
}
/* phase 1: each (i,j) thread accumulates squared error along its k-column into rms[i+nx*(j+ny*m)] */
__global__ static void error_norm_gpu_kernel_1(double* rms,
        const double* u,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double xi, eta, zeta, u_exact[5], rms_loc[5];
    j=blockIdx.x*blockDim.x+threadIdx.x;
    i=blockIdx.y*blockDim.y+threadIdx.y;
    if(j>=ny || i>=nx){return;}
    using namespace constants_device;
    for(m=0;m<5;m++){rms_loc[m]=0.0;}
    xi=(double)i*dnxm1;
    eta=(double)j*dnym1;
    for(k=0; k<nz; k++){
        zeta=(double)k*dnzm1;
        exact_solution_gpu_device(xi, eta, zeta,
                u_exact);
        for(m=0; m<5; m++){
            double add=u(m,i,j,k)-u_exact[m];
            rms_loc[m]+=add*add;
        }
    }
    for(m=0;m<5;m++){rms[i+nx*(j+ny*m)]=rms_loc[m];}
}
/*
 * phase 2: single-block tree reduction of the nx*ny partial sums per
 * component in dynamic shared memory; threads 0..4 write the final
 * normalized sqrt values back into rms[0..4]
 */
__global__ static void error_norm_gpu_kernel_2(double* rms,
        const int nx,
        const int ny,
        const int nz){
    int i, m, maxpos, dist;
    double* buffer = (double*)extern_share_data;
    i = threadIdx.x;
    /* clear this thread's five shared-memory slots before accumulating */
    for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]=0.0;}
    /* strided accumulation over all nx*ny partials */
    while(i<nx*ny){
        for(m=0;m<5;m++){buffer[threadIdx.x+(m*blockDim.x)]+=rms[i+nx*ny*m];}
        i+=blockDim.x;
    }
    maxpos=blockDim.x;
    dist=(maxpos+1)/2;
    i=threadIdx.x;
    __syncthreads();
    /* pairwise tree reduction; barrier executed uniformly by the whole block */
    while(maxpos>1){
        if(i<dist && i+dist<maxpos){
            for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]+=buffer[(i+dist)+(m*blockDim.x)];}
        }
        maxpos=dist;
        dist=(dist+1)/2;
        __syncthreads();
    }
    m=threadIdx.x;
    /* normalize by the interior point count and take the root */
    if(m<5){rms[m]=sqrt(buffer[0+(m*blockDim.x)]/((double)(nz-2)*(double)(ny-2)*(double)(nx-2)));}
}
/*
 * ---------------------------------------------------------------------
 * compute the right hand side based on exact solution
 * ---------------------------------------------------------------------
 */
/* launch the four forcing kernels: zero-init, then xi, eta and zeta sweeps */
static void exact_rhs_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_EXACT_RHS_1);
#endif
    /* #KERNEL EXACT RHS 1 */
    int rhs1_workload = nx * ny * nz;
    int rhs1_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_1;
    int rhs1_blocks_per_grid = (ceil((double)rhs1_workload/(double)rhs1_threads_per_block));
    exact_rhs_gpu_kernel_1<<<
        rhs1_blocks_per_grid,
        rhs1_threads_per_block>>>(
                forcing_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_EXACT_RHS_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_EXACT_RHS_2);
#endif
    /* #KERNEL EXACT RHS 2 */
    /* clamp the block size to the sweep's pencil count when the constant exceeds it */
    int rhs2_threads_per_block;
    dim3 rhs2_blocks_per_grid(nz, ny);
    if(THREADS_PER_BLOCK_ON_EXACT_RHS_2 > nx){
        rhs2_threads_per_block = nx;
    }
    else{
        rhs2_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_2;
    }
    exact_rhs_gpu_kernel_2<<<
        rhs2_blocks_per_grid,
        rhs2_threads_per_block>>>(
                forcing_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_EXACT_RHS_2);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_EXACT_RHS_3);
#endif
    /* #KERNEL EXACT RHS 3 */
    int rhs3_threads_per_block;
    dim3 rhs3_blocks_per_grid(nz, nx);
    if(THREADS_PER_BLOCK_ON_EXACT_RHS_3 > ny){
        rhs3_threads_per_block = ny;
    }
    else{
        rhs3_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_3;
    }
    exact_rhs_gpu_kernel_3<<<
        rhs3_blocks_per_grid,
        rhs3_threads_per_block>>>(
                forcing_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_EXACT_RHS_3);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_EXACT_RHS_4);
#endif
    /* #KERNEL EXACT RHS 4 */
    int rhs4_threads_per_block;
    dim3 rhs4_blocks_per_grid(ny, nx);
    if(THREADS_PER_BLOCK_ON_EXACT_RHS_4 > nz){
        rhs4_threads_per_block = nz;
    }
    else{
        rhs4_threads_per_block = THREADS_PER_BLOCK_ON_EXACT_RHS_4;
    }
    exact_rhs_gpu_kernel_4<<<
        rhs4_blocks_per_grid,
        rhs4_threads_per_block>>>(
                forcing_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_EXACT_RHS_4);
#endif
}
/* zero all five forcing components at every grid point (flat 1-D launch) */
__global__ static void exact_rhs_gpu_kernel_1(double* forcing,
        const int nx,
        const int ny,
        const int nz){
    int i_j_k, i, j, k;
    i_j_k = blockIdx.x * blockDim.x + threadIdx.x;
    i = i_j_k % nx;
    j = (i_j_k / nx) % ny;
    k = i_j_k / (nx * ny);
    if(i_j_k >= (nx*ny*nz)){
        return;
    }
    /*
     * ---------------------------------------------------------------------
     * initialize
     * ---------------------------------------------------------------------
     */
    /* array(m,i,j,k) */
    forcing(0,i,j,k)=0.0;
    forcing(1,i,j,k)=0.0;
    forcing(2,i,j,k)=0.0;
    forcing(3,i,j,k)=0.0;
    forcing(4,i,j,k)=0.0;
}
/*
 * xi-sweep forcing kernel: one thread per (j,k) pencil; ue/buf/cuf/q hold
 * a 5-point sliding window of exact-solution values along i
 */
__global__ static void exact_rhs_gpu_kernel_2(double* forcing,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double xi, eta, zeta, dtemp[5], dtpp;
    double ue[5][5], buf[3][5], cuf[3], q[3];
    k=blockIdx.x*blockDim.x+threadIdx.x+1;
    j=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(k>=(nz-1) || j>=(ny-1)){return;}
    using namespace constants_device;
    zeta=(double)k*dnzm1;
    eta=(double)j*dnym1;
    /*
     * ---------------------------------------------------------------------
     * xi-direction flux differences
     *
     * ---------------------------------------------------------------------
     */
    /* prime the sliding window with the exact solution at i = 0, 1, 2 */
    for(i=0; i<3; i++){
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[i+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];
        for(m=1;m<5;m++){buf[i][m]=dtpp*dtemp[m];}
        cuf[i]=buf[i][1]*buf[i][1];
        buf[i][0]=cuf[i]+buf[i][2]*buf[i][2]+buf[i][3]*buf[i][3];
        q[i]=0.5*(buf[i][1]*ue[i+1][1]+buf[i][2]*ue[i+1][2]+buf[i][3]*ue[i+1][3]);
    }
    /* march the window along the interior i points */
    for(i=1; i<nx-1; i++){
        /* fetch the leading (i+2) window entry while it is in range */
        if(i+2<nx){
            xi=(double)(i+2)*dnxm1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
        dtemp[0]=0.0-tx2*(ue[3][1]-ue[1][1])+dx1tx1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=0.0-tx2*((ue[3][1]*buf[2][1]+c2*(ue[3][4]-q[2]))-(ue[1][1]*buf[0][1]+c2*(ue[1][4]-q[0])))+xxcon1*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dx2tx1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
        dtemp[2]=0.0-tx2*(ue[3][2]*buf[2][1]-ue[1][2]*buf[0][1])+xxcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dx3tx1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
        dtemp[3]=0.0-tx2*(ue[3][3]*buf[2][1]-ue[1][3]*buf[0][1])+xxcon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dx4tx1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
        dtemp[4]=0.0-tx2*(buf[2][1]*(c1*ue[3][4]-c2*q[2])-buf[0][1]*(c1*ue[1][4]-c2*q[0]))+0.5*xxcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+xxcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+xxcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dx5tx1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
        /*
         * ---------------------------------------------------------------------
         * fourth-order dissipation
         * ---------------------------------------------------------------------
         */
        if(i==1){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i==2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i>=3 && i<nx-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(i==nx-3){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);}
        }else if(i==nx-2){
            for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);}
        }
        /* shift the sliding window one point forward in i */
        for(m=0;m<5;m++){
            ue[0][m]=ue[1][m];
            ue[1][m]=ue[2][m];
            ue[2][m]=ue[3][m];
            ue[3][m]=ue[4][m];
            buf[0][m]=buf[1][m];
            buf[1][m]=buf[2][m];
        }
        cuf[0]=cuf[1];
        cuf[1]=cuf[2];
        q[0]=q[1];
        q[1]=q[2];
        /* recompute the derived quantities for the new leading window entry */
        if(i<nx-2){
            dtpp=1.0/ue[3][0];
            for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];}
            cuf[2]=buf[2][1]*buf[2][1];
            buf[2][0]=cuf[2]+buf[2][2]*buf[2][2]+buf[2][3]*buf[2][3];
            q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]);
        }
    }
}
/*
 * eta-sweep forcing kernel: one thread per (i,k) pencil, sliding window
 * along j (same structure as the xi sweep)
 */
__global__ static void exact_rhs_gpu_kernel_3(double* forcing,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double xi, eta, zeta, dtemp[5], dtpp;
    double ue[5][5], buf[3][5], cuf[3], q[3];
    k=blockIdx.x*blockDim.x+threadIdx.x+1;
    i=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(k>=nz-1 || i>=nx-1){return;}
    using namespace constants_device;
    zeta=(double)k*dnzm1;
    xi=(double)i*dnxm1;
    /*
     * ---------------------------------------------------------------------
     * eta-direction flux differences
     * ---------------------------------------------------------------------
     */
    for(j=0; j<3; j++){
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[j+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];
        for(m=1;m<5;m++){buf[j][m]=dtpp*dtemp[m];}
        cuf[j]=buf[j][2]*buf[j][2];
        buf[j][0]=cuf[j]+buf[j][1]*buf[j][1]+buf[j][3]*buf[j][3];
        q[j]=0.5*(buf[j][1]*ue[j+1][1]+buf[j][2]*ue[j+1][2]+buf[j][3]*ue[j+1][3]);
    }
    for(j=1; j<ny-1; j++){
        if(j+2<ny){
            eta=(double)(j+2)*dnym1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
        dtemp[0]=forcing(0,i,j,k)-ty2*(ue[3][2]-ue[1][2])+dy1ty1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=forcing(1,i,j,k)-ty2*(ue[3][1]*buf[2][2]-ue[1][1]*buf[0][2])+yycon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dy2ty1*(ue[3][1]-2.0*ue[2][1]+ ue[1][1]);
dtemp[2]=forcing(2,i,j,k)-ty2*((ue[3][2]*buf[2][2]+c2*(ue[3][4]-q[2]))-(ue[1][2]*buf[0][2]+c2*(ue[1][4]-q[0])))+yycon1*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dy3ty1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]); dtemp[3]=forcing(3,i,j,k)-ty2*(ue[3][3]*buf[2][2]-ue[1][3]*buf[0][2])+yycon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dy4ty1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]); dtemp[4]=forcing(4,i,j,k)-ty2*(buf[2][2]*(c1*ue[3][4]-c2*q[2])-buf[0][2]*(c1*ue[1][4]-c2*q[0]))+0.5*yycon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+yycon4*(cuf[2]-2.0*cuf[1]+cuf[0])+yycon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dy5ty1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]); /* * --------------------------------------------------------------------- * fourth-order dissipation * --------------------------------------------------------------------- */ if(j==1){ for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m] +ue[4][m]);} }else if(j==2){ for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);} }else if(j>=3 && j<ny-3){ for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);} }else if(j==ny-3){ for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);} }else if(j==ny-2){ for(m=0;m<5;m++){forcing(m,i,j,k)=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);} } for(m=0; m<5; m++){ ue[0][m]=ue[1][m]; ue[1][m]=ue[2][m]; ue[2][m]=ue[3][m]; ue[3][m]=ue[4][m]; buf[0][m]=buf[1][m]; buf[1][m]=buf[2][m]; } cuf[0]=cuf[1]; cuf[1]=cuf[2]; q[0]=q[1]; q[1]=q[2]; if(j<ny-2){ dtpp=1.0/ue[3][0]; for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];} cuf[2]=buf[2][2]*buf[2][2]; buf[2][0]=cuf[2]+buf[2][1]*buf[2][1]+buf[2][3]*buf[2][3]; q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]); } } } __global__ static void exact_rhs_gpu_kernel_4(double* forcing, const int nx, const int ny, const int nz){ int i, j, k, m; double xi, eta, zeta, dtpp, dtemp[5]; double ue[5][5], buf[3][5], cuf[3], q[3]; 
    /* launch maps blockIdx/threadIdx.x to j and .y to i, both offset past the boundary */
    j=blockIdx.x*blockDim.x+threadIdx.x+1;
    i=blockIdx.y*blockDim.y+threadIdx.y+1;
    if(j>=ny-1 || i>=nx-1){return;} /* guard: grid may overshoot the interior */
    using namespace constants_device;
    eta=(double)j*dnym1;
    xi=(double)i*dnxm1;
    /*
     * ---------------------------------------------------------------------
     * zeta-direction flux differences
     * ---------------------------------------------------------------------
     */
    /* prime the sliding window with the first three zeta stations (k = 0, 1, 2) */
    for(k=0; k<3; k++){
        zeta=(double)k*dnzm1;
        exact_solution_gpu_device(xi, eta, zeta, dtemp);
        for(m=0;m<5;m++){ue[k+1][m]=dtemp[m];}
        dtpp=1.0/dtemp[0];
        for(m=1;m<5;m++){buf[k][m]=dtpp*dtemp[m];}
        cuf[k]=buf[k][3]*buf[k][3]; /* component 3 squared — the transport component of this sweep */
        buf[k][0]=cuf[k]+buf[k][1]*buf[k][1]+buf[k][2]*buf[k][2];
        q[k]=0.5*(buf[k][1]*ue[k+1][1]+buf[k][2]*ue[k+1][2]+buf[k][3]*ue[k+1][3]);
    }
    for(k=1; k<nz-1; k++){
        /* fetch station k+2 unless it would fall off the grid */
        if(k+2<nz){
            zeta=(double)(k+2)*dnzm1;
            exact_solution_gpu_device(xi, eta, zeta, dtemp);
            for(m=0;m<5;m++){ue[4][m]=dtemp[m];}
        }
        /* central flux difference (tz2 terms) plus second-difference viscous terms */
        dtemp[0]=forcing(0,i,j,k)-tz2*(ue[3][3]-ue[1][3])+dz1tz1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
        dtemp[1]=forcing(1,i,j,k)-tz2*(ue[3][1]*buf[2][3]-ue[1][1]*buf[0][3])+zzcon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dz2tz1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
        dtemp[2]=forcing(2,i,j,k)-tz2*(ue[3][2]*buf[2][3]-ue[1][2]*buf[0][3])+zzcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dz3tz1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
        dtemp[3]=forcing(3,i,j,k)-tz2*((ue[3][3]*buf[2][3]+c2*(ue[3][4]-q[2]))-(ue[1][3]*buf[0][3]+c2*(ue[1][4]-q[0])))+zzcon1*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dz4tz1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
        dtemp[4]=forcing(4,i,j,k)-tz2*(buf[2][3]*(c1*ue[3][4]-c2*q[2])-buf[0][3]*(c1*ue[1][4]-c2*q[0]))+0.5*zzcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+zzcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+zzcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dz5tz1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
        /*
         * ---------------------------------------------------------------------
         * fourth-order dissipation: one-sided stencils near the boundaries,
         * full 5-point stencil in the interior.  Unlike kernels 2/3 the result
         * is accumulated in dtemp, because the sign flip below writes forcing.
         * ---------------------------------------------------------------------
         */
        if(k==1){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k==2){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k>=3 && k<nz-3){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);}
        }else if(k==nz-3){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]);}
        }else if(k==nz-2){
            for(m=0;m<5;m++){dtemp[m]=dtemp[m]-dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);}
        }
        /*
         * ---------------------------------------------------------------------
         * now change the sign of the forcing function
         * ---------------------------------------------------------------------
         */
        for(m=0;m<5;m++){forcing(m,i,j,k)=-1.0*dtemp[m];}
        /* slide the windows one step in k */
        for(m=0; m<5; m++){
            ue[0][m]=ue[1][m];
            ue[1][m]=ue[2][m];
            ue[2][m]=ue[3][m];
            ue[3][m]=ue[4][m];
            buf[0][m]=buf[1][m];
            buf[1][m]=buf[2][m];
        }
        cuf[0]=cuf[1];
        cuf[1]=cuf[2];
        q[0]=q[1];
        q[1]=q[2];
        if(k<nz-2){
            /* rebuild the leading window entry from the freshly loaded ue[3] row */
            dtpp=1.0/ue[3][0];
            for(m=1;m<5;m++){buf[2][m]=dtpp*ue[3][m];}
            cuf[2]=buf[2][3]*buf[2][3];
            buf[2][0]=cuf[2]+buf[2][1]*buf[2][1]+buf[2][2]*buf[2][2];
            q[2]=0.5*(buf[2][1]*ue[3][1]+buf[2][2]*ue[3][2]+buf[2][3]*ue[3][3]);
        }
    }
}

/*
 * ---------------------------------------------------------------------
 * this function returns the exact solution at point xi, eta, zeta
 * ---------------------------------------------------------------------
 * Evaluates, for each of the 5 components, a sum of three quartic
 * polynomials (one per coordinate) with coefficients taken from the
 * __constant__ table ce[13][5] set up by set_constants().
 */
__device__ static void exact_solution_gpu_device(const double xi,
        const double eta,
        const double zeta,
        double* dtemp){
    using namespace constants_device;
    for(int m=0; m<5; m++){
        /* Horner evaluation in each coordinate */
        dtemp[m]=ce[0][m]+xi*
            (ce[1][m]+xi*
             (ce[4][m]+xi*
              (ce[7][m]+xi*
               ce[10][m])))+eta*
            (ce[2][m]+eta*
             (ce[5][m]+eta*
              (ce[8][m]+eta*
               ce[11][m])))+zeta*
            (ce[3][m]+zeta*
             (ce[6][m]+zeta*
              (ce[9][m]+zeta*
               ce[12][m])));
    }
}

/*
 * ---------------------------------------------------------------------
 * this subroutine initializes the field variable u using
 * tri-linear transfinite interpolation of the boundary values
 * ---------------------------------------------------------------------
 */
/*
 * Host wrapper: launches initialize_gpu_kernel over the whole (nx,ny,nz) grid.
 * Launch layout: one block per (k,j) pair, one thread per i — so the
 * threads-per-block count must equal nx; the configured constant is
 * overridden when it does not match.
 */
static void initialize_gpu(){
#if defined(PROFILING)
    timer_start(PROFILING_INITIALIZE);
#endif
    /* #KERNEL INITIALIZE */
    int initialize_threads_per_block;
    dim3 initialize_blocks_per_grid(nz, ny);
    if(THREADS_PER_BLOCK_ON_INITIALIZE != nx){
        /* the kernel indexes i by threadIdx.x, so blockDim.x must be exactly nx */
        initialize_threads_per_block = nx;
    }
    else{
        initialize_threads_per_block = THREADS_PER_BLOCK_ON_INITIALIZE;
    }
    initialize_gpu_kernel<<<
        initialize_blocks_per_grid,
        initialize_threads_per_block>>>(
                u_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_INITIALIZE);
#endif
}

/*
 * Device kernel: sets the initial field u by tri-linear transfinite
 * interpolation of the six boundary faces, then overwrites the boundary
 * points themselves with the exact solution.
 * Thread mapping: k = blockIdx.x, j = blockIdx.y, i = threadIdx.x
 * (every grid point, boundaries included).
 * u(...) is a project macro indexing the flat field array.
 */
__global__ static void initialize_gpu_kernel(double* u,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double xi, eta, zeta, temp[5];
    /* Pface<a><b>: exact solution on the two faces of coordinate a (b=1 low, b=2 high) */
    double Pface11[5], Pface12[5], Pface21[5], Pface22[5], Pface31[5], Pface32[5];
    k=blockIdx.x;
    j=blockIdx.y;
    i=threadIdx.x;
    using namespace constants_device;
    /*
     * ---------------------------------------------------------------------
     * later (in compute_rhs_gpu) we compute 1/u for every element. a few of
     * the corner elements are not used, but it convenient (and faster)
     * to compute the whole thing with a simple loop. make sure those
     * values are nonzero by initializing the whole thing here.
     * ---------------------------------------------------------------------
     */
    u(0,i,j,k)=1.0;
    u(1,i,j,k)=0.0;
    u(2,i,j,k)=0.0;
    u(3,i,j,k)=0.0;
    u(4,i,j,k)=1.0;
    /*
     * ---------------------------------------------------------------------
     * first store the "interpolated" values everywhere on the grid
     * ---------------------------------------------------------------------
     */
    zeta=(double)k*dnzm1;
    eta=(double)j*dnym1;
    xi=(double)i*dnxm1;
    /* exact solution on the six faces, projected to this point's coordinates */
    exact_solution_gpu_device(0.0, eta, zeta, Pface11);
    exact_solution_gpu_device(1.0, eta, zeta, Pface12);
    exact_solution_gpu_device(xi, 0.0, zeta, Pface21);
    exact_solution_gpu_device(xi, 1.0, zeta, Pface22);
    exact_solution_gpu_device(xi, eta, 0.0, Pface31);
    exact_solution_gpu_device(xi, eta, 1.0, Pface32);
    for(m=0; m<5; m++){
        /* blend opposite faces linearly, then combine with the standard
         * transfinite-interpolation inclusion/exclusion formula */
        double Pxi=xi*Pface12[m]+(1.0-xi)*Pface11[m];
        double Peta=eta*Pface22[m]+(1.0-eta)*Pface21[m];
        double Pzeta=zeta*Pface32[m]+(1.0-zeta)*Pface31[m];
        u(m,i,j,k)=Pxi+Peta+Pzeta-Pxi*Peta-Pxi*Pzeta-Peta*Pzeta+Pxi*Peta*Pzeta;
    }
    /*
     * ---------------------------------------------------------------------
     * now store the exact values on the boundaries
     * ---------------------------------------------------------------------
     * west face
     * ---------------------------------------------------------------------
     */
    xi=0.0;
    if(i==0){
        zeta=(double)k*dnzm1;
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * east face
     * ---------------------------------------------------------------------
     */
    xi=1.0;
    if(i==nx-1){
        zeta=(double)k*dnzm1;
        eta=(double)j*dnym1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * south face
     * ---------------------------------------------------------------------
     */
    eta=0.0;
    if(j==0){
        zeta=(double)k*dnzm1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * north face
     * ---------------------------------------------------------------------
     */
    eta=1.0;
    if(j==ny-1){
        zeta=(double)k*dnzm1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * bottom face
     * ---------------------------------------------------------------------
     */
    zeta=0.0;
    if(k==0){
        eta=(double)j*dnym1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
    /*
     * ---------------------------------------------------------------------
     * top face
     * ---------------------------------------------------------------------
     */
    zeta=1.0;
    if(k==nz-1){
        eta=(double)j*dnym1;
        xi=(double)i*dnxm1;
        exact_solution_gpu_device(xi, eta, zeta, temp);
        for(m=0;m<5;m++){u(m,i,j,k)=temp[m];}
    }
}

/* Frees every device allocation made in setup_gpu(). */
static void release_gpu(){
    cudaFree(u_device);
    cudaFree(forcing_device);
    cudaFree(rhs_device);
    cudaFree(rho_i_device);
    cudaFree(us_device);
    cudaFree(vs_device);
    cudaFree(ws_device);
    cudaFree(qs_device);
    cudaFree(speed_device);
    cudaFree(square_device);
    cudaFree(lhs_device);
    cudaFree(rhs_buffer_device);
    cudaFree(rms_buffer_device);
}

/*
 * Computes the 5 RMS norms of the rhs array into rms[0..4].
 * Two stages: kernel 1 produces one partial sum per (i,j) column in
 * rms_buffer_device; kernel 2 (a single block, using dynamic shared
 * memory) reduces those partials and writes the final square-rooted
 * norms to the front of the buffer, which is then copied back to host.
 * NOTE(review): the cudaMemcpy at the end is the blocking synchronization
 * point for both kernels.
 */
static void rhs_norm_gpu(double rms[]){
#if defined(PROFILING)
    timer_start(PROFILING_RHS_NORM_1);
#endif
    /* #KERNEL RHS NORM 1 */
    int rhs_norm_1_threads_per_block = THREADS_PER_BLOCK_ON_RHS_NORM_1;
    dim3 rhs_norm_1_blocks_per_grid(ny, nx);
    rhs_norm_gpu_kernel_1<<<
        rhs_norm_1_blocks_per_grid,
        rhs_norm_1_threads_per_block>>>(
                rms_buffer_device,
                rhs_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_RHS_NORM_1);
#endif
#if defined(PROFILING)
    timer_start(PROFILING_RHS_NORM_2);
#endif
    /* #KERNEL RHS NORM 2 — single block; shared memory holds 5 partials per thread */
    int rhs_norm_2_threads_per_block = THREADS_PER_BLOCK_ON_RHS_NORM_2;
    int rhs_norm_2_blocks_per_grid = 1;
    rhs_norm_gpu_kernel_2<<<
        rhs_norm_2_blocks_per_grid,
        rhs_norm_2_threads_per_block,
        sizeof(double)*rhs_norm_2_threads_per_block*5>>>(
                rms_buffer_device,
                nx,
                ny,
                nz);
#if defined(PROFILING)
    timer_stop(PROFILING_RHS_NORM_2);
#endif
    cudaMemcpy(rms, rms_buffer_device, 5*sizeof(double), cudaMemcpyDeviceToHost);
}

/*
 * Stage 1 of the rhs norm: one thread per (i,j) pair (blockIdx/threadIdx.x
 * maps to j, .y to i).  Each interior thread accumulates the squared rhs
 * over its k column; boundary threads contribute zeros.  Partial sums are
 * written as 5 planes of nx*ny values: rms[i + nx*(j + ny*m)].
 * rhs(...) is a project macro indexing the flat rhs array.
 */
__global__ static void rhs_norm_gpu_kernel_1(double* rms,
        const double* rhs,
        const int nx,
        const int ny,
        const int nz){
    int i, j, k, m;
    double rms_loc[5];
    j=blockIdx.x*blockDim.x+threadIdx.x;
    i=blockIdx.y*blockDim.y+threadIdx.y;
    if(j>=ny || i>=nx){return;}
    for(m=0;m<5;m++){rms_loc[m]=0.0;}
    if(i>=1 && i<nx-1 && j>=1 && j<ny-1){
        for(k=1; k<nz-1; k++){
            for(int m=0; m<5; m++){
                double add=rhs(m,i,j,k);
                rms_loc[m]+=add*add;
            }
        }
    }
    for(m=0;m<5;m++){rms[i+nx*(j+ny*m)]=rms_loc[m];}
}

/*
 * Stage 2 of the rhs norm: run with exactly ONE block.  Each thread first
 * strides over the nx*ny partials for all 5 components, accumulating into
 * dynamic shared memory (extern_share_data, a project-declared shared
 * buffer of blockDim.x*5 doubles), then a shared-memory tree reduction
 * collapses the block.  Threads 0..4 finally write
 * sqrt(sum / interior-point-count) for their component into rms[0..4].
 */
__global__ static void rhs_norm_gpu_kernel_2(double* rms,
        const int nx,
        const int ny,
        const int nz){
    int i, m, maxpos, dist;
    double* buffer = (double*)extern_share_data;
    i = threadIdx.x;
    for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]=0.0;}
    /* grid-stride-style accumulation over the nx*ny partial sums */
    while(i<nx*ny){
        for(m=0;m<5;m++){buffer[threadIdx.x+(m*blockDim.x)]+=rms[i+nx*ny*m];}
        i+=blockDim.x;
    }
    maxpos=blockDim.x;
    dist=(maxpos+1)/2;
    i=threadIdx.x;
    __syncthreads(); /* all partials in shared memory before the tree reduction */
    while(maxpos>1){
        if(i<dist && i+dist<maxpos){
            for(m=0;m<5;m++){buffer[i+(m*blockDim.x)]+=buffer[(i+dist)+(m*blockDim.x)];}
        }
        maxpos=dist;
        dist=(dist+1)/2;
        __syncthreads(); /* barrier is outside the divergent if — whole block reaches it */
    }
    m=threadIdx.x;
    if(m<5){rms[m]=sqrt(buffer[0+(m*blockDim.x)]/((double)(nz-2)*(double)(ny-2)*(double)(nx-2)));}
}

/*
 * Computes all solver constants on the host and uploads them into the
 * constants_device __constant__ namespace via cudaMemcpyToSymbol.
 * (Declaration list continues on the next chunk.)
 */
static void set_constants(){
    double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3, dx1, dx2, dx3, dx4,
           dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
           dxmax, dymax, dzmax, xxcon1, xxcon2, xxcon3, xxcon4, xxcon5,
           dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, yycon1, yycon2, yycon3,
           yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, zzcon1,
           zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1,
           dz5tz1, dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1,
           c2, c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1,
           dtty2, dttz1, dttz2,
           c2dttx1, c2dtty1, c2dttz1, comz1, comz4,
           comz5, comz6, c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16,
           ce[13][5];
    /* coefficient table for the exact-solution polynomials, component 0 */
    ce[0][0]=2.0;
    ce[1][0]=0.0;
    ce[2][0]=0.0;
    ce[3][0]=4.0;
    ce[4][0]=5.0;
    ce[5][0]=3.0;
    ce[6][0]=0.5;
    ce[7][0]=0.02;
    ce[8][0]=0.01;
    ce[9][0]=0.03;
    ce[10][0]=0.5;
    ce[11][0]=0.4;
    ce[12][0]=0.3;
    /* component 1 */
    ce[0][1]=1.0;
    ce[1][1]=0.0;
    ce[2][1]=0.0;
    ce[3][1]=0.0;
    ce[4][1]=1.0;
    ce[5][1]=2.0;
    ce[6][1]=3.0;
    ce[7][1]=0.01;
    ce[8][1]=0.03;
    ce[9][1]=0.02;
    ce[10][1]=0.4;
    ce[11][1]=0.3;
    ce[12][1]=0.5;
    /* component 2 */
    ce[0][2]=2.0;
    ce[1][2]=2.0;
    ce[2][2]=0.0;
    ce[3][2]=0.0;
    ce[4][2]=0.0;
    ce[5][2]=2.0;
    ce[6][2]=3.0;
    ce[7][2]=0.04;
    ce[8][2]=0.03;
    ce[9][2]=0.05;
    ce[10][2]=0.3;
    ce[11][2]=0.5;
    ce[12][2]=0.4;
    /* component 3 */
    ce[0][3]=2.0;
    ce[1][3]=2.0;
    ce[2][3]=0.0;
    ce[3][3]=0.0;
    ce[4][3]=0.0;
    ce[5][3]=2.0;
    ce[6][3]=3.0;
    ce[7][3]=0.03;
    ce[8][3]=0.05;
    ce[9][3]=0.04;
    ce[10][3]=0.2;
    ce[11][3]=0.1;
    ce[12][3]=0.3;
    /* component 4 */
    ce[0][4]=5.0;
    ce[1][4]=4.0;
    ce[2][4]=3.0;
    ce[3][4]=2.0;
    ce[4][4]=0.1;
    ce[5][4]=0.4;
    ce[6][4]=0.3;
    ce[7][4]=0.05;
    ce[8][4]=0.04;
    ce[9][4]=0.03;
    ce[10][4]=0.1;
    ce[11][4]=0.3;
    ce[12][4]=0.2;
    /* basic constants; dt comes from the host-side configuration (dt_host) */
    bt=sqrt(0.5);
    dt=dt_host;
    c1=1.4;
    c2=0.4;
    c3=0.1;
    c4=1.0;
    c5=1.4;
    /* inverse grid spacings */
    dnxm1=1.0/(double)(grid_points[0]-1);
    dnym1=1.0/(double)(grid_points[1]-1);
    dnzm1=1.0/(double)(grid_points[2]-1);
    c1c2=c1*c2;
    c1c5=c1*c5;
    c3c4=c3*c4;
    c1345=c1c5*c3c4;
    conz1=(1.0-c1c5);
    tx1=1.0/(dnxm1*dnxm1);
    tx2=1.0/(2.0*dnxm1);
    tx3=1.0/dnxm1;
    ty1=1.0/(dnym1*dnym1);
    ty2=1.0/(2.0*dnym1);
    ty3=1.0/dnym1;
    tz1=1.0/(dnzm1*dnzm1);
    tz2=1.0/(2.0*dnzm1);
    tz3=1.0/dnzm1;
    /* per-direction dissipation coefficients */
    dx1=0.75;
    dx2=0.75;
    dx3=0.75;
    dx4=0.75;
    dx5=0.75;
    dy1=0.75;
    dy2=0.75;
    dy3=0.75;
    dy4=0.75;
    dy5=0.75;
    dz1=1.0;
    dz2=1.0;
    dz3=1.0;
    dz4=1.0;
    dz5=1.0;
    dxmax=max(dx3, dx4);
    dymax=max(dy2, dy4);
    dzmax=max(dz2, dz3);
    dssp=0.25*max(dx1, max(dy1, dz1));
    c4dssp=4.0*dssp;
    c5dssp=5.0*dssp;
    /* dt-scaled combinations */
    dttx1=dt*tx1;
    dttx2=dt*tx2;
    dtty1=dt*ty1;
    dtty2=dt*ty2;
    dttz1=dt*tz1;
    dttz2=dt*tz2;
    c2dttx1=2.0*dttx1;
    c2dtty1=2.0*dtty1;
    c2dttz1=2.0*dttz1;
    dtdssp=dt*dssp;
    comz1=dtdssp;
    comz4=4.0*dtdssp;
    comz5=5.0*dtdssp;
    comz6=6.0*dtdssp;
    c3c4tx3=c3c4*tx3;
    c3c4ty3=c3c4*ty3;
    c3c4tz3=c3c4*tz3;
    dx1tx1=dx1*tx1;
    dx2tx1=dx2*tx1;
    dx3tx1=dx3*tx1;
    dx4tx1=dx4*tx1;
    dx5tx1=dx5*tx1;
    dy1ty1=dy1*ty1;
    dy2ty1=dy2*ty1;
    dy3ty1=dy3*ty1;
    dy4ty1=dy4*ty1;
    dy5ty1=dy5*ty1;
    dz1tz1=dz1*tz1;
    dz2tz1=dz2*tz1;
    dz3tz1=dz3*tz1;
    dz4tz1=dz4*tz1;
    dz5tz1=dz5*tz1;
    c2iv=2.5;
    con43=4.0/3.0;
    con16=1.0/6.0;
    /* viscous-term coefficient groups per direction */
    xxcon1=c3c4tx3*con43*tx3;
    xxcon2=c3c4tx3*tx3;
    xxcon3=c3c4tx3*conz1*tx3;
    xxcon4=c3c4tx3*con16*tx3;
    xxcon5=c3c4tx3*c1c5*tx3;
    yycon1=c3c4ty3*con43*ty3;
    yycon2=c3c4ty3*ty3;
    yycon3=c3c4ty3*conz1*ty3;
    yycon4=c3c4ty3*con16*ty3;
    yycon5=c3c4ty3*c1c5*ty3;
    zzcon1=c3c4tz3*con43*tz3;
    zzcon2=c3c4tz3*tz3;
    zzcon3=c3c4tz3*conz1*tz3;
    zzcon4=c3c4tz3*con16*tz3;
    zzcon5=c3c4tz3*c1c5*tz3;
    /* upload everything into __constant__ memory (constants_device namespace) */
    cudaMemcpyToSymbol(constants_device::ce, &ce, 13*5*sizeof(double));
    cudaMemcpyToSymbol(constants_device::dt, &dt, sizeof(double));
    cudaMemcpyToSymbol(constants_device::bt, &bt, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c1, &c1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c2, &c2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c3, &c3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c4, &c4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c5, &c5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dnxm1, &dnxm1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dnym1, &dnym1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dnzm1, &dnzm1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c1c2, &c1c2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c1c5, &c1c5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c3c4, &c3c4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c1345, &c1345, sizeof(double));
    cudaMemcpyToSymbol(constants_device::conz1, &conz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tx1, &tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tx2, &tx2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tx3, &tx3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::ty1, &ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::ty2, &ty2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::ty3, &ty3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tz1, &tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tz2, &tz2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::tz3, &tz3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx1, &dx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx2, &dx2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx3, &dx3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx4, &dx4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx5, &dx5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy1, &dy1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy2, &dy2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy3, &dy3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy4, &dy4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy5, &dy5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz1, &dz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz2, &dz2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz3, &dz3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz4, &dz4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz5, &dz5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dxmax, &dxmax, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dymax, &dymax, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dzmax, &dzmax, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dssp, &dssp, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c4dssp, &c4dssp, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c5dssp, &c5dssp, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dttx1, &dttx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dttx2, &dttx2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dtty1, &dtty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dtty2, &dtty2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dttz1, &dttz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dttz2, &dttz2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c2dttx1, &c2dttx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c2dtty1, &c2dtty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c2dttz1, &c2dttz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dtdssp, &dtdssp, sizeof(double));
    cudaMemcpyToSymbol(constants_device::comz1, &comz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::comz4, &comz4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::comz5, &comz5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::comz6, &comz6, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c3c4tx3, &c3c4tx3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c3c4ty3, &c3c4ty3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c3c4tz3, &c3c4tz3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx1tx1, &dx1tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx2tx1, &dx2tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx3tx1, &dx3tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx4tx1, &dx4tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dx5tx1, &dx5tx1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy1ty1, &dy1ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy2ty1, &dy2ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy3ty1, &dy3ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy4ty1, &dy4ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dy5ty1, &dy5ty1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz1tz1, &dz1tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz2tz1, &dz2tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz3tz1, &dz3tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz4tz1, &dz4tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::dz5tz1, &dz5tz1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::c2iv, &c2iv, sizeof(double));
    cudaMemcpyToSymbol(constants_device::con43, &con43, sizeof(double));
    cudaMemcpyToSymbol(constants_device::con16, &con16, sizeof(double));
    cudaMemcpyToSymbol(constants_device::xxcon1, &xxcon1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::xxcon2, &xxcon2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::xxcon3, &xxcon3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::xxcon4, &xxcon4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::xxcon5, &xxcon5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::yycon1, &yycon1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::yycon2, &yycon2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::yycon3, &yycon3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::yycon4, &yycon4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::yycon5, &yycon5, sizeof(double));
    cudaMemcpyToSymbol(constants_device::zzcon1, &zzcon1, sizeof(double));
    cudaMemcpyToSymbol(constants_device::zzcon2, &zzcon2, sizeof(double));
    cudaMemcpyToSymbol(constants_device::zzcon3, &zzcon3, sizeof(double));
    cudaMemcpyToSymbol(constants_device::zzcon4, &zzcon4, sizeof(double));
    cudaMemcpyToSymbol(constants_device::zzcon5, &zzcon5, sizeof(double));
}

/*
 * Selects the CUDA device, chooses per-kernel block sizes (clamped to the
 * device limit, falling back to the warp size), and allocates every device
 * array used by the solver.  (Body continues in the next chunk.)
 */
static void setup_gpu(){
    /*
     * struct cudaDeviceProp{
     * char name[256];
     * size_t totalGlobalMem;
     * size_t sharedMemPerBlock;
     * int regsPerBlock;
     * int warpSize;
     * size_t memPitch;
     * int maxThreadsPerBlock;
     * int maxThreadsDim[3];
     * int maxGridSize[3];
     * size_t totalConstMem;
     * int major;
     * int minor;
     * int clockRate;
     * size_t textureAlignment;
     * int deviceOverlap;
     * int multiProcessorCount;
     * int kernelExecTimeoutEnabled;
     * int integrated;
     * int canMapHostMemory;
     * int computeMode;
     * int concurrentKernels;
     * int ECCEnabled;
     * int pciBusID;
     * int pciDeviceID;
     * int tccDriver;
     * }
     */
    /* amount of available devices */
    cudaGetDeviceCount(&total_devices);
    /* define gpu_device: use GPU_DEVICE if it names a valid device, else device 0 */
    if(total_devices==0){
        printf("\n\n\nNo Nvidia GPU found!\n\n\n");
        exit(-1);
    }else if((GPU_DEVICE>=0)&&
            (GPU_DEVICE<total_devices)){
        gpu_device_id = GPU_DEVICE;
    }else{
        gpu_device_id = 0;
    }
    cudaSetDevice(gpu_device_id);
    cudaGetDeviceProperties(&gpu_device_properties, gpu_device_id);
    /* define threads_per_block: each configured SP_THREADS_PER_BLOCK_ON_* value
     * is accepted only when it is in [1, maxThreadsPerBlock]; otherwise the
     * device warp size is used as a safe default */
    if((SP_THREADS_PER_BLOCK_ON_ADD>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ADD<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ADD = SP_THREADS_PER_BLOCK_ON_ADD;
    }
    else{
        THREADS_PER_BLOCK_ON_ADD = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_1 = SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_1;
    }
    else{
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 = SP_THREADS_PER_BLOCK_ON_COMPUTE_RHS_2;
    }
    else{
        THREADS_PER_BLOCK_ON_COMPUTE_RHS_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ERROR_NORM_1 = SP_THREADS_PER_BLOCK_ON_ERROR_NORM_1;
    }
    else{
        THREADS_PER_BLOCK_ON_ERROR_NORM_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_ERROR_NORM_2 = SP_THREADS_PER_BLOCK_ON_ERROR_NORM_2;
    }
    else{
        THREADS_PER_BLOCK_ON_ERROR_NORM_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_1 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_1;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_1 =
            gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_2 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_2;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_3 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_3;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_3 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4>=1)&&
            (SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_EXACT_RHS_4 = SP_THREADS_PER_BLOCK_ON_EXACT_RHS_4;
    }
    else{
        THREADS_PER_BLOCK_ON_EXACT_RHS_4=gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_INITIALIZE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_INITIALIZE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_INITIALIZE = SP_THREADS_PER_BLOCK_ON_INITIALIZE;
    }
    else{
        THREADS_PER_BLOCK_ON_INITIALIZE=gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_RHS_NORM_1>=1)&&
            (SP_THREADS_PER_BLOCK_ON_RHS_NORM_1<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_RHS_NORM_1 = SP_THREADS_PER_BLOCK_ON_RHS_NORM_1;
    }
    else{
        THREADS_PER_BLOCK_ON_RHS_NORM_1 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_RHS_NORM_2>=1)&&
            (SP_THREADS_PER_BLOCK_ON_RHS_NORM_2<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_RHS_NORM_2 = SP_THREADS_PER_BLOCK_ON_RHS_NORM_2;
    }
    else{
        THREADS_PER_BLOCK_ON_RHS_NORM_2 = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_TXINVR>=1)&&
            (SP_THREADS_PER_BLOCK_ON_TXINVR<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_TXINVR = SP_THREADS_PER_BLOCK_ON_TXINVR;
    }
    else{
        THREADS_PER_BLOCK_ON_TXINVR = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_X_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_X_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_X_SOLVE = SP_THREADS_PER_BLOCK_ON_X_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_X_SOLVE = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_Y_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_Y_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_Y_SOLVE = SP_THREADS_PER_BLOCK_ON_Y_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_Y_SOLVE = gpu_device_properties.warpSize;
    }
    if((SP_THREADS_PER_BLOCK_ON_Z_SOLVE>=1)&&
            (SP_THREADS_PER_BLOCK_ON_Z_SOLVE<=gpu_device_properties.maxThreadsPerBlock)){
        THREADS_PER_BLOCK_ON_Z_SOLVE = SP_THREADS_PER_BLOCK_ON_Z_SOLVE;
    }
    else{
        THREADS_PER_BLOCK_ON_Z_SOLVE = gpu_device_properties.warpSize;
    }
    /* allocation sizes: 5-component fields take 5*gridsize doubles, scalar
     * fields gridsize, the lhs workspace 9*gridsize, and the norm buffer
     * 5 values per point of the largest face */
    int gridsize=nx*ny*nz;
    int facesize=max(max(nx*ny, nx*nz), ny*nz);
    size_u_device=sizeof(double)*(5*gridsize);
    size_forcing_device=sizeof(double)*(5*gridsize);
    size_rhs_device=sizeof(double)*(5*gridsize);
    size_rho_i_device=sizeof(double)*(gridsize);
    size_us_device=sizeof(double)*(gridsize);
    size_vs_device=sizeof(double)*(gridsize);
    size_ws_device=sizeof(double)*(gridsize);
    size_qs_device=sizeof(double)*(gridsize);
    size_speed_device=sizeof(double)*(gridsize);
    size_square_device=sizeof(double)*(gridsize);
    size_lhs_device=sizeof(double)*(9*gridsize);
    size_rhs_buffer_device=sizeof(double)*(5*gridsize);
    size_rms_buffer_device=sizeof(double)*(5*facesize);
    cudaMalloc(&u_device, size_u_device);
    cudaMalloc(&forcing_device, size_forcing_device);
    cudaMalloc(&rhs_device, size_rhs_device);
    cudaMalloc(&rho_i_device, size_rho_i_device);
    cudaMalloc(&us_device, size_us_device);
    cudaMalloc(&vs_device, size_vs_device);
    cudaMalloc(&ws_device, size_ws_device);
    cudaMalloc(&qs_device, size_qs_device);
    cudaMalloc(&speed_device, size_speed_device);
    cudaMalloc(&square_device, size_square_device);
    cudaMalloc(&lhs_device, size_lhs_device);
    cudaMalloc(&rhs_buffer_device, size_rhs_buffer_device);
    cudaMalloc(&rms_buffer_device, size_rms_buffer_device);
}

/*
 *
--------------------------------------------------------------------- * block-diagonal matrix-vector multiplication * --------------------------------------------------------------------- */ static void txinvr_gpu(){ #if defined(PROFILING) timer_start(PROFILING_TXINVR); #endif /* #KERNEL TXINVR */ int txinvr_workload = nx * ny * nz; int txinvr_threads_per_block = THREADS_PER_BLOCK_ON_TXINVR; int txinvr_blocks_per_grid = (ceil((double)txinvr_workload/(double)txinvr_threads_per_block)); txinvr_gpu_kernel<<< txinvr_blocks_per_grid, txinvr_threads_per_block>>>( rho_i_device, us_device, vs_device, ws_device, speed_device, qs_device, rhs_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_TXINVR); #endif } __global__ static void txinvr_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, double* rhs, const int nx, const int ny, const int nz){ int i_j_k, i, j, k; i_j_k = blockIdx.x * blockDim.x + threadIdx.x; i = i_j_k % nx; j = (i_j_k / nx) % ny; k = i_j_k / (nx * ny); if(i_j_k >= (nx*ny*nz)){ return; } using namespace constants_device; double ru1=rho_i(i,j,k); double uu=us(i,j,k); double vv=vs(i,j,k); double ww=ws(i,j,k); double ac=speed(i,j,k); double ac2inv=1.0/(ac*ac); double r1=rhs(0,i,j,k); double r2=rhs(1,i,j,k); double r3=rhs(2,i,j,k); double r4=rhs(3,i,j,k); double r5=rhs(4,i,j,k); double t1=c2*ac2inv*(qs(i,j,k)*r1-uu*r2-vv*r3-ww*r4+r5); double t2=bt*ru1*(uu*r1-r2); double t3=(bt*ru1*ac)*t1; rhs(0,i,j,k)=r1-t1; rhs(1,i,j,k)=-ru1*(ww*r1-r4); rhs(2,i,j,k)=ru1*(vv*r1-r3); rhs(3,i,j,k)=-t2+t3; rhs(4,i,j,k)=t2+t3; } /* * --------------------------------------------------------------------- * verification routine * --------------------------------------------------------------------- */ static void verify_gpu(int no_time_steps, char* class_npb, boolean* verified){ double dt=dt_host; double xcrref[5], xceref[5], xcrdif[5], xcedif[5], epsilon, xce[5], xcr[5], dtref; int m; /* * 
--------------------------------------------------------------------- * tolerance level * --------------------------------------------------------------------- */ epsilon=1.0e-08; /* * --------------------------------------------------------------------- * compute the error norm and the residual norm, and exit if not printing * --------------------------------------------------------------------- */ error_norm_gpu(xce); compute_rhs_gpu(); rhs_norm_gpu(xcr); for(m=0;m<5;m++){xcr[m]=xcr[m]/dt;} *class_npb='U'; *verified=TRUE; for(m=0;m<5;m++){xcrref[m]=1.0;xceref[m]=1.0;} /* * --------------------------------------------------------------------- * reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02 * --------------------------------------------------------------------- */ if((grid_points[0]==12)&&(grid_points[1]==12)&&(grid_points[2]==12)&&(no_time_steps==100)){ *class_npb='S'; dtref=1.5e-2; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * --------------------------------------------------------------------- */ xcrref[0]=2.7470315451339479e-02; xcrref[1]=1.0360746705285417e-02; xcrref[2]=1.6235745065095532e-02; xcrref[3]=1.5840557224455615e-02; xcrref[4]=3.4849040609362460e-02; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=2.7289258557377227e-05; xceref[1]=1.0364446640837285e-05; xceref[2]=1.6154798287166471e-05; xceref[3]=1.5750704994480102e-05; xceref[4]=3.4177666183390531e-05; /* * --------------------------------------------------------------------- * reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==36)&&(grid_points[1]==36)&&(grid_points[2]==36)&&(no_time_steps==400)){ *class_npb='W'; 
dtref=1.5e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * --------------------------------------------------------------------- */ xcrref[0]=0.1893253733584e-02; xcrref[1]=0.1717075447775e-03; xcrref[2]=0.2778153350936e-03; xcrref[3]=0.2887475409984e-03; xcrref[4]=0.3143611161242e-02; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=0.7542088599534e-04; xceref[1]=0.6512852253086e-05; xceref[2]=0.1049092285688e-04; xceref[3]=0.1128838671535e-04; xceref[4]=0.1212845639773e-03; /* * --------------------------------------------------------------------- * reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==64)&&(grid_points[1]==64)&&(grid_points[2]==64)&&(no_time_steps==400)){ *class_npb='A'; dtref=1.5e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual. * --------------------------------------------------------------------- */ xcrref[0]=2.4799822399300195; xcrref[1]=1.1276337964368832; xcrref[2]=1.5028977888770491; xcrref[3]=1.4217816211695179; xcrref[4]=2.1292113035138280; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error. 
* --------------------------------------------------------------------- */ xceref[0]=1.0900140297820550e-04; xceref[1]=3.7343951769282091e-05; xceref[2]=5.0092785406541633e-05; xceref[3]=4.7671093939528255e-05; xceref[4]=1.3621613399213001e-04; /* * --------------------------------------------------------------------- * reference data for 102X102X102 grids after 400 time steps, * with DT = 1.0d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==102)&&(grid_points[1]==102)&&(grid_points[2]==102)&&(no_time_steps==400)){ *class_npb='B'; dtref=1.0e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * --------------------------------------------------------------------- */ xcrref[0]=0.6903293579998e+02; xcrref[1]=0.3095134488084e+02; xcrref[2]=0.4103336647017e+02; xcrref[3]=0.3864769009604e+02; xcrref[4]=0.5643482272596e+02; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=0.9810006190188e-02; xceref[1]=0.1022827905670e-02; xceref[2]=0.1720597911692e-02; xceref[3]=0.1694479428231e-02; xceref[4]=0.1847456263981e-01; /* * --------------------------------------------------------------------- * reference data for 162X162X162 grids after 400 time steps, * with DT = 0.67d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==162)&&(grid_points[1]==162)&&(grid_points[2]==162)&&(no_time_steps==400)){ *class_npb='C'; dtref=0.67e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * --------------------------------------------------------------------- */ xcrref[0]=0.5881691581829e+03; xcrref[1]=0.2454417603569e+03; xcrref[2]=0.3293829191851e+03; xcrref[3]=0.3081924971891e+03; 
xcrref[4]=0.4597223799176e+03; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=0.2598120500183e+00; xceref[1]=0.2590888922315e-01; xceref[2]=0.5132886416320e-01; xceref[3]=0.4806073419454e-01; xceref[4]=0.5483377491301e+00; /* * --------------------------------------------------------------------- * reference data for 408X408X408 grids after 500 time steps, * with DT = 0.3d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==408)&&(grid_points[1]==408)&&(grid_points[2]==408)&&(no_time_steps==500)){ *class_npb='D'; dtref=0.30e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * --------------------------------------------------------------------- */ xcrref[0]=0.1044696216887e+05; xcrref[1]=0.3204427762578e+04; xcrref[2]=0.4648680733032e+04; xcrref[3]=0.4238923283697e+04; xcrref[4]=0.7588412036136e+04; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=0.5089471423669e+01; xceref[1]=0.5323514855894e+00; xceref[2]=0.1187051008971e+01; xceref[3]=0.1083734951938e+01; xceref[4]=0.1164108338568e+02; /* * --------------------------------------------------------------------- * reference data for 1020X1020X1020 grids after 500 time steps, * with DT = 0.1d-03 * --------------------------------------------------------------------- */ }else if((grid_points[0]==1020)&&(grid_points[1]==1020)&&(grid_points[2]==1020)&&(no_time_steps==500)){ *class_npb='E'; dtref=0.10e-3; /* * --------------------------------------------------------------------- * reference values of RMS-norms of residual * 
--------------------------------------------------------------------- */ xcrref[0]=0.6255387422609e+05; xcrref[1]=0.1495317020012e+05; xcrref[2]=0.2347595750586e+05; xcrref[3]=0.2091099783534e+05; xcrref[4]=0.4770412841218e+05; /* * --------------------------------------------------------------------- * reference values of RMS-norms of solution error * --------------------------------------------------------------------- */ xceref[0]=0.6742735164909e+02; xceref[1]=0.5390656036938e+01; xceref[2]=0.1680647196477e+02; xceref[3]=0.1536963126457e+02; xceref[4]=0.1575330146156e+03; }else{ *verified=FALSE; } /* * --------------------------------------------------------------------- * verification test for residuals if gridsize is one of * the defined grid sizes above (class .ne. 'U') * --------------------------------------------------------------------- * compute the difference of solution values and the known reference values * --------------------------------------------------------------------- */ for(m=0; m<5; m++){ xcrdif[m]=fabs((xcr[m]-xcrref[m])/xcrref[m]); xcedif[m]=fabs((xce[m]-xceref[m])/xceref[m]); } /* * --------------------------------------------------------------------- * output the comparison of computed results to known cases * --------------------------------------------------------------------- */ if(*class_npb!='U'){ printf(" Verification being performed for class %c\n",*class_npb); printf(" accuracy setting for epsilon = %20.13E\n",epsilon); *verified=(fabs(dt-dtref)<=epsilon); if(!(*verified)){ *class_npb='U'; printf(" DT does not match the reference value of %15.8E\n",dtref); } }else{ printf(" Unknown class\n"); } if(*class_npb!='U'){ printf(" Comparison of RMS-norms of residual\n"); }else{ printf(" RMS-norms of residual\n"); } for(m=0;m<5;m++){ if(*class_npb=='U'){ printf(" %2d%20.13E\n",m+1,xcr[m]); }else if(xcrdif[m]<=epsilon){ printf(" %2d%20.13E%20.13E%20.13E\n",m+1,xcr[m],xcrref[m],xcrdif[m]); }else { *verified=FALSE; printf(" FAILURE: 
%2d%20.13E%20.13E%20.13E\n",m+1,xcr[m],xcrref[m],xcrdif[m]);
    }
  }
  /* report the solution-error norms, mirroring the residual report above */
  if(*class_npb!='U'){
    printf(" Comparison of RMS-norms of solution error\n");
  }else{
    printf(" RMS-norms of solution error\n");
  }
  for(m=0;m<5;m++){
    if(*class_npb=='U'){
      printf(" %2d%20.13E\n",m+1,xce[m]);
    }else if(xcedif[m]<=epsilon){
      printf(" %2d%20.13E%20.13E%20.13E\n",m+1,xce[m],xceref[m],xcedif[m]);
    }else{
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",m+1,xce[m],xceref[m],xcedif[m]);
    }
  }
  /* final verdict: any FAILURE above has cleared *verified */
  if(*class_npb=='U'){
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  }else if(*verified){
    printf(" Verification Successful\n");
  }else{
    printf(" Verification failed\n");
  }
}
/*
 * ---------------------------------------------------------------------
 * this function performs the solution of the approximate factorization
 * step in the x-direction for all five matrix components
 * simultaneously. the thomas algorithm is employed to solve the
 * systems for the x-lines. boundary conditions are non-periodic
 * ---------------------------------------------------------------------
 */
static void x_solve_gpu(){
#if defined(PROFILING)
  timer_start(PROFILING_X_SOLVE);
#endif
  /* #KERNEL X SOLVE */
  /*
   * Launch: one thread per (j,k) line; grid is (1,nz) blocks.
   * NOTE(review): both branches below resolve to ny threads per block
   * (the else is taken only when the macro already equals ny), so the
   * kernel is effectively always launched with ny threads per block.
   */
  int x_solve_threads_per_block;
  dim3 x_solve_blocks_per_grid(1, nz);
  if(THREADS_PER_BLOCK_ON_X_SOLVE != ny){
    x_solve_threads_per_block = ny;
  }
  else{
    x_solve_threads_per_block = THREADS_PER_BLOCK_ON_X_SOLVE;
  }
  x_solve_gpu_kernel<<< x_solve_blocks_per_grid, x_solve_threads_per_block>>>(
      rho_i_device, us_device, speed_device, rhs_device, lhs_device,
      rhs_buffer_device, nx, ny, nz);
#if defined(PROFILING)
  timer_stop(PROFILING_X_SOLVE);
#endif
}

/*
 * Each thread owns one (j,k) line and runs a full scalar-pentadiagonal
 * Thomas solve along x in registers, keeping only a sliding 3-row window
 * (_lhs/_lhsp/_rhs) instead of the whole line to limit register pressure.
 * lhs/lhsp/lhsm are three component views packed into the single lhs
 * buffer; rtmp stages rhs values with j as the fastest-varying index so
 * that, per the layout, accesses across a warp of j-threads are coalesced.
 */
__global__ static void x_solve_gpu_kernel(const double* rho_i,
    const double* us,
    const double* speed,
    double* rhs,
    double* lhs,
    double* rhstmp,
    const int nx,
    const int ny,
    const int nz){
/* packed interior layouts: j fastest, then k, then i, then component m */
#define lhs(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3)))]
#define lhsp(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m+4)))]
#define lhsm(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3+2)))]
#define rtmp(m,i,j,k) rhstmp[(j)+ny*((k)+nz*((i)+nx*(m)))]
  int i, j, k, m;
  /* rhon/cv: 3-point stencil pipeline; _lhs/_lhsp/_rhs: 3-row window */
  double rhon[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1;
  /* coalesced */
  j=blockIdx.x*blockDim.x+threadIdx.x+1;
  k=blockIdx.y*blockDim.y+threadIdx.y+1;
  /* uncoalesced */
  /* k=blockIdx.x*blockDim.x+threadIdx.x+1; */
  /* j=blockIdx.y*blockDim.y+threadIdx.y+1; */
  /* guard: interior points only (1..ny-2, 1..nz-2) */
  if((k>=nz-1) || (j>=ny-1)){return;}
  using namespace constants_device;
  /*
   * ---------------------------------------------------------------------
   * computes the left hand side for the three x-factors
   * ---------------------------------------------------------------------
   * first fill the lhs for the u-eigenvalue
   * ---------------------------------------------------------------------
   */
  _lhs[0][0]=lhsp(0,0,j,k)=0.0;
  _lhs[0][1]=lhsp(1,0,j,k)=0.0;
  _lhs[0][2]=lhsp(2,0,j,k)=1.0;
  _lhs[0][3]=lhsp(3,0,j,k)=0.0;
  _lhs[0][4]=lhsp(4,0,j,k)=0.0;
  for(i=0; i<3; i++){
    fac1=c3c4*rho_i(i,j,k);
    rhon[i]=max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), dx1);
    cv[i]=us(i,j,k);
  }
  _lhs[1][0]=0.0;
  _lhs[1][1]=-dttx2*cv[0]-dttx1*rhon[0];
  _lhs[1][2]=1.0+c2dttx1*rhon[1];
  _lhs[1][3]=dttx2*cv[2]-dttx1*rhon[2];
  _lhs[1][4]=0.0;
  _lhs[1][2]+=comz5;
  _lhs[1][3]-=comz4;
  _lhs[1][4]+=comz1;
  for(m=0; m<5; m++){lhsp(m,1,j,k)=_lhs[1][m];}
  /* shift the stencil pipeline one point forward */
  rhon[0]=rhon[1];
  rhon[1]=rhon[2];
  cv[0]=cv[1];
  cv[1]=cv[2];
  for(m=0; m<3; m++){
    _rhs[0][m]=rhs(m,0,j,k);
    _rhs[1][m]=rhs(m,1,j,k);
  }
  /*
   * ---------------------------------------------------------------------
   * FORWARD ELIMINATION
   * ---------------------------------------------------------------------
   * perform the thomas algorithm; first, FORWARD ELIMINATION
   * ---------------------------------------------------------------------
   */
  for(i=0; i<nx-2; i++){
    /*
     * ---------------------------------------------------------------------
     * first fill the lhs for the u-eigenvalue
     * ---------------------------------------------------------------------
     */
    if((i+2)==(nx-1)){
      /* last grid point: identity row (non-periodic boundary) */
      _lhs[2][0]=lhsp(0,i+2,j,k)=0.0;
      _lhs[2][1]=lhsp(1,i+2,j,k)=0.0;
      _lhs[2][2]=lhsp(2,i+2,j,k)=1.0;
      _lhs[2][3]=lhsp(3,i+2,j,k)=0.0;
      _lhs[2][4]=lhsp(4,i+2,j,k)=0.0;
    }else{
      fac1=c3c4*rho_i(i+3,j,k);
      rhon[2]=max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), dx1);
      cv[2]=us(i+3,j,k);
      _lhs[2][0]=0.0;
      _lhs[2][1]=-dttx2*cv[0]-dttx1*rhon[0];
      _lhs[2][2]=1.0+c2dttx1*rhon[1];
      _lhs[2][3]=dttx2*cv[2]-dttx1*rhon[2];
      _lhs[2][4]=0.0;
      /*
       * ---------------------------------------------------------------------
       * add fourth order dissipation
       * ---------------------------------------------------------------------
       */
      if((i+2)==(2)){
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
        _lhs[2][4]+=comz1;
      }else if((i+2>=3) && (i+2<nx-3)){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
        _lhs[2][4]+=comz1;
      }else if((i+2)==(nx-3)){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
      }else if((i+2)==(nx-2)){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz5;
      }
      /*
       * ---------------------------------------------------------------------
       * store computed lhs for later reuse
       * ---------------------------------------------------------------------
       */
      for(m=0;m<5;m++){lhsp(m,i+2,j,k)=_lhs[2][m];}
      rhon[0]=rhon[1];
      rhon[1]=rhon[2];
      cv[0]=cv[1];
      cv[1]=cv[2];
    }
    /*
     * ---------------------------------------------------------------------
     * load rhs values for current iteration
     * ---------------------------------------------------------------------
     */
    for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i+2,j,k);}
    /*
     * ---------------------------------------------------------------------
     * perform current iteration
     * ---------------------------------------------------------------------
     */
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
    _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
    _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
    for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];}
    /*
     * ---------------------------------------------------------------------
     * store computed lhs and prepare data for next iteration
     * rhs is stored in a temp array such that write accesses are coalesced
     * ---------------------------------------------------------------------
     */
    lhs(3,i,j,k)=_lhs[0][3];
    lhs(4,i,j,k)=_lhs[0][4];
    for(m=0; m<5; m++){
      _lhs[0][m]=_lhs[1][m];
      _lhs[1][m]=_lhs[2][m];
    }
    for(m=0; m<3; m++){
      rtmp(m,i,j,k)=_rhs[0][m];
      _rhs[0][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[2][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * the last two rows in this zone are a bit different,
   * since they do not have two more rows available for the
   * elimination of off-diagonal entries
   * ---------------------------------------------------------------------
   */
  i=nx-2;
  fac1=1.0/_lhs[0][2];
  _lhs[0][3]*=fac1;
  _lhs[0][4]*=fac1;
  for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
  _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
  _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
  for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
  /*
   * ---------------------------------------------------------------------
   * scale the last row immediately
   * ---------------------------------------------------------------------
   */
  fac1=1.0/_lhs[1][2];
  for(m=0;m<3;m++){_rhs[1][m]*=fac1;}
  lhs(3,nx-2,j,k)=_lhs[0][3];
  lhs(4,nx-2,j,k)=_lhs[0][4];
  /*
   * ---------------------------------------------------------------------
   * subsequently, fill the other factors (u+c), (u-c)
   * ---------------------------------------------------------------------
   */
  for(i=0;i<3;i++){cv[i]=speed(i,j,k);}
  for(m=0; m<5; m++){
    _lhsp[0][m]=_lhs[0][m]=lhsp(m,0,j,k);
    _lhsp[1][m]=_lhs[1][m]=lhsp(m,1,j,k);
  }
  /* perturb the stored u-eigenvalue rows by +/- dttx2*speed */
  _lhsp[1][1]-= dttx2*cv[0];
  _lhsp[1][3]+=dttx2*cv[2];
  _lhs[1][1]+=dttx2*cv[0];
  _lhs[1][3]-=dttx2*cv[2];
  cv[0]=cv[1];
  cv[1]=cv[2];
  _rhs[0][3]=rhs(3,0,j,k);
  _rhs[0][4]=rhs(4,0,j,k);
  _rhs[1][3]=rhs(3,1,j,k);
  _rhs[1][4]=rhs(4,1,j,k);
  /*
   * ---------------------------------------------------------------------
   * do the u+c and the u-c factors
   * ---------------------------------------------------------------------
   */
  for(i=0; i<nx-2; i++){
    /*
     * first, fill the other factors (u+c), (u-c)
     * ---------------------------------------------------------------------
     */
    for(m=0; m<5; m++){
      _lhsp[2][m]=_lhs[2][m]=lhsp(m,i+2,j,k);
    }
    _rhs[2][3]=rhs(3,i+2,j,k);
    _rhs[2][4]=rhs(4,i+2,j,k);
    if((i+2)<(nx-1)){
      cv[2]=speed(i+3,j,k);
      _lhsp[2][1]-=dttx2*cv[0];
      _lhsp[2][3]+=dttx2*cv[2];
      _lhs[2][1]+=dttx2*cv[0];
      _lhs[2][3]-=dttx2*cv[2];
      cv[0]=cv[1];
      cv[1]=cv[2];
    }
    /* component 3 uses the (u+c) factor _lhsp */
    m=3;
    fac1=1.0/_lhsp[0][2];
    _lhsp[0][3]*=fac1;
    _lhsp[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
    _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
    _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
    _lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3];
    _lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4];
    _rhs[2][m]-=_lhsp[2][0]*_rhs[0][m];
    /* component 4 uses the (u-c) factor _lhs */
    m=4;
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
    _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
    _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
    _rhs[2][m]-=_lhs[2][0]*_rhs[0][m];
    /*
     * ---------------------------------------------------------------------
     * store computed lhs and prepare data for next iteration
     * rhs is stored in a temp array such that write accesses are coalesced
     * ---------------------------------------------------------------------
     */
    for(m=3; m<5; m++){
      lhsp(m,i,j,k)=_lhsp[0][m];
      lhsm(m,i,j,k)=_lhs[0][m];
      rtmp(m,i,j,k)=_rhs[0][m];
      _rhs[0][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[2][m];
    }
    for(m=0; m<5; m++){
      _lhsp[0][m]=_lhsp[1][m];
      _lhsp[1][m]=_lhsp[2][m];
      _lhs[0][m]=_lhs[1][m];
      _lhs[1][m]=_lhs[2][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * and again the last two rows separately
   * ---------------------------------------------------------------------
   */
  i=nx-2;
  m=3;
  fac1=1.0/_lhsp[0][2];
  _lhsp[0][3]*=fac1;
  _lhsp[0][4]*=fac1;
  _rhs[0][m]*=fac1;
  _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
  _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
  _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
  m=4;
  fac1=1.0/_lhs[0][2];
  _lhs[0][3]*=fac1;
  _lhs[0][4]*=fac1;
  _rhs[0][m]*=fac1;
  _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
  _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
  _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
  /*
   * ---------------------------------------------------------------------
   * scale the last row immediately
   * ---------------------------------------------------------------------
   */
  _rhs[1][3]/=_lhsp[1][2];
  _rhs[1][4]/=_lhs[1][2];
  /*
   * ---------------------------------------------------------------------
   * BACKSUBSTITUTION
   * ---------------------------------------------------------------------
   */
  for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,nx-2,j,k)*_rhs[1][m];}
  _rhs[0][3]-=_lhsp[0][3]*_rhs[1][3];
  _rhs[0][4]-=_lhs[0][3]*_rhs[1][4];
  for(m=0; m<5; m++){
    _rhs[2][m]=_rhs[1][m];
    _rhs[1][m]=_rhs[0][m];
  }
  for(i=nx-3; i>=0; i--){
    /*
     * ---------------------------------------------------------------------
     * the first three factors
     * ---------------------------------------------------------------------
     */
    for(m=0; m<3; m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];}
    /*
     * ---------------------------------------------------------------------
     * and the remaining two
     * ---------------------------------------------------------------------
     */
    _rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3];
    _rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4];
    if(i+2<nx-1){
      /*
       * ---------------------------------------------------------------------
       * do the block-diagonal inversion
       * ---------------------------------------------------------------------
       */
      double r1=_rhs[2][0];
      double r2=_rhs[2][1];
      double r3=_rhs[2][2];
      double r4=_rhs[2][3];
      double r5=_rhs[2][4];
      double t1=bt*r3;
      double t2=0.5*(r4+r5);
      _rhs[2][0]=-r2;
      _rhs[2][1]=r1;
      _rhs[2][2]=bt*(r4-r5);
      _rhs[2][3]=-t1+t2;
      _rhs[2][4]=t1+t2;
    }
    /* write back the fully solved row i+2 and shift the window */
    for(m=0; m<5; m++){
      rhs(m,i+2,j,k)=_rhs[2][m];
      _rhs[2][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[0][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * do the block-diagonal inversion
   * ---------------------------------------------------------------------
   */
  double t1=bt*_rhs[2][2];
  double t2=0.5*(_rhs[2][3]+_rhs[2][4]);
  rhs(0,1,j,k)=-_rhs[2][1];
  rhs(1,1,j,k)=_rhs[2][0];
  rhs(2,1,j,k)=bt*(_rhs[2][3]-_rhs[2][4]);
  rhs(3,1,j,k)=-t1+t2;
  rhs(4,1,j,k)=t1+t2;
  for(m=0;m<5;m++){rhs(m,0,j,k)=_rhs[1][m];}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
}
/*
 * ---------------------------------------------------------------------
 * this function performs the solution of the approximate factorization
 * step in the y-direction for all five matrix components
 * simultaneously. the thomas algorithm is employed to solve the
 * systems for the y-lines. boundary conditions are non-periodic
 * ---------------------------------------------------------------------
 */
static void y_solve_gpu(){
#if defined(PROFILING)
  timer_start(PROFILING_Y_SOLVE);
#endif
  /* #KERNEL Y SOLVE */
  /* one thread per (i,k) line; see NOTE on x_solve_gpu: both branches
     resolve to nx threads per block */
  int y_solve_threads_per_block;
  dim3 y_solve_blocks_per_grid(1, nz);
  if(THREADS_PER_BLOCK_ON_Y_SOLVE != nx){
    y_solve_threads_per_block = nx;
  }
  else{
    y_solve_threads_per_block = THREADS_PER_BLOCK_ON_Y_SOLVE;
  }
  y_solve_gpu_kernel<<< y_solve_blocks_per_grid, y_solve_threads_per_block>>>(
      rho_i_device, vs_device, speed_device, rhs_device, lhs_device,
      rhs_buffer_device, nx, ny, nz);
#if defined(PROFILING)
  timer_stop(PROFILING_Y_SOLVE);
#endif
}

/*
 * y-direction analogue of x_solve_gpu_kernel: one thread per (i,k) line,
 * Thomas solve along y with the same 3-row register window; packed
 * layouts below put i fastest so warp accesses are coalesced over i.
 */
__global__ static void y_solve_gpu_kernel(const double* rho_i,
    const double* vs,
    const double* speed,
    double* rhs,
    double* lhs,
    double* rhstmp,
    const int nx,
    const int ny,
    const int nz){
#define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3)))]
#define lhsp(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m+4)))]
#define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3+2)))]
#define rtmp(m,i,j,k) rhstmp[(i)+nx*((k)+nz*((j)+ny*(m)))]
  int i, j, k,
m;
  /* rhoq/cv: 3-point stencil pipeline; _lhs/_lhsp/_rhs: 3-row window */
  double rhoq[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1;
  /* coalesced */
  i=blockIdx.x*blockDim.x+threadIdx.x+1;
  k=blockIdx.y*blockDim.y+threadIdx.y+1;
  /* uncoalesced */
  /* k=blockIdx.x*blockDim.x+threadIdx.x+1; */
  /* i=blockIdx.y*blockDim.y+threadIdx.y+1; */
  /* guard: interior points only (1..nx-2, 1..nz-2) */
  if((k>=(nz-1))||(i>=(nx-1))){return;}
  using namespace constants_device;
  /*
   * ---------------------------------------------------------------------
   * computes the left hand side for the three y-factors
   * ---------------------------------------------------------------------
   * first fill the lhs for the u-eigenvalue
   * ---------------------------------------------------------------------
   */
  _lhs[0][0]=lhsp(0,i,0,k)=0.0;
  _lhs[0][1]=lhsp(1,i,0,k)=0.0;
  _lhs[0][2]=lhsp(2,i,0,k)=1.0;
  _lhs[0][3]=lhsp(3,i,0,k)=0.0;
  _lhs[0][4]=lhsp(4,i,0,k)=0.0;
  for(j=0; j<3; j++){
    fac1=c3c4*rho_i(i,j,k);
    rhoq[j]=max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), dy1);
    cv[j]=vs(i,j,k);
  }
  _lhs[1][0]=0.0;
  _lhs[1][1]=-dtty2*cv[0]-dtty1*rhoq[0];
  _lhs[1][2]=1.0+c2dtty1*rhoq[1];
  _lhs[1][3]=dtty2*cv[2]-dtty1*rhoq[2];
  _lhs[1][4]=0.0;
  _lhs[1][2]+=comz5;
  _lhs[1][3]-=comz4;
  _lhs[1][4]+=comz1;
  for(m=0;m<5;m++){lhsp(m,i,1,k)=_lhs[1][m];}
  /* shift the stencil pipeline one point forward */
  rhoq[0]=rhoq[1];
  rhoq[1]=rhoq[2];
  cv[0]=cv[1];
  cv[1]=cv[2];
  for(m=0; m<3; m++){
    _rhs[0][m]=rhs(m,i,0,k);
    _rhs[1][m]=rhs(m,i,1,k);
  }
  /*
   * ---------------------------------------------------------------------
   * FORWARD ELIMINATION
   * ---------------------------------------------------------------------
   */
  for(j=0; j<ny-2; j++){
    /*
     * ---------------------------------------------------------------------
     * first fill the lhs for the u-eigenvalue
     * ---------------------------------------------------------------------
     */
    if((j+2)==(ny-1)){
      /* last grid point: identity row (non-periodic boundary) */
      _lhs[2][0]=lhsp(0,i,j+2,k)=0.0;
      _lhs[2][1]=lhsp(1,i,j+2,k)=0.0;
      _lhs[2][2]=lhsp(2,i,j+2,k)=1.0;
      _lhs[2][3]=lhsp(3,i,j+2,k)=0.0;
      _lhs[2][4]=lhsp(4,i,j+2,k)=0.0;
    }else{
      fac1=c3c4*rho_i(i,j+3,k);
      rhoq[2]=max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), dy1);
      cv[2]=vs(i,j+3,k);
      _lhs[2][0]=0.0;
      _lhs[2][1]=-dtty2*cv[0]-dtty1*rhoq[0];
      _lhs[2][2]=1.0+c2dtty1*rhoq[1];
      _lhs[2][3]=dtty2*cv[2]-dtty1*rhoq[2];
      _lhs[2][4]=0.0;
      /*
       * ---------------------------------------------------------------------
       * add fourth order dissipation
       * ---------------------------------------------------------------------
       */
      if((j+2)==(2)){
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
        _lhs[2][4]+=comz1;
      }else if(((j+2)>=(3))&&((j+2)<(ny-3))){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
        _lhs[2][4]+=comz1;
      }else if((j+2)==(ny-3)){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz6;
        _lhs[2][3]-=comz4;
      }else if((j+2)==(ny-2)){
        _lhs[2][0]+=comz1;
        _lhs[2][1]-=comz4;
        _lhs[2][2]+=comz5;
      }
      /*
       * ---------------------------------------------------------------------
       * store computed lhs for later reuse
       * ---------------------------------------------------------------------
       */
      for(m=0;m<5;m++){lhsp(m,i,j+2,k)=_lhs[2][m];}
      rhoq[0]=rhoq[1];
      rhoq[1]=rhoq[2];
      cv[0]=cv[1];
      cv[1]=cv[2];
    }
    /*
     * ---------------------------------------------------------------------
     * load rhs values for current iteration
     * ---------------------------------------------------------------------
     */
    for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i,j+2,k);}
    /*
     * ---------------------------------------------------------------------
     * perform current iteration
     * ---------------------------------------------------------------------
     */
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
    _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
    _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
    for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];}
    /*
     * ---------------------------------------------------------------------
     * store computed lhs and prepare data for next iteration
     * rhs is stored in a temp array such that write accesses are coalesced
     * ---------------------------------------------------------------------
     */
    lhs(3,i,j,k)=_lhs[0][3];
    lhs(4,i,j,k)=_lhs[0][4];
    for(m=0; m<5; m++){
      _lhs[0][m]=_lhs[1][m];
      _lhs[1][m]=_lhs[2][m];
    }
    for(m=0; m<3; m++){
      rtmp(m,i,j,k)=_rhs[0][m];
      _rhs[0][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[2][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * the last two rows in this zone are a bit different,
   * since they do not have two more rows available for the
   * elimination of off-diagonal entries
   * ---------------------------------------------------------------------
   */
  j=ny-2;
  fac1=1.0/_lhs[0][2];
  _lhs[0][3]*=fac1;
  _lhs[0][4]*=fac1;
  for(m=0;m<3;m++){_rhs[0][m]*=fac1;}
  _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
  _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
  for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];}
  /*
   * ---------------------------------------------------------------------
   * scale the last row immediately
   * ---------------------------------------------------------------------
   */
  fac1=1.0/_lhs[1][2];
  for(m=0;m<3;m++){_rhs[1][m]*=fac1;}
  lhs(3,i,ny-2,k)=_lhs[0][3];
  lhs(4,i,ny-2,k)=_lhs[0][4];
  /*
   * ---------------------------------------------------------------------
   * do the u+c and the u-c factors
   * ---------------------------------------------------------------------
   */
  for(j=0;j<3;j++){cv[j]=speed(i,j,k);}
  for(m=0; m<5; m++){
    _lhsp[0][m]=_lhs[0][m]=lhsp(m,i,0,k);
    _lhsp[1][m]=_lhs[1][m]=lhsp(m,i,1,k);
  }
  /* perturb the stored u-eigenvalue rows by +/- dtty2*speed */
  _lhsp[1][1]-=dtty2*cv[0];
  _lhsp[1][3]+=dtty2*cv[2];
  _lhs[1][1]+=dtty2*cv[0];
  _lhs[1][3]-=dtty2*cv[2];
  cv[0]=cv[1];
  cv[1]=cv[2];
  _rhs[0][3]=rhs(3,i,0,k);
  _rhs[0][4]=rhs(4,i,0,k);
  _rhs[1][3]=rhs(3,i,1,k);
  _rhs[1][4]=rhs(4,i,1,k);
  for(j=0; j<ny-2; j++){
    for(m=0; m<5; m++){
      _lhsp[2][m]=_lhs[2][m]=lhsp(m,i,j+2,k);
    }
    _rhs[2][3]=rhs(3,i,j+2,k);
    _rhs[2][4]=rhs(4,i,j+2,k);
    if((j+2)<(ny-1)){
      cv[2]=speed(i,j+3,k);
      _lhsp[2][1]-=dtty2*cv[0];
      _lhsp[2][3]+=dtty2*cv[2];
      _lhs[2][1]+=dtty2*cv[0];
      _lhs[2][3]-=dtty2*cv[2];
      cv[0]=cv[1];
      cv[1]=cv[2];
    }
    /* component 3 uses the (u+c) factor _lhsp */
    fac1=1.0/_lhsp[0][2];
    m=3;
    _lhsp[0][3]*=fac1;
    _lhsp[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
    _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
    _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
    _lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3];
    _lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4];
    _rhs[2][m]-=_lhsp[2][0]*_rhs[0][m];
    /* component 4 uses the (u-c) factor _lhs */
    m=4;
    fac1=1.0/_lhs[0][2];
    _lhs[0][3]*=fac1;
    _lhs[0][4]*=fac1;
    _rhs[0][m]*=fac1;
    _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
    _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
    _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
    _lhs[2][1]-=_lhs[2][0]*_lhs[0][3];
    _lhs[2][2]-=_lhs[2][0]*_lhs[0][4];
    _rhs[2][m]-=_lhs[2][0]*_rhs[0][m];
    /*
     * ---------------------------------------------------------------------
     * store computed lhs and prepare data for next iteration
     * rhs is stored in a temp array such that write accesses are coalesced
     * ---------------------------------------------------------------------
     */
    for(m=3; m<5; m++){
      lhsp(m,i,j,k)=_lhsp[0][m];
      lhsm(m,i,j,k)=_lhs[0][m];
      rtmp(m,i,j,k)=_rhs[0][m];
      _rhs[0][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[2][m];
    }
    for(m=0; m<5; m++){
      _lhsp[0][m]=_lhsp[1][m];
      _lhsp[1][m]=_lhsp[2][m];
      _lhs[0][m]=_lhs[1][m];
      _lhs[1][m]=_lhs[2][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * and again the last two rows separately
   * ---------------------------------------------------------------------
   */
  j=ny-2;
  m=3;
  fac1=1.0/_lhsp[0][2];
  _lhsp[0][3]*=fac1;
  _lhsp[0][4]*=fac1;
  _rhs[0][m]*=fac1;
  _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3];
  _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4];
  _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m];
  m=4;
  fac1=1.0/_lhs[0][2];
  _lhs[0][3]*=fac1;
  _lhs[0][4]*=fac1;
  _rhs[0][m]*=fac1;
  _lhs[1][2]-=_lhs[1][1]*_lhs[0][3];
  _lhs[1][3]-=_lhs[1][1]*_lhs[0][4];
  _rhs[1][m]-=_lhs[1][1]*_rhs[0][m];
  /*
   * ---------------------------------------------------------------------
   * scale the last row immediately
   * ---------------------------------------------------------------------
   */
  _rhs[1][3]/=_lhsp[1][2];
  _rhs[1][4]/=_lhs[1][2];
  /*
   * ---------------------------------------------------------------------
   * BACKSUBSTITUTION
   * ---------------------------------------------------------------------
   */
  for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,i,ny-2,k)*_rhs[1][m];}
  _rhs[0][3]-=_lhsp[0][3]*_rhs[1][3];
  _rhs[0][4]-=_lhs[0][3]*_rhs[1][4];
  for(m=0; m<5; m++){
    _rhs[2][m]=_rhs[1][m];
    _rhs[1][m]=_rhs[0][m];
  }
  for(j=ny-3; j>=0; j--){
    /*
     * ---------------------------------------------------------------------
     * the first three factors
     * ---------------------------------------------------------------------
     */
    for(m=0;m<3;m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];}
    /*
     * ---------------------------------------------------------------------
     * and the remaining two
     * ---------------------------------------------------------------------
     */
    _rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3];
    _rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4];
    if((j+2)<(ny-1)){
      /*
       * ---------------------------------------------------------------------
       * do the block-diagonal inversion
       * ---------------------------------------------------------------------
       */
      double r1=_rhs[2][0];
      double r2=_rhs[2][1];
      double r3=_rhs[2][2];
      double r4=_rhs[2][3];
      double r5=_rhs[2][4];
      double t1=bt*r1;
      double t2=0.5*(r4+r5);
      _rhs[2][0]=bt*(r4-r5);
      _rhs[2][1]=-r3;
      _rhs[2][2]=r2;
      _rhs[2][3]=-t1+t2;
      _rhs[2][4]=t1+t2;
    }
    /* write back the fully solved row j+2 and shift the window */
    for(m=0; m<5; m++){
      rhs(m,i,j+2,k)=_rhs[2][m];
      _rhs[2][m]=_rhs[1][m];
      _rhs[1][m]=_rhs[0][m];
    }
  }
  /*
   * ---------------------------------------------------------------------
   * do the block-diagonal inversion
   * ---------------------------------------------------------------------
   */
  double t1=bt*_rhs[2][0];
  double t2=0.5*(_rhs[2][3]+_rhs[2][4]);
  rhs(0,i,1,k)=bt*(_rhs[2][3]-_rhs[2][4]);
  rhs(1,i,1,k)=-_rhs[2][2];
  rhs(2,i,1,k)=_rhs[2][1];
  rhs(3,i,1,k)=-t1+t2;
  rhs(4,i,1,k)=t1+t2;
  for(m=0;m<5;m++){rhs(m,i,0,k)=_rhs[1][m];}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
}
/* *
--------------------------------------------------------------------- * this function performs the solution of the approximate factorization * step in the z-direction for all five matrix components * simultaneously. The Thomas algorithm is employed to solve the * systems for the z-lines. Boundary conditions are non-periodic * --------------------------------------------------------------------- */ static void z_solve_gpu(){ #if defined(PROFILING) timer_start(PROFILING_Z_SOLVE); #endif /* #KERNEL Z SOLVE */ int z_solve_threads_per_block; dim3 z_solve_blocks_per_grid(1, ny); if(THREADS_PER_BLOCK_ON_Z_SOLVE != nx){ z_solve_threads_per_block = nx; } else{ z_solve_threads_per_block = THREADS_PER_BLOCK_ON_Z_SOLVE; } z_solve_gpu_kernel<<< z_solve_blocks_per_grid, z_solve_threads_per_block>>>( rho_i_device, us_device, vs_device, ws_device, speed_device, qs_device, u_device, rhs_device, lhs_device, rhs_buffer_device, nx, ny, nz); #if defined(PROFILING) timer_stop(PROFILING_Z_SOLVE); #endif } __global__ static void z_solve_gpu_kernel(const double* rho_i, const double* us, const double* vs, const double* ws, const double* speed, const double* qs, const double* u, double* rhs, double* lhs, double* rhstmp, const int nx, const int ny, const int nz){ #define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3)))] #define lhsp(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m+4)))] #define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3+2)))] #define rtmp(m,i,j,k) rhstmp[(i)+nx*((j)+ny*((k)+nz*(m)))] int i, j, k, m; double rhos[3], cv[3], _lhs[3][5], _lhsp[3][5], _rhs[3][5], fac1; /* coalesced */ i=blockIdx.x*blockDim.x+threadIdx.x+1; j=blockIdx.y*blockDim.y+threadIdx.y+1; /* uncoalesced */ /* j=blockIdx.x*blockDim.x+threadIdx.x+1; */ /* i=blockIdx.y*blockDim.y+threadIdx.y+1; */ if((j>=(ny-1))||(i>=(nx-1))){return;} using namespace constants_device; /* * --------------------------------------------------------------------- * computes the left hand side for the 
three z-factors * --------------------------------------------------------------------- * first fill the lhs for the u-eigenvalue * --------------------------------------------------------------------- */ _lhs[0][0]=lhsp(0,i,j,0)=0.0; _lhs[0][1]=lhsp(1,i,j,0)=0.0; _lhs[0][2]=lhsp(2,i,j,0)=1.0; _lhs[0][3]=lhsp(3,i,j,0)=0.0; _lhs[0][4]=lhsp(4,i,j,0)=0.0; for(k=0; k<3; k++){ fac1=c3c4*rho_i(i,j,k); rhos[k]=max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), dz1); cv[k]=ws(i,j,k); } _lhs[1][0]=0.0; _lhs[1][1]=-dttz2*cv[0]-dttz1*rhos[0]; _lhs[1][2]=1.0+c2dttz1*rhos[1]; _lhs[1][3]=dttz2*cv[2]-dttz1*rhos[2]; _lhs[1][4]=0.0; _lhs[1][2]+=comz5; _lhs[1][3]-=comz4; _lhs[1][4]+=comz1; for(m=0; m<5; m++){lhsp(m,i,j,1)=_lhs[1][m];} rhos[0]=rhos[1]; rhos[1]=rhos[2]; cv[0]=cv[1]; cv[1]=cv[2]; for(m=0; m<3; m++){ _rhs[0][m]=rhs(m,i,j,0); _rhs[1][m]=rhs(m,i,j,1); } /* * --------------------------------------------------------------------- * FORWARD ELIMINATION * --------------------------------------------------------------------- */ for(k=0; k<nz-2; k++){ /* * --------------------------------------------------------------------- * first fill the lhs for the u-eigenvalue * --------------------------------------------------------------------- */ if((k+2)==(nz-1)){ _lhs[2][0]=lhsp(0,i,j,k+2)=0.0; _lhs[2][1]=lhsp(1,i,j,k+2)=0.0; _lhs[2][2]=lhsp(2,i,j,k+2)=1.0; _lhs[2][3]=lhsp(3,i,j,k+2)=0.0; _lhs[2][4]=lhsp(4,i,j,k+2)=0.0; }else{ fac1=c3c4*rho_i(i,j,k+3); rhos[2]=max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), dz1); cv[2]=ws(i,j,k+3); _lhs[2][0]=0.0; _lhs[2][1]=-dttz2*cv[0]-dttz1*rhos[0]; _lhs[2][2]=1.0+c2dttz1*rhos[1]; _lhs[2][3]=dttz2*cv[2]-dttz1*rhos[2]; _lhs[2][4]=0.0; /* * --------------------------------------------------------------------- * add fourth order dissipation * --------------------------------------------------------------------- */ if((k+2)==(2)){ _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; _lhs[2][4]+=comz1; }else 
if(((k+2)>=(3))&&((k+2)<(nz-3))){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; _lhs[2][4]+=comz1; }else if((k+2)==(nz-3)){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz6; _lhs[2][3]-=comz4; }else if((k+2)==(nz-2)){ _lhs[2][0]+=comz1; _lhs[2][1]-=comz4; _lhs[2][2]+=comz5; } /* * --------------------------------------------------------------------- * store computed lhs for later reuse * --------------------------------------------------------------------- */ for(m=0;m<5;m++){lhsp(m,i,j,k+2)=_lhs[2][m];} rhos[0]=rhos[1]; rhos[1]=rhos[2]; cv[0]=cv[1]; cv[1]=cv[2]; } /* * --------------------------------------------------------------------- * load rhs values for current iteration * --------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[2][m]=rhs(m,i,j,k+2);} /* * --------------------------------------------------------------------- * perform current iteration * --------------------------------------------------------------------- */ fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; for(m=0;m<3;m++){_rhs[0][m]*=fac1;} _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];} _lhs[2][1]-=_lhs[2][0]*_lhs[0][3]; _lhs[2][2]-=_lhs[2][0]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[2][m]-=_lhs[2][0]*_rhs[0][m];} /* * --------------------------------------------------------------------- * store computed lhs and prepare data for next iteration * rhs is stored in a temp array such that write accesses are coalesced * --------------------------------------------------------------------- */ lhs(3,i,j,k)=_lhs[0][3]; lhs(4,i,j,k)=_lhs[0][4]; for(m=0; m<5; m++){ _lhs[0][m]=_lhs[1][m]; _lhs[1][m]=_lhs[2][m]; } for(m=0; m<3; m++){ rtmp(m,i,j,k)=_rhs[0][m]; _rhs[0][m]=_rhs[1][m]; _rhs[1][m]=_rhs[2][m]; } } /* * --------------------------------------------------------------------- * the last two rows in this zone are a bit different, * since they do 
not have two more rows available for the * elimination of off-diagonal entries * --------------------------------------------------------------------- */ k=nz-2; fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; for(m=0;m<3;m++){_rhs[0][m]*=fac1;} _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; for(m=0;m<3;m++){_rhs[1][m]-=_lhs[1][1]*_rhs[0][m];} /* * --------------------------------------------------------------------- * scale the last row immediately * --------------------------------------------------------------------- */ fac1=1.0/_lhs[1][2]; for(m=0;m<3;m++){_rhs[1][m]*=fac1;} lhs(3,i,j,k)=_lhs[0][3]; lhs(4,i,j,k)=_lhs[0][4]; /* * --------------------------------------------------------------------- * subsequently, fill the other factors (u+c), (u-c) * --------------------------------------------------------------------- */ for(k=0;k<3;k++){cv[k]=speed(i,j,k);} for(m=0;m<5;m++){ _lhsp[0][m]=_lhs[0][m]=lhsp(m,i,j,0); _lhsp[1][m]=_lhs[1][m]=lhsp(m,i,j,1); } _lhsp[1][1]-=dttz2*cv[0]; _lhsp[1][3]+=dttz2*cv[2]; _lhs[1][1]+=dttz2*cv[0]; _lhs[1][3]-=dttz2*cv[2]; cv[0]=cv[1]; cv[1]=cv[2]; _rhs[0][3]=rhs(3,i,j,0); _rhs[0][4]=rhs(4,i,j,0); _rhs[1][3]=rhs(3,i,j,1); _rhs[1][4]=rhs(4,i,j,1); /* * --------------------------------------------------------------------- * do the u+c and the u-c factors * --------------------------------------------------------------------- */ for(k=0; k<nz-2; k++){ /* * first, fill the other factors (u+c), (u-c) * --------------------------------------------------------------------- */ for(m=0; m<5; m++){ _lhsp[2][m]=_lhs[2][m]=lhsp(m,i,j,k+2); } _rhs[2][3]=rhs(3,i,j,k+2); _rhs[2][4]=rhs(4,i,j,k+2); if((k+2)<(nz-1)){ cv[2]=speed(i,j,k+3); _lhsp[2][1]-=dttz2*cv[0]; _lhsp[2][3]+=dttz2*cv[2]; _lhs[2][1]+=dttz2*cv[0]; _lhs[2][3]-=dttz2*cv[2]; cv[0]=cv[1]; cv[1]=cv[2]; } m=3; fac1=1.0/_lhsp[0][2]; _lhsp[0][3]*=fac1; _lhsp[0][4]*=fac1; _rhs[0][m]*=fac1; _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3]; 
_lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4]; _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m]; _lhsp[2][1]-=_lhsp[2][0]*_lhsp[0][3]; _lhsp[2][2]-=_lhsp[2][0]*_lhsp[0][4]; _rhs[2][m]-=_lhsp[2][0]*_rhs[0][m]; m=4; fac1=1.0/_lhs[0][2]; _lhs[0][3]*= fac1; _lhs[0][4]*= fac1; _rhs[0][m]*= fac1; _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; _rhs[1][m]-=_lhs[1][1]*_rhs[0][m]; _lhs[2][1]-=_lhs[2][0]*_lhs[0][3]; _lhs[2][2]-=_lhs[2][0]*_lhs[0][4]; _rhs[2][m]-=_lhs[2][0]*_rhs[0][m]; /* * --------------------------------------------------------------------- * store computed lhs and prepare data for next iteration * rhs is stored in a temp array such that write accesses are coalesced * --------------------------------------------------------------------- */ for(m=3; m<5; m++){ lhsp(m,i,j,k)=_lhsp[0][m]; lhsm(m,i,j,k)=_lhs[0][m]; rtmp(m,i,j,k)=_rhs[0][m]; _rhs[0][m]=_rhs[1][m]; _rhs[1][m]=_rhs[2][m]; } for(m=0; m<5; m++){ _lhsp[0][m]=_lhsp[1][m]; _lhsp[1][m]=_lhsp[2][m]; _lhs[0][m]=_lhs[1][m]; _lhs[1][m]=_lhs[2][m]; } } /* * --------------------------------------------------------------------- * and again the last two rows separately * --------------------------------------------------------------------- */ k=nz-2; m=3; fac1=1.0/_lhsp[0][2]; _lhsp[0][3]*=fac1; _lhsp[0][4]*=fac1; _rhs[0][m]*=fac1; _lhsp[1][2]-=_lhsp[1][1]*_lhsp[0][3]; _lhsp[1][3]-=_lhsp[1][1]*_lhsp[0][4]; _rhs[1][m]-=_lhsp[1][1]*_rhs[0][m]; m=4; fac1=1.0/_lhs[0][2]; _lhs[0][3]*=fac1; _lhs[0][4]*=fac1; _rhs[0][m]*=fac1; _lhs[1][2]-=_lhs[1][1]*_lhs[0][3]; _lhs[1][3]-=_lhs[1][1]*_lhs[0][4]; _rhs[1][m]-=_lhs[1][1]*_rhs[0][m]; /* * --------------------------------------------------------------------- * scale the last row immediately * --------------------------------------------------------------------- */ _rhs[1][3]/=_lhsp[1][2]; _rhs[1][4]/=_lhs[1][2]; /* * --------------------------------------------------------------------- * BACKSUBSTITUTION * 
--------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[0][m]-=lhs(3,i,j,nz-2)*_rhs[1][m];} _rhs[0][3]-=_lhsp[0][3]*_rhs[1][3]; _rhs[0][4]-=_lhs[0][3]*_rhs[1][4]; for(m=0; m<5; m++){ _rhs[2][m]=_rhs[1][m]; _rhs[1][m]=_rhs[0][m]; } for(k=nz-3; k>=0; k--){ /* * --------------------------------------------------------------------- * the first three factors * --------------------------------------------------------------------- */ for(m=0;m<3;m++){_rhs[0][m]=rtmp(m,i,j,k)-lhs(3,i,j,k)*_rhs[1][m]-lhs(4,i,j,k)*_rhs[2][m];} /* * --------------------------------------------------------------------- * and the remaining two * --------------------------------------------------------------------- */ _rhs[0][3]=rtmp(3,i,j,k)-lhsp(3,i,j,k)*_rhs[1][3]-lhsp(4,i,j,k)*_rhs[2][3]; _rhs[0][4]=rtmp(4,i,j,k)-lhsm(3,i,j,k)*_rhs[1][4]-lhsm(4,i,j,k)*_rhs[2][4]; if((k+2)<(nz-1)){ /* * --------------------------------------------------------------------- * do the block-diagonal inversion * --------------------------------------------------------------------- */ double xvel=us(i,j,k+2); double yvel=vs(i,j,k+2); double zvel=ws(i,j,k+2); double ac=speed(i,j,k+2); double uzik1=u(0,i,j,k+2); double t1=(bt*uzik1)/ac*(_rhs[2][3]+_rhs[2][4]); double t2=_rhs[2][2]+t1; double t3=bt*uzik1*(_rhs[2][3]-_rhs[2][4]); _rhs[2][4]=uzik1*(-xvel*_rhs[2][1]+yvel*_rhs[2][0])+qs(i,j,k+2)*t2+c2iv*(ac*ac)*t1+zvel*t3; _rhs[2][3]=zvel*t2+t3; _rhs[2][2]=uzik1*_rhs[2][0]+yvel*t2; _rhs[2][1]=-uzik1*_rhs[2][1]+xvel*t2; _rhs[2][0]=t2; } for(m=0; m<5; m++){ rhs(m,i,j,k+2)=_rhs[2][m]; _rhs[2][m]=_rhs[1][m]; _rhs[1][m]=_rhs[0][m]; } } /* * --------------------------------------------------------------------- * do the block-diagonal inversion * --------------------------------------------------------------------- */ double xvel=us(i,j,1); double yvel=vs(i,j,1); double zvel=ws(i,j,1); double ac=speed(i,j,1); double uzik1=u(0,i,j,1); double t1=(bt*uzik1)/ac*(_rhs[2][3]+_rhs[2][4]); double 
t2=_rhs[2][2]+t1; double t3=bt*uzik1*(_rhs[2][3]-_rhs[2][4]); rhs(4,i,j,1)=uzik1*(-xvel*_rhs[2][1]+yvel*_rhs[2][0])+qs(i,j,1)*t2+c2iv*(ac*ac)*t1+zvel*t3; rhs(3,i,j,1)=zvel*t2+t3; rhs(2,i,j,1)=uzik1*_rhs[2][0]+yvel*t2; rhs(1,i,j,1)=-uzik1*_rhs[2][1]+xvel*t2; rhs(0,i,j,1)=t2; for(m=0;m<5;m++){rhs(m,i,j,0)=_rhs[1][m];} #undef lhs #undef lhsp #undef lhsm #undef rtmp }
d3ec3079d16bbd8c95db27121e0252b68f98dc15.hip
// !!! This is a file automatically generated by hipify!!! /* Vector-matrix multiplication: Y = A * X. * Host code. * Author: Naga Kandasamy * Date: 11/11/2015 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <string.h> #include <math.h> #include "vec_mat_mult_kernel.cu" #define MIN_NUMBER 1 #define MAX_NUMBER 4 extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int); Matrix allocate_matrix_on_gpu(const Matrix); Matrix allocate_matrix(int, int, int); void copy_matrix_to_device(Matrix, const Matrix); void copy_matrix_from_device(Matrix, const Matrix); void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix); void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix); void print_matrix(const Matrix); float get_random_number(int, int); int checkResults(float *, float *, int, float); int main(int argc, char** argv) { // Matrices for the program Matrix A; // N x N matrix Matrix X; // N x 1 vector Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector // Initialize the random number generator with a seed value srand(time(NULL)); // Check command line arguments if(argc > 1){ printf("Error. This program accepts no arguments. 
\n"); exit(0); } // Allocate and initialize the matrices A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0); Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0); // compute the vector-matrix multiplication on the CPU for comparison compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns); // Perform the vector-matrix multiplication on the GPU using global memory // Return the results in Y_gpu_1 vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1); // check if the device result is equivalent to the expected solution printf("Checking against reference result. \n"); int size_elements = NUM_ROWS; int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // Perform the vector-matrix multiplication on the GPU using global memory // Return the results in Y_gpu_2 vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2); // check if the device result is equivalent to the expected solution printf("Checking against reference result. \n"); res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); // Free host matrices free(A.elements); A.elements = NULL; free(X.elements); X.elements = NULL; free(Y_cpu.elements); Y_cpu.elements = NULL; free(Y_gpu_1.elements); Y_gpu_1.elements = NULL; free(Y_gpu_2.elements); Y_gpu_2.elements = NULL; return 0; } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use global memory void vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y) { } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use shared memory void vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y) { } // Allocate a device matrix of same size as M. Matrix allocate_matrix_on_gpu(const Matrix M) { Matrix Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Matrix allocate_matrix(int num_rows, int num_columns, int init) { Matrix M; M.num_columns = M.pitch = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < size; i++){ if(init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } // Copy a host matrix to a device matrix. void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. 
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Prints the matrix out to screen void print_matrix(const Matrix M) { for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) printf("%f ", M.elements[i*M.num_columns + j]); printf("\n"); } printf("\n"); } // Returns a random floating-point number between the specified min and max values float get_random_number(int min, int max){ return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX))); } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
d3ec3079d16bbd8c95db27121e0252b68f98dc15.cu
/* Vector-matrix multiplication: Y = A * X. * Host code. * Author: Naga Kandasamy * Date: 11/11/2015 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <string.h> #include <math.h> #include "vec_mat_mult_kernel.cu" #define MIN_NUMBER 1 #define MAX_NUMBER 4 extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int); Matrix allocate_matrix_on_gpu(const Matrix); Matrix allocate_matrix(int, int, int); void copy_matrix_to_device(Matrix, const Matrix); void copy_matrix_from_device(Matrix, const Matrix); void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix); void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix); void print_matrix(const Matrix); float get_random_number(int, int); int checkResults(float *, float *, int, float); int main(int argc, char** argv) { // Matrices for the program Matrix A; // N x N matrix Matrix X; // N x 1 vector Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector // Initialize the random number generator with a seed value srand(time(NULL)); // Check command line arguments if(argc > 1){ printf("Error. This program accepts no arguments. \n"); exit(0); } // Allocate and initialize the matrices A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0); Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0); // compute the vector-matrix multiplication on the CPU for comparison compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns); // Perform the vector-matrix multiplication on the GPU using global memory // Return the results in Y_gpu_1 vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1); // check if the device result is equivalent to the expected solution printf("Checking against reference result. 
\n"); int size_elements = NUM_ROWS; int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // Perform the vector-matrix multiplication on the GPU using global memory // Return the results in Y_gpu_2 vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2); // check if the device result is equivalent to the expected solution printf("Checking against reference result. \n"); res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // Free host matrices free(A.elements); A.elements = NULL; free(X.elements); X.elements = NULL; free(Y_cpu.elements); Y_cpu.elements = NULL; free(Y_gpu_1.elements); Y_gpu_1.elements = NULL; free(Y_gpu_2.elements); Y_gpu_2.elements = NULL; return 0; } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use global memory void vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y) { } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use shared memory void vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y) { } // Allocate a device matrix of same size as M. Matrix allocate_matrix_on_gpu(const Matrix M) { Matrix Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. 
Matrix allocate_matrix(int num_rows, int num_columns, int init) { Matrix M; M.num_columns = M.pitch = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < size; i++){ if(init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } // Copy a host matrix to a device matrix. void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Prints the matrix out to screen void print_matrix(const Matrix M) { for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) printf("%f ", M.elements[i*M.num_columns + j]); printf("\n"); } printf("\n"); } // Returns a random floating-point number between the specified min and max values float get_random_number(int min, int max){ return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX))); } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
1c17f530a58c4513d057a4f81389d981a0b91904.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include "conserv_kernels.cu" #include "conserv_kernels_wrappers.cu" //#include "conserv_kernels_wrappers.cu" #include "time_integrator.cu" #include "../quadrature.cu" #include "../basis.cu" extern int local_N; extern int limiter; extern int time_integrator; // limiter optoins #define NO_LIMITER 0 #define LIMITER 1 // time integration options #define RK4 1 #define RK2 2 // riemann solver options #define LLF 1 /* 2dadvec_euler.cu * * This file calls the kernels in 2dadvec_kernels_euler.cu for the 2D advection * DG method. */ /* set quadrature * * sets the 1d quadrature integration points and weights for the boundary integrals * and the 2d quadrature integration points and weights for the volume intergrals. */ void set_quadrature(int n, double **r1_local, double **r2_local, double **w_local, double **s_r, double **oned_w_local, int *n_quad, int *n_quad1d) { int i; /* * The sides are mapped to the canonical element, so we want the integration points * for the boundary integrals for sides s1, s2, and s3 as shown below: s (r2) |\ ^ | \ | | \ | | \ | s3 | \ s2 | | \ | | \ | | \ | |________\ | s1 | ------------------------> r (r1) * */ switch (n) { case 0: *n_quad = 1; *n_quad1d = 1; break; case 1: *n_quad = 3; *n_quad1d = 2; break; case 2: *n_quad = 6; *n_quad1d = 3; break; case 3: *n_quad = 12 ; *n_quad1d = 4; break; case 4: *n_quad = 16; *n_quad1d = 5; break; case 5: *n_quad = 25; *n_quad1d = 6; break; } // allocate integration points *r1_local = (double *) malloc(*n_quad * sizeof(double)); *r2_local = (double *) malloc(*n_quad * sizeof(double)); *w_local = (double *) malloc(*n_quad * sizeof(double)); *s_r = (double *) malloc(*n_quad1d * sizeof(double)); *oned_w_local = (double *) malloc(*n_quad1d * sizeof(double)); // set 2D quadrature rules for (i = 0; i < *n_quad; i++) { if (n > 0) { (*r1_local)[i] = quad_2d[2 * n - 1][3*i]; (*r2_local)[i] = 
quad_2d[2 * n - 1][3*i+1]; (*w_local) [i] = quad_2d[2 * n - 1][3*i+2] / 2.; //weights are 2 times too big for some reason } else { (*r1_local)[i] = quad_2d[0][3*i]; (*r2_local)[i] = quad_2d[0][3*i+1]; (*w_local) [i] = quad_2d[0][3*i+2] / 2.; //weights are 2 times too big for some reason } } // set 1D quadrature rules for (i = 0; i < *n_quad1d; i++) { (*s_r)[i] = quad_1d[n][2*i]; (*oned_w_local)[i] = quad_1d[n][2*i+1]; } } void checkCudaError(const char *message) { hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) ); exit(-1); } } void read_mesh(FILE *mesh_file, int *num_sides, int *num_elem, double **V1x, double **V1y, double **V2x, double **V2y, double **V3x, double **V3y, int **left_side_number, int **right_side_number, double **sides_x1, double **sides_y1, double **sides_x2, double **sides_y2, int **elem_s1, int **elem_s2, int **elem_s3, int **left_elem, int **right_elem) { int i, items; char line[1000]; // stores the number of sides this element has. 
fgets(line, 1000, mesh_file); sscanf(line, "%i", num_elem); *V1x = (double *) malloc(*num_elem * sizeof(double)); *V1y = (double *) malloc(*num_elem * sizeof(double)); *V2x = (double *) malloc(*num_elem * sizeof(double)); *V2y = (double *) malloc(*num_elem * sizeof(double)); *V3x = (double *) malloc(*num_elem * sizeof(double)); *V3y = (double *) malloc(*num_elem * sizeof(double)); *elem_s1 = (int *) malloc(*num_elem * sizeof(int)); *elem_s2 = (int *) malloc(*num_elem * sizeof(int)); *elem_s3 = (int *) malloc(*num_elem * sizeof(int)); for (i = 0; i < *num_elem; i++) { fgets(line, sizeof(line), mesh_file); // these three vertices define the element // and boundary_side tells which side is a boundary // while boundary determines the type of boundary items = sscanf(line, "%lf %lf %lf %lf %lf %lf %i %i %i", &(*V1x)[i], &(*V1y)[i], &(*V2x)[i], &(*V2y)[i], &(*V3x)[i], &(*V3y)[i], &(*elem_s1)[i], &(*elem_s2)[i], &(*elem_s3)[i]); if (items != 9) { printf("error: not enough items (%i) while reading elements from mesh.\n", items); exit(0); } } fgets(line, 1000, mesh_file); sscanf(line, "%i", num_sides); *left_side_number = (int *) malloc(*num_sides * sizeof(int)); *right_side_number = (int *) malloc(*num_sides * sizeof(int)); *sides_x1 = (double *) malloc(*num_sides * sizeof(double)); *sides_x2 = (double *) malloc(*num_sides * sizeof(double)); *sides_y1 = (double *) malloc(*num_sides * sizeof(double)); *sides_y2 = (double *) malloc(*num_sides * sizeof(double)); *left_elem = (int *) malloc(*num_sides * sizeof(int)); *right_elem = (int *) malloc(*num_sides * sizeof(int)); for (i = 0; i < *num_sides; i++) { fgets(line, sizeof(line), mesh_file); items = sscanf(line, "%lf %lf %lf %lf %i %i %i %i", &(*sides_x1)[i], &(*sides_y1)[i], &(*sides_x2)[i], &(*sides_y2)[i], &(*left_elem)[i], &(*right_elem)[i], &(*left_side_number)[i], &(*right_side_number)[i]); if (items != 8) { printf("error: not enough items (%i) while reading sides from mesh.\n", items); exit(0); } } } void init_gpu(int 
num_elem, int num_sides, int n_p, double *V1x, double *V1y, double *V2x, double *V2y, double *V3x, double *V3y, int *left_side_number, int *right_side_number, double *sides_x1, double *sides_y1, double *sides_x2, double *sides_y2, int *elem_s1, int *elem_s2, int *elem_s3, int *left_elem, int *right_elem) { //int reduction_size = (num_elem / 256) + ((num_elem % 256) ? 1 : 0); checkCudaError("error before init."); hipDeviceReset(); double total_memory = num_elem*22*sizeof(double) + num_elem*6*sizeof(int) + num_sides*11*sizeof(double) + num_sides*3*sizeof(int) + local_N*num_elem*n_p*3*sizeof(double) + local_N*num_sides*n_p*2*sizeof(double); switch (time_integrator) { case RK4: total_memory += 5*local_N*num_elem*n_p*sizeof(double); break; case RK2: total_memory += 3*local_N*num_elem*n_p*sizeof(double); break; } if (total_memory < 1e3) { printf("Total memory required: %lf B\n", total_memory); } else if (total_memory >= 1e3 && total_memory < 1e6) { printf("Total memory required: %lf KB\n", total_memory * 1e-3); } else if (total_memory >= 1e6 && total_memory < 1e9) { printf("Total memory required: %lf MB\n", total_memory * 1e-6); } else { printf("Total memory required: %lf GB\n", total_memory * 1e-9); } hipMalloc((void **) &d_c, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_c_prev, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_quad_rhs, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_left_riemann_rhs, local_N * num_sides * n_p * sizeof(double)); hipMalloc((void **) &d_right_riemann_rhs, local_N * num_sides * n_p * sizeof(double)); switch (time_integrator) { case RK4: hipMalloc((void **) &d_kstar, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_k1, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_k2, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_k3, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_k4, local_N * num_elem * n_p * 
sizeof(double)); break; case RK2: hipMalloc((void **) &d_kstar, local_N * num_elem * n_p * sizeof(double)); hipMalloc((void **) &d_k1, local_N * num_elem * n_p * sizeof(double)); break; default: printf("Error selecting time integrator.\n"); exit(0); } hipMalloc((void **) &d_J , num_elem * sizeof(double)); hipMalloc((void **) &d_lambda , num_elem * sizeof(double)); hipMalloc((void **) &d_s_length , num_sides * sizeof(double)); hipMalloc((void **) &d_s_V1x, num_sides * sizeof(double)); hipMalloc((void **) &d_s_V2x, num_sides * sizeof(double)); hipMalloc((void **) &d_s_V1y, num_sides * sizeof(double)); hipMalloc((void **) &d_s_V2y, num_sides * sizeof(double)); hipMalloc((void **) &d_elem_s1, num_elem * sizeof(int)); hipMalloc((void **) &d_elem_s2, num_elem * sizeof(int)); hipMalloc((void **) &d_elem_s3, num_elem * sizeof(int)); hipMalloc((void **) &d_Uv1, num_elem * sizeof(double)); hipMalloc((void **) &d_Uv2, num_elem * sizeof(double)); hipMalloc((void **) &d_Uv3, num_elem * sizeof(double)); hipMalloc((void **) &d_error, num_elem * sizeof(double)); hipMalloc((void **) &d_V1x, num_elem * sizeof(double)); hipMalloc((void **) &d_V1y, num_elem * sizeof(double)); hipMalloc((void **) &d_V2x, num_elem * sizeof(double)); hipMalloc((void **) &d_V2y, num_elem * sizeof(double)); hipMalloc((void **) &d_V3x, num_elem * sizeof(double)); hipMalloc((void **) &d_V3y, num_elem * sizeof(double)); hipMalloc((void **) &d_xr, num_elem * sizeof(double)); hipMalloc((void **) &d_yr, num_elem * sizeof(double)); hipMalloc((void **) &d_xs, num_elem * sizeof(double)); hipMalloc((void **) &d_ys, num_elem * sizeof(double)); hipMalloc((void **) &d_left_side_number , num_sides * sizeof(int)); hipMalloc((void **) &d_right_side_number, num_sides * sizeof(int)); hipMalloc((void **) &d_Nx, num_sides * sizeof(double)); hipMalloc((void **) &d_Ny, num_sides * sizeof(double)); hipMalloc((void **) &d_right_elem, num_sides * sizeof(int)); hipMalloc((void **) &d_left_elem , num_sides * sizeof(int)); 
checkCudaError("error after gpu malloc"); // copy over data hipMemcpy(d_s_V1x, sides_x1, num_sides * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_s_V1y, sides_y1, num_sides * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_s_V2x, sides_x2, num_sides * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_s_V2y, sides_y2, num_sides * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_left_side_number , left_side_number , num_sides * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_right_side_number, right_side_number, num_sides * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_elem_s1, elem_s1, num_elem * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_elem_s2, elem_s2, num_elem * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_elem_s3, elem_s3, num_elem * sizeof(int), hipMemcpyHostToDevice); checkCudaError("error after gpu copy"); hipMemcpy(d_V1x, V1x, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_V1y, V1y, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_V2x, V2x, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_V2y, V2y, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_V3x, V3x, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_V3y, V3y, num_elem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_left_elem , left_elem , num_sides * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_right_elem, right_elem, num_sides * sizeof(int), hipMemcpyHostToDevice); } void free_gpu() { hipFree(d_c); //hipFree(d_c_prev); hipFree(d_quad_rhs); hipFree(d_left_riemann_rhs); hipFree(d_right_riemann_rhs); switch (time_integrator) { case RK4: hipFree(d_kstar); hipFree(d_k1); hipFree(d_k2); hipFree(d_k3); hipFree(d_k4); break; case RK2: hipFree(d_kstar); hipFree(d_k1); break; } hipFree(d_J); hipFree(d_lambda); hipFree(d_reduction); hipFree(d_s_length); hipFree(d_elem_s1); hipFree(d_elem_s2); hipFree(d_elem_s3); hipFree(d_Uv1); hipFree(d_Uv2); hipFree(d_Uv3); hipFree(d_V1x); hipFree(d_V1y); 
hipFree(d_V2x); hipFree(d_V2y); hipFree(d_V3x); hipFree(d_V3y); hipFree(d_xr); hipFree(d_yr); hipFree(d_xs); hipFree(d_ys); hipFree(d_left_side_number); hipFree(d_right_side_number); hipFree(d_Nx); hipFree(d_Ny); hipFree(d_right_elem); hipFree(d_left_elem); } void usage_error() { printf("\nUsage: dgcuda [OPTIONS] [MESH] [OUTFILE]\n"); printf(" Options: [-n] Order of polynomial approximation.\n"); printf(" [-t] Number of timesteps.\n"); printf(" [-d] Debug.\n"); } int get_input(int argc, char *argv[], int *n, int *timesteps, double *endtime, char **mesh_filename) { int i; *timesteps = 1; // read command line input if (argc < 5) { usage_error(); return 1; } for (i = 0; i < argc; i++) { // order of polynomial if (strcmp(argv[i], "-n") == 0) { if (i + 1 < argc) { *n = atoi(argv[i+1]); if (*n < 0 || *n > 5) { usage_error(); return 1; } } else { usage_error(); return 1; } } // number of timesteps if (strcmp(argv[i], "-t") == 0) { if (i + 1 < argc) { *timesteps = atoi(argv[i+1]); if (*timesteps < 0) { usage_error(); return 1; } } else { usage_error(); return 1; } } if (strcmp(argv[i], "-T") == 0) { if (i + 1 < argc) { *endtime = atof(argv[i+1]); if (*endtime < 0) { usage_error(); return 1; } } else { usage_error(); return 1; } } } // second last argument is filename *mesh_filename = argv[argc - 1]; return 0; }
1c17f530a58c4513d057a4f81389d981a0b91904.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include "conserv_kernels.cu" #include "conserv_kernels_wrappers.cu" //#include "conserv_kernels_wrappers.cu" #include "time_integrator.cu" #include "../quadrature.cu" #include "../basis.cu" extern int local_N; extern int limiter; extern int time_integrator; // limiter optoins #define NO_LIMITER 0 #define LIMITER 1 // time integration options #define RK4 1 #define RK2 2 // riemann solver options #define LLF 1 /* 2dadvec_euler.cu * * This file calls the kernels in 2dadvec_kernels_euler.cu for the 2D advection * DG method. */ /* set quadrature * * sets the 1d quadrature integration points and weights for the boundary integrals * and the 2d quadrature integration points and weights for the volume intergrals. */ void set_quadrature(int n, double **r1_local, double **r2_local, double **w_local, double **s_r, double **oned_w_local, int *n_quad, int *n_quad1d) { int i; /* * The sides are mapped to the canonical element, so we want the integration points * for the boundary integrals for sides s1, s2, and s3 as shown below: s (r2) |\ ^ | \ | | \ | | \ | s3 | \ s2 | | \ | | \ | | \ | |________\ | s1 | ------------------------> r (r1) * */ switch (n) { case 0: *n_quad = 1; *n_quad1d = 1; break; case 1: *n_quad = 3; *n_quad1d = 2; break; case 2: *n_quad = 6; *n_quad1d = 3; break; case 3: *n_quad = 12 ; *n_quad1d = 4; break; case 4: *n_quad = 16; *n_quad1d = 5; break; case 5: *n_quad = 25; *n_quad1d = 6; break; } // allocate integration points *r1_local = (double *) malloc(*n_quad * sizeof(double)); *r2_local = (double *) malloc(*n_quad * sizeof(double)); *w_local = (double *) malloc(*n_quad * sizeof(double)); *s_r = (double *) malloc(*n_quad1d * sizeof(double)); *oned_w_local = (double *) malloc(*n_quad1d * sizeof(double)); // set 2D quadrature rules for (i = 0; i < *n_quad; i++) { if (n > 0) { (*r1_local)[i] = quad_2d[2 * n - 1][3*i]; (*r2_local)[i] = quad_2d[2 * n - 1][3*i+1]; (*w_local) [i] = quad_2d[2 * n - 1][3*i+2] 
/ 2.; //weights are 2 times too big for some reason } else { (*r1_local)[i] = quad_2d[0][3*i]; (*r2_local)[i] = quad_2d[0][3*i+1]; (*w_local) [i] = quad_2d[0][3*i+2] / 2.; //weights are 2 times too big for some reason } } // set 1D quadrature rules for (i = 0; i < *n_quad1d; i++) { (*s_r)[i] = quad_1d[n][2*i]; (*oned_w_local)[i] = quad_1d[n][2*i+1]; } } void checkCudaError(const char *message) { cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) ); exit(-1); } } void read_mesh(FILE *mesh_file, int *num_sides, int *num_elem, double **V1x, double **V1y, double **V2x, double **V2y, double **V3x, double **V3y, int **left_side_number, int **right_side_number, double **sides_x1, double **sides_y1, double **sides_x2, double **sides_y2, int **elem_s1, int **elem_s2, int **elem_s3, int **left_elem, int **right_elem) { int i, items; char line[1000]; // stores the number of sides this element has. fgets(line, 1000, mesh_file); sscanf(line, "%i", num_elem); *V1x = (double *) malloc(*num_elem * sizeof(double)); *V1y = (double *) malloc(*num_elem * sizeof(double)); *V2x = (double *) malloc(*num_elem * sizeof(double)); *V2y = (double *) malloc(*num_elem * sizeof(double)); *V3x = (double *) malloc(*num_elem * sizeof(double)); *V3y = (double *) malloc(*num_elem * sizeof(double)); *elem_s1 = (int *) malloc(*num_elem * sizeof(int)); *elem_s2 = (int *) malloc(*num_elem * sizeof(int)); *elem_s3 = (int *) malloc(*num_elem * sizeof(int)); for (i = 0; i < *num_elem; i++) { fgets(line, sizeof(line), mesh_file); // these three vertices define the element // and boundary_side tells which side is a boundary // while boundary determines the type of boundary items = sscanf(line, "%lf %lf %lf %lf %lf %lf %i %i %i", &(*V1x)[i], &(*V1y)[i], &(*V2x)[i], &(*V2y)[i], &(*V3x)[i], &(*V3y)[i], &(*elem_s1)[i], &(*elem_s2)[i], &(*elem_s3)[i]); if (items != 9) { printf("error: not enough items (%i) while reading elements 
from mesh.\n", items); exit(0); } } fgets(line, 1000, mesh_file); sscanf(line, "%i", num_sides); *left_side_number = (int *) malloc(*num_sides * sizeof(int)); *right_side_number = (int *) malloc(*num_sides * sizeof(int)); *sides_x1 = (double *) malloc(*num_sides * sizeof(double)); *sides_x2 = (double *) malloc(*num_sides * sizeof(double)); *sides_y1 = (double *) malloc(*num_sides * sizeof(double)); *sides_y2 = (double *) malloc(*num_sides * sizeof(double)); *left_elem = (int *) malloc(*num_sides * sizeof(int)); *right_elem = (int *) malloc(*num_sides * sizeof(int)); for (i = 0; i < *num_sides; i++) { fgets(line, sizeof(line), mesh_file); items = sscanf(line, "%lf %lf %lf %lf %i %i %i %i", &(*sides_x1)[i], &(*sides_y1)[i], &(*sides_x2)[i], &(*sides_y2)[i], &(*left_elem)[i], &(*right_elem)[i], &(*left_side_number)[i], &(*right_side_number)[i]); if (items != 8) { printf("error: not enough items (%i) while reading sides from mesh.\n", items); exit(0); } } } void init_gpu(int num_elem, int num_sides, int n_p, double *V1x, double *V1y, double *V2x, double *V2y, double *V3x, double *V3y, int *left_side_number, int *right_side_number, double *sides_x1, double *sides_y1, double *sides_x2, double *sides_y2, int *elem_s1, int *elem_s2, int *elem_s3, int *left_elem, int *right_elem) { //int reduction_size = (num_elem / 256) + ((num_elem % 256) ? 
1 : 0); checkCudaError("error before init."); cudaDeviceReset(); double total_memory = num_elem*22*sizeof(double) + num_elem*6*sizeof(int) + num_sides*11*sizeof(double) + num_sides*3*sizeof(int) + local_N*num_elem*n_p*3*sizeof(double) + local_N*num_sides*n_p*2*sizeof(double); switch (time_integrator) { case RK4: total_memory += 5*local_N*num_elem*n_p*sizeof(double); break; case RK2: total_memory += 3*local_N*num_elem*n_p*sizeof(double); break; } if (total_memory < 1e3) { printf("Total memory required: %lf B\n", total_memory); } else if (total_memory >= 1e3 && total_memory < 1e6) { printf("Total memory required: %lf KB\n", total_memory * 1e-3); } else if (total_memory >= 1e6 && total_memory < 1e9) { printf("Total memory required: %lf MB\n", total_memory * 1e-6); } else { printf("Total memory required: %lf GB\n", total_memory * 1e-9); } cudaMalloc((void **) &d_c, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_c_prev, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_quad_rhs, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_left_riemann_rhs, local_N * num_sides * n_p * sizeof(double)); cudaMalloc((void **) &d_right_riemann_rhs, local_N * num_sides * n_p * sizeof(double)); switch (time_integrator) { case RK4: cudaMalloc((void **) &d_kstar, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_k1, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_k2, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_k3, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_k4, local_N * num_elem * n_p * sizeof(double)); break; case RK2: cudaMalloc((void **) &d_kstar, local_N * num_elem * n_p * sizeof(double)); cudaMalloc((void **) &d_k1, local_N * num_elem * n_p * sizeof(double)); break; default: printf("Error selecting time integrator.\n"); exit(0); } cudaMalloc((void **) &d_J , num_elem * sizeof(double)); cudaMalloc((void **) &d_lambda , num_elem * 
sizeof(double)); cudaMalloc((void **) &d_s_length , num_sides * sizeof(double)); cudaMalloc((void **) &d_s_V1x, num_sides * sizeof(double)); cudaMalloc((void **) &d_s_V2x, num_sides * sizeof(double)); cudaMalloc((void **) &d_s_V1y, num_sides * sizeof(double)); cudaMalloc((void **) &d_s_V2y, num_sides * sizeof(double)); cudaMalloc((void **) &d_elem_s1, num_elem * sizeof(int)); cudaMalloc((void **) &d_elem_s2, num_elem * sizeof(int)); cudaMalloc((void **) &d_elem_s3, num_elem * sizeof(int)); cudaMalloc((void **) &d_Uv1, num_elem * sizeof(double)); cudaMalloc((void **) &d_Uv2, num_elem * sizeof(double)); cudaMalloc((void **) &d_Uv3, num_elem * sizeof(double)); cudaMalloc((void **) &d_error, num_elem * sizeof(double)); cudaMalloc((void **) &d_V1x, num_elem * sizeof(double)); cudaMalloc((void **) &d_V1y, num_elem * sizeof(double)); cudaMalloc((void **) &d_V2x, num_elem * sizeof(double)); cudaMalloc((void **) &d_V2y, num_elem * sizeof(double)); cudaMalloc((void **) &d_V3x, num_elem * sizeof(double)); cudaMalloc((void **) &d_V3y, num_elem * sizeof(double)); cudaMalloc((void **) &d_xr, num_elem * sizeof(double)); cudaMalloc((void **) &d_yr, num_elem * sizeof(double)); cudaMalloc((void **) &d_xs, num_elem * sizeof(double)); cudaMalloc((void **) &d_ys, num_elem * sizeof(double)); cudaMalloc((void **) &d_left_side_number , num_sides * sizeof(int)); cudaMalloc((void **) &d_right_side_number, num_sides * sizeof(int)); cudaMalloc((void **) &d_Nx, num_sides * sizeof(double)); cudaMalloc((void **) &d_Ny, num_sides * sizeof(double)); cudaMalloc((void **) &d_right_elem, num_sides * sizeof(int)); cudaMalloc((void **) &d_left_elem , num_sides * sizeof(int)); checkCudaError("error after gpu malloc"); // copy over data cudaMemcpy(d_s_V1x, sides_x1, num_sides * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_s_V1y, sides_y1, num_sides * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_s_V2x, sides_x2, num_sides * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_s_V2y, 
sides_y2, num_sides * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_left_side_number , left_side_number , num_sides * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_right_side_number, right_side_number, num_sides * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_elem_s1, elem_s1, num_elem * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_elem_s2, elem_s2, num_elem * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_elem_s3, elem_s3, num_elem * sizeof(int), cudaMemcpyHostToDevice); checkCudaError("error after gpu copy"); cudaMemcpy(d_V1x, V1x, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_V1y, V1y, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_V2x, V2x, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_V2y, V2y, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_V3x, V3x, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_V3y, V3y, num_elem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_left_elem , left_elem , num_sides * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_right_elem, right_elem, num_sides * sizeof(int), cudaMemcpyHostToDevice); } void free_gpu() { cudaFree(d_c); //cudaFree(d_c_prev); cudaFree(d_quad_rhs); cudaFree(d_left_riemann_rhs); cudaFree(d_right_riemann_rhs); switch (time_integrator) { case RK4: cudaFree(d_kstar); cudaFree(d_k1); cudaFree(d_k2); cudaFree(d_k3); cudaFree(d_k4); break; case RK2: cudaFree(d_kstar); cudaFree(d_k1); break; } cudaFree(d_J); cudaFree(d_lambda); cudaFree(d_reduction); cudaFree(d_s_length); cudaFree(d_elem_s1); cudaFree(d_elem_s2); cudaFree(d_elem_s3); cudaFree(d_Uv1); cudaFree(d_Uv2); cudaFree(d_Uv3); cudaFree(d_V1x); cudaFree(d_V1y); cudaFree(d_V2x); cudaFree(d_V2y); cudaFree(d_V3x); cudaFree(d_V3y); cudaFree(d_xr); cudaFree(d_yr); cudaFree(d_xs); cudaFree(d_ys); cudaFree(d_left_side_number); cudaFree(d_right_side_number); cudaFree(d_Nx); cudaFree(d_Ny); cudaFree(d_right_elem); cudaFree(d_left_elem); } void 
usage_error() { printf("\nUsage: dgcuda [OPTIONS] [MESH] [OUTFILE]\n"); printf(" Options: [-n] Order of polynomial approximation.\n"); printf(" [-t] Number of timesteps.\n"); printf(" [-d] Debug.\n"); } int get_input(int argc, char *argv[], int *n, int *timesteps, double *endtime, char **mesh_filename) { int i; *timesteps = 1; // read command line input if (argc < 5) { usage_error(); return 1; } for (i = 0; i < argc; i++) { // order of polynomial if (strcmp(argv[i], "-n") == 0) { if (i + 1 < argc) { *n = atoi(argv[i+1]); if (*n < 0 || *n > 5) { usage_error(); return 1; } } else { usage_error(); return 1; } } // number of timesteps if (strcmp(argv[i], "-t") == 0) { if (i + 1 < argc) { *timesteps = atoi(argv[i+1]); if (*timesteps < 0) { usage_error(); return 1; } } else { usage_error(); return 1; } } if (strcmp(argv[i], "-T") == 0) { if (i + 1 < argc) { *endtime = atof(argv[i+1]); if (*endtime < 0) { usage_error(); return 1; } } else { usage_error(); return 1; } } } // second last argument is filename *mesh_filename = argv[argc - 1]; return 0; }
b3060fbfb8e8aaaac36b106019a5747f97b35572.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the rest is the same as normal convolution. 
*/ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = float; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // 
TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 4; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha in linear combination cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// int run() { // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( {1, 7, 7, 512}, // activation {512, 3, 3, 512}, // filter {1, 1, 1, 1}, // padding {1, 1}, // striding {1, 1}, // dilation cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation) 1 // split-k slices ); // Initialize tensors using CUTLASS helper functions 
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent()); // Create tensor C with dimensions 1x1x1xk which is the bias vector cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K}); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent()); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), // <- reference to tensor A on device tensor_b.device_ref(), // <- reference to tensor B on device // tensor C is treated as the bias vector. We can enable the CONV // to project away the N, H, W dimension by setting the stride to zero. {tensor_c_bias.device_data(), LayoutOutput::Stride(0)}, tensor_d.device_ref(), // <- reference to tensor D on device {alpha} }; // Instantiate CUTLASS kernel depending on templates ImplicitGemm implicit_gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not cutlass::Status status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = implicit_gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference conv kernel // // Launch device reference to compute strictly the product A * B cutlass::reference::device::Conv2d< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>> ( cutlass::conv::Operator::kFprop, problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c_bias.device_ref(), tensor_ref_d.device_ref(), alpha, 0 ); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for 
(int n = 0; n < problem_size.N; ++n) { for (int p = 0; p < problem_size.P; ++p) { for (int q = 0; q < problem_size.Q; ++q) { for (int k = 0; k < problem_size.K; ++k) { tensor_ref_d.at({n, p, q, k}) = ::max(ElementOutput(0), ElementOutput(tensor_ref_d.at({n, p, q, k}) + tensor_c_bias.at({0, 0, 0, k}))); } } } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; CUDA_CHECK(hipGetDeviceProperties(&props, 0)); if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } return run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
b3060fbfb8e8aaaac36b106019a5747f97b35572.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the rest is the same as normal convolution. 
*/ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = float; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // 
TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 4; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha in linear combination cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// int run() { // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( {1, 7, 7, 512}, // activation {512, 3, 3, 512}, // filter {1, 1, 1, 1}, // padding {1, 1}, // striding {1, 1}, // dilation cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation) 1 // split-k slices ); // Initialize tensors using CUTLASS helper functions 
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent()); // Create tensor C with dimensions 1x1x1xk which is the bias vector cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K}); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent()); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), // <- reference to tensor A on device tensor_b.device_ref(), // <- reference to tensor B on device // tensor C is treated as the bias vector. We can enable the CONV // to project away the N, H, W dimension by setting the stride to zero. {tensor_c_bias.device_data(), LayoutOutput::Stride(0)}, tensor_d.device_ref(), // <- reference to tensor D on device {alpha} }; // Instantiate CUTLASS kernel depending on templates ImplicitGemm implicit_gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not cutlass::Status status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = implicit_gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference conv kernel // // Launch device reference to compute strictly the product A * B cutlass::reference::device::Conv2d< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>> ( cutlass::conv::Operator::kFprop, problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c_bias.device_ref(), tensor_ref_d.device_ref(), alpha, 0 ); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for 
(int n = 0; n < problem_size.N; ++n) { for (int p = 0; p < problem_size.P; ++p) { for (int q = 0; q < problem_size.Q; ++q) { for (int k = 0; k < problem_size.K; ++k) { tensor_ref_d.at({n, p, q, k}) = std::max(ElementOutput(0), ElementOutput(tensor_ref_d.at({n, p, q, k}) + tensor_c_bias.at({0, 0, 0, k}))); } } } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } return run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
74a7946e45fdaabb358569df24848c2571c5d00c.hip
// !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE(fix): hipify emitted `#include <hip/hip_runtime.h>` twice (it maps both
// <cuda.h> and <cuda_runtime.h> to the same header); the duplicate was removed.
#include <hip/hip_runtime.h>
#include <torch/extension.h>

#include "bilateral.h"
#include "filtering/permutohedral/permutohedral.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"

// Filter parameters staged in constant memory (written host-side with
// hipMemcpyToSymbol below); every thread reads the same values, so the
// constant-cache broadcast path applies.
__constant__ int cBatchStride;
__constant__ int cChannelStride;
__constant__ int cSpatialStrides[3];
__constant__ float cInvSpatialSigma;
__constant__ float cInvColorSigma;

// Packs the input into element-major "data" (C consecutive channel values per
// element) and builds the (C + D)-dimensional permutohedral feature vector:
// C color features scaled by 1/colorSigma followed by D spatial coordinates
// scaled by 1/spatialSigma.
// Grid layout: x covers elements (cChannelStride of them), y covers the batch.
template <typename scalar_t, int C, int D>
__global__ void FeatureCreation(const scalar_t* inputTensor, scalar_t* outputData, scalar_t* outputFeatures) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int batchIndex = blockIdx.y;

  // Grid is rounded up to a multiple of the block size; guard the tail.
  if (elementIndex >= cChannelStride)
    return;

  int dataBatchOffset = batchIndex * cBatchStride;
  int featureBatchOffset = batchIndex * (D + C) * cChannelStride;

#pragma unroll
  for (int i = 0; i < C; i++) {
    outputData[dataBatchOffset + elementIndex * C + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride];
    outputFeatures[featureBatchOffset + elementIndex * (C + D) + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride] * cInvColorSigma;
  }

  // Decompose the flat element index into D spatial coordinates using the
  // per-dimension strides.
  int remainder = elementIndex;

#pragma unroll
  for (int i = 0; i < D; i++) {
    int coord = remainder / cSpatialStrides[i];
    remainder -= coord * cSpatialStrides[i];

    outputFeatures[featureBatchOffset + elementIndex * (C + D) + C + i] = coord * cInvSpatialSigma;
  }
}

// Scatters the filtered element-major data back into the tensor's
// channel-strided layout (inverse of the packing done in FeatureCreation).
template <typename scalar_t, int C>
__global__ void WriteOutput(const scalar_t* data, scalar_t* outputTensor) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int batchIndex = blockIdx.y;

  if (elementIndex >= cChannelStride)
    return;

  int batchOffset = batchIndex * cBatchStride;

#pragma unroll
  for (int i = 0; i < C; i++) {
    outputTensor[batchOffset + elementIndex + i * cChannelStride] = data[batchOffset + elementIndex * C + i];
  }
}

// Bilateral filter via a permutohedral lattice for a fixed channel count C and
// spatial dimensionality D. inputTensor and outputTensor are assumed to be
// same-shaped device tensors of scalar_t — TODO confirm against callers.
template <typename scalar_t, int C, int D>
void BilateralFilterPHLCuda(
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    float spatialSigma,
    float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  int featureChannelCount = desc.channelCount + desc.dimensions;

  // Pre calculating inverse sigmas.
  float invSpatialSigma = 1.0f / spatialSigma;
  float invColorSigma = 1.0f / colorSigma;

  // Preparing global memory
  scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
  scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();

  scalar_t* data;
  scalar_t* features;
  hipMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t));
  hipMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t));

  // Preparing constant memory
  hipMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
  hipMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int));
  hipMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions);
  hipMemcpyToSymbol(cInvSpatialSigma, &invSpatialSigma, sizeof(float));
  hipMemcpyToSymbol(cInvColorSigma, &invColorSigma, sizeof(float));

  // NOTE(fix): was `#define BLOCK_SIZE 32`, a macro that leaked into the rest
  // of the translation unit; a constexpr is scoped to this function.
  constexpr int BLOCK_SIZE = 32;

  // Creating features
  hipLaunchKernelGGL(
      (FeatureCreation<scalar_t, C, D>),
      dim3(dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount)),
      dim3(dim3(BLOCK_SIZE, 1)),
      0,
      0,
      inputTensorData,
      data,
      features);

  // Filtering data with respect to the features for each sample in batch
  for (int batchIndex = 0; batchIndex < desc.batchCount; batchIndex++) {
    scalar_t* offsetData = data + batchIndex * desc.batchStride;
    scalar_t* offsetFeatures = features + batchIndex * featureChannelCount * desc.channelStride;
    PermutohedralCuda<scalar_t, C, C + D>(offsetData, offsetFeatures, desc.channelStride, true);
  }

  // Writing output
  hipLaunchKernelGGL(
      (WriteOutput<scalar_t, C>),
      dim3(dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount)),
      dim3(dim3(BLOCK_SIZE, 1)),
      0,
      0,
      data,
      outputTensorData);

  hipFree(data);
  hipFree(features);
}

// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

#define CASE(c, d)                                                                       \
  AT_DISPATCH_FLOATING_TYPES(inputTensor.scalar_type(), "BilateralFilterCudaPHL", ([&] { \
                               BilateralFilterPHLCuda<scalar_t, c, d>(                   \
                                   inputTensor, outputTensor, spatialSigma, colorSigma); \
                             }));
  SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);

  return outputTensor;
}
74a7946e45fdaabb358569df24848c2571c5d00c.cu
/*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>

#include "bilateral.h"
#include "filtering/permutohedral/permutohedral.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"

// Filter parameters staged in constant memory (written host-side with
// cudaMemcpyToSymbol below); every thread reads the same values, so the
// constant-cache broadcast path applies.
__constant__ int cBatchStride;
__constant__ int cChannelStride;
__constant__ int cSpatialStrides[3];
__constant__ float cInvSpatialSigma;
__constant__ float cInvColorSigma;

// Packs the input into element-major "data" (C consecutive channel values per
// element) and builds the (C + D)-dimensional permutohedral feature vector:
// C color features scaled by 1/colorSigma followed by D spatial coordinates
// scaled by 1/spatialSigma.
// Grid layout: x covers elements (cChannelStride of them), y covers the batch.
template <typename scalar_t, int C, int D>
__global__ void FeatureCreation(const scalar_t* inputTensor, scalar_t* outputData, scalar_t* outputFeatures) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int batchIndex = blockIdx.y;

  // Grid is rounded up to a multiple of the block size; guard the tail.
  if (elementIndex >= cChannelStride)
    return;

  int dataBatchOffset = batchIndex * cBatchStride;
  int featureBatchOffset = batchIndex * (D + C) * cChannelStride;

#pragma unroll
  for (int i = 0; i < C; i++) {
    outputData[dataBatchOffset + elementIndex * C + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride];
    outputFeatures[featureBatchOffset + elementIndex * (C + D) + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride] * cInvColorSigma;
  }

  // Decompose the flat element index into D spatial coordinates using the
  // per-dimension strides.
  int remainder = elementIndex;

#pragma unroll
  for (int i = 0; i < D; i++) {
    int coord = remainder / cSpatialStrides[i];
    remainder -= coord * cSpatialStrides[i];

    outputFeatures[featureBatchOffset + elementIndex * (C + D) + C + i] = coord * cInvSpatialSigma;
  }
}

// Scatters the filtered element-major data back into the tensor's
// channel-strided layout (inverse of the packing done in FeatureCreation).
template <typename scalar_t, int C>
__global__ void WriteOutput(const scalar_t* data, scalar_t* outputTensor) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int batchIndex = blockIdx.y;

  if (elementIndex >= cChannelStride)
    return;

  int batchOffset = batchIndex * cBatchStride;

#pragma unroll
  for (int i = 0; i < C; i++) {
    outputTensor[batchOffset + elementIndex + i * cChannelStride] = data[batchOffset + elementIndex * C + i];
  }
}

// Bilateral filter via a permutohedral lattice for a fixed channel count C and
// spatial dimensionality D. inputTensor and outputTensor are assumed to be
// same-shaped device tensors of scalar_t — TODO confirm against callers.
template <typename scalar_t, int C, int D>
void BilateralFilterPHLCuda(
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    float spatialSigma,
    float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  int featureChannelCount = desc.channelCount + desc.dimensions;

  // Pre calculating inverse sigmas.
  float invSpatialSigma = 1.0f / spatialSigma;
  float invColorSigma = 1.0f / colorSigma;

  // Preparing global memory
  // NOTE(review): cudaMalloc/cudaMemcpyToSymbol return codes are not checked
  // here; a failure would surface later as a kernel fault.
  scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
  scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();

  scalar_t* data;
  scalar_t* features;
  cudaMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t));
  cudaMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t));

  // Preparing constant memory
  cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
  cudaMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int));
  cudaMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cInvSpatialSigma, &invSpatialSigma, sizeof(float));
  cudaMemcpyToSymbol(cInvColorSigma, &invColorSigma, sizeof(float));

#define BLOCK_SIZE 32

  // Creating features
  FeatureCreation<scalar_t, C, D>
      <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
          inputTensorData, data, features);

  // Filtering data with respect to the features for each sample in batch
  for (int batchIndex = 0; batchIndex < desc.batchCount; batchIndex++) {
    scalar_t* offsetData = data + batchIndex * desc.batchStride;
    scalar_t* offsetFeatures = features + batchIndex * featureChannelCount * desc.channelStride;
    PermutohedralCuda<scalar_t, C, C + D>(offsetData, offsetFeatures, desc.channelStride, true);
  }

  // Writing output
  WriteOutput<scalar_t, C><<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
      data, outputTensorData);

  cudaFree(data);
  cudaFree(features);
}

// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

#define CASE(c, d)                                                                       \
  AT_DISPATCH_FLOATING_TYPES(inputTensor.scalar_type(), "BilateralFilterCudaPHL", ([&] { \
                               BilateralFilterPHLCuda<scalar_t, c, d>(                   \
                                   inputTensor, outputTensor, spatialSigma, colorSigma); \
                             }));
  SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);

  return outputTensor;
}
9af384a67c7fa8762f62bf5f7111cf416cbd638c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;

#define N 100

// Abort with file/line context if a HIP runtime call fails; HIP API calls
// otherwise fail silently and corrupt later results.
#define HIP_CHECK(call)                                            \
  do {                                                             \
    hipError_t err_ = (call);                                      \
    if (err_ != hipSuccess) {                                      \
      fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__, \
              hipGetErrorString(err_));                            \
      exit(EXIT_FAILURE);                                          \
    }                                                              \
  } while (0)

// Element-wise vector addition: c[tid] = a[tid] + b[tid].
// Launched with N blocks of one thread each; the bounds check keeps the
// kernel correct for any grid size.
__global__ void add(int *a, int *b, int *c) {
  int tid = blockIdx.x;
  if (tid < N) {
    c[tid] = a[tid] + b[tid];
  }
}

int main() {
  int a[N], b[N], c[N];
  int *dev_a, *dev_b, *dev_c;

  for (int i = 0; i < N; i++) {
    a[i] = i;
    b[i] = 2;
  }

  HIP_CHECK(hipMalloc((void**)&dev_a, N * sizeof(int)));
  HIP_CHECK(hipMalloc((void**)&dev_b, N * sizeof(int)));
  HIP_CHECK(hipMalloc((void**)&dev_c, N * sizeof(int)));

  HIP_CHECK(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
  HIP_CHECK(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));
  // NOTE(fix): the original also copied the uninitialized host array `c` to
  // the device; the kernel overwrites every element of dev_c, so that
  // transfer (which read indeterminate memory) was removed.

  hipLaunchKernelGGL(add, dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
  HIP_CHECK(hipGetLastError());  // catch launch-configuration errors

  // hipMemcpy from device blocks until the kernel has finished, so no
  // explicit synchronization is needed before reading the result.
  HIP_CHECK(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));

  for (auto item : c) {
    cout << item << endl;
  }

  HIP_CHECK(hipFree(dev_a));
  HIP_CHECK(hipFree(dev_b));
  HIP_CHECK(hipFree(dev_c));
  return 0;
}
9af384a67c7fa8762f62bf5f7111cf416cbd638c.cu
#include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;

#define N 100

// Abort with file/line context if a CUDA runtime call fails; kernel launches
// and most API calls otherwise fail silently.
#define CUDA_CHECK(call)                                            \
  do {                                                              \
    cudaError_t err_ = (call);                                      \
    if (err_ != cudaSuccess) {                                      \
      fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
              cudaGetErrorString(err_));                            \
      exit(EXIT_FAILURE);                                           \
    }                                                               \
  } while (0)

// Element-wise vector addition: c[tid] = a[tid] + b[tid].
// Launched with N blocks of one thread each; the bounds check keeps the
// kernel correct for any grid size.
__global__ void add(int *a, int *b, int *c) {
  int tid = blockIdx.x;
  if (tid < N) {
    c[tid] = a[tid] + b[tid];
  }
}

int main() {
  int a[N], b[N], c[N];
  int *dev_a, *dev_b, *dev_c;

  for (int i = 0; i < N; i++) {
    a[i] = i;
    b[i] = 2;
  }

  CUDA_CHECK(cudaMalloc((void**)&dev_a, N * sizeof(int)));
  CUDA_CHECK(cudaMalloc((void**)&dev_b, N * sizeof(int)));
  CUDA_CHECK(cudaMalloc((void**)&dev_c, N * sizeof(int)));

  CUDA_CHECK(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
  // NOTE(fix): the original also copied the uninitialized host array `c` to
  // the device; the kernel overwrites every element of dev_c, so that
  // transfer (which read indeterminate memory) was removed.

  add<<<N, 1>>>(dev_a, dev_b, dev_c);
  CUDA_CHECK(cudaGetLastError());  // catch launch-configuration errors

  // cudaMemcpy from device blocks until the kernel has finished, so no
  // explicit synchronization is needed before reading the result.
  CUDA_CHECK(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));

  for (auto item : c) {
    cout << item << endl;
  }

  CUDA_CHECK(cudaFree(dev_a));
  CUDA_CHECK(cudaFree(dev_b));
  CUDA_CHECK(cudaFree(dev_c));
  return 0;
}
53944435df7cf057ea9a94a93260d361a46bc471.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>

#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <algorithm>  // std::max (see note in replace_re below)

namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;

/**
 * @brief This functor handles replacing strings by applying the compiled regex patterns
 * and inserting the corresponding new string within the matched range of characters.
 *
 * The logic includes computing the size of each string and also writing the output.
 *
 * The stack is used to keep progress on evaluating the regex instructions on each string.
 * So the size of the stack is in proportion to the number of instructions in the given regex
 * pattern.
 *
 * There are three call types based on the number of regex instructions in the given pattern.
 * Small to medium instruction lengths can use the stack effectively though smaller executes faster.
 * Longer patterns require global memory. Shorter patterns are common in data cleaning.
 */
template <int stack_size>
struct replace_multi_regex_fn {
  column_device_view const d_strings;
  device_span<reprog_device const> progs;  // array of regex progs
  found_range* d_found_ranges;             // working array matched (begin,end) values
  column_device_view const d_repls;        // replacement strings
  int32_t* d_offsets{};
  char* d_chars{};

  // Two-pass protocol: when d_chars is null only the output size is computed
  // into d_offsets; otherwise the replaced string is written to d_chars.
  __device__ void operator()(size_type idx)
  {
    if (d_strings.is_null(idx)) {
      if (!d_chars) d_offsets[idx] = 0;
      return;
    }

    auto const number_of_patterns = static_cast<size_type>(progs.size());

    auto const d_str      = d_strings.element<string_view>(idx);
    auto const nchars     = d_str.length();      // number of characters in input string
    auto nbytes           = d_str.size_bytes();  // number of bytes in input string
    auto in_ptr           = d_str.data();        // input pointer
    auto out_ptr          = d_chars ? d_chars + d_offsets[idx] : nullptr;
    found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
    size_type lpos        = 0;
    size_type ch_pos      = 0;
    // initialize the working ranges memory to -1's
    thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
    // process string one character at a time
    while (ch_pos < nchars) {
      // this minimizes the regex-find calls by only calling it for stale patterns
      // -- those that have not previously matched up to this point (ch_pos)
      for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
        if (d_ranges[ptn_idx].first >= ch_pos)  // previously matched here
          continue;                             // or later in the string
        reprog_device prog = progs[ptn_idx];

        auto begin = static_cast<int32_t>(ch_pos);
        auto end   = static_cast<int32_t>(nchars);
        if (!prog.is_empty() && prog.find<stack_size>(idx, d_str, begin, end) > 0)
          d_ranges[ptn_idx] = found_range{begin, end};  // found a match
        else
          d_ranges[ptn_idx] = found_range{nchars, nchars};  // this pattern is done
      }
      // all the ranges have been updated from each regex match;
      // look for any that match at this character position (ch_pos)
      auto itr =
        thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
          return range.first == ch_pos;
        });
      if (itr != d_ranges + number_of_patterns) {
        // match found, compute and replace the string in the output
        size_type ptn_idx  = static_cast<size_type>(itr - d_ranges);
        size_type begin    = d_ranges[ptn_idx].first;
        size_type end      = d_ranges[ptn_idx].second;
        string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
                                                : d_repls.element<string_view>(0);
        auto spos = d_str.byte_offset(begin);
        auto epos = d_str.byte_offset(end);
        nbytes += d_repl.size_bytes() - (epos - spos);
        if (out_ptr) {  // copy unmodified content plus new replacement string
          out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
          out_ptr = copy_string(out_ptr, d_repl);
          lpos    = epos;
        }
        ch_pos = end - 1;
      }
      ++ch_pos;
    }
    if (out_ptr)  // copy the remainder
      memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
    else
      d_offsets[idx] = static_cast<int32_t>(nbytes);
  }
};

}  // namespace

std::unique_ptr<column> replace_re(
  strings_column_view const& strings,
  std::vector<std::string> const& patterns,
  strings_column_view const& replacements,
  regex_flags const flags,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  auto strings_count = strings.size();
  if (strings_count == 0) return make_empty_column(type_id::STRING);
  if (patterns.empty())  // no patterns; just return a copy
    return std::make_unique<column>(strings.parent(), stream, mr);

  CUDF_EXPECTS(!replacements.has_nulls(), "Parameter replacements must not have any nulls");

  auto d_strings    = column_device_view::create(strings.parent(), stream);
  auto d_repls      = column_device_view::create(replacements.parent(), stream);
  auto d_char_table = get_character_flags_table();

  // compile regexes into device objects
  size_type regex_insts = 0;
  std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
  std::vector<reprog_device> progs;
  for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
    auto prog = reprog_device::create(*itr, flags, d_char_table, strings_count, stream);
    // NOTE(fix): hipify rewrote `std::max` as `::max` here (host code has no
    // global `max`); restored to std::max to match the CUDA original.
    regex_insts = std::max(regex_insts, prog->insts_counts());
    progs.push_back(*prog);
    h_progs.emplace_back(std::move(prog));
  }

  // copy all the reprog_device instances to a device memory array
  auto d_progs = cudf::detail::make_device_uvector_async(progs, stream);

  // create working buffer for ranges pairs
  rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
  auto d_found_ranges = found_ranges.data();

  // create child columns
  auto children = [&] {
    // Each invocation is predicated on the stack size which is dependent on the number of regex
    // instructions
    if (regex_insts <= RX_SMALL_INSTS) {
      replace_multi_regex_fn<RX_STACK_SMALL> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else if (regex_insts <= RX_MEDIUM_INSTS) {
      replace_multi_regex_fn<RX_STACK_MEDIUM> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else if (regex_insts <= RX_LARGE_INSTS) {
      replace_multi_regex_fn<RX_STACK_LARGE> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else {
      replace_multi_regex_fn<RX_STACK_ANY> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    }
  }();

  return make_strings_column(strings_count,
                             std::move(children.first),
                             std::move(children.second),
                             strings.null_count(),
                             cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}

}  // namespace detail

// external API

std::unique_ptr<column> replace_re(strings_column_view const& strings,
                                   std::vector<std::string> const& patterns,
                                   strings_column_view const& replacements,
                                   regex_flags const flags,
                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::replace_re(strings, patterns, replacements, flags, rmm::cuda_stream_default, mr);
}

}  // namespace strings
}  // namespace cudf
53944435df7cf057ea9a94a93260d361a46bc471.cu
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>

#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>

#include <rmm/cuda_stream_view.hpp>

namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;

/**
 * @brief This functor handles replacing strings by applying the compiled regex patterns
 * and inserting the corresponding new string within the matched range of characters.
 *
 * The logic includes computing the size of each string and also writing the output.
 *
 * The stack is used to keep progress on evaluating the regex instructions on each string.
 * So the size of the stack is in proportion to the number of instructions in the given regex
 * pattern.
 *
 * There are three call types based on the number of regex instructions in the given pattern.
 * Small to medium instruction lengths can use the stack effectively though smaller executes faster.
 * Longer patterns require global memory. Shorter patterns are common in data cleaning.
 */
template <int stack_size>
struct replace_multi_regex_fn {
  column_device_view const d_strings;
  device_span<reprog_device const> progs;  // array of regex progs
  found_range* d_found_ranges;             // working array matched (begin,end) values
  column_device_view const d_repls;        // replacement strings
  int32_t* d_offsets{};
  char* d_chars{};

  // Two-pass protocol: when d_chars is null only the output size is computed
  // into d_offsets; otherwise the replaced string is written to d_chars.
  __device__ void operator()(size_type idx)
  {
    if (d_strings.is_null(idx)) {
      if (!d_chars) d_offsets[idx] = 0;
      return;
    }

    auto const number_of_patterns = static_cast<size_type>(progs.size());

    auto const d_str      = d_strings.element<string_view>(idx);
    auto const nchars     = d_str.length();      // number of characters in input string
    auto nbytes           = d_str.size_bytes();  // number of bytes in input string
    auto in_ptr           = d_str.data();        // input pointer
    auto out_ptr          = d_chars ? d_chars + d_offsets[idx] : nullptr;
    found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
    size_type lpos        = 0;
    size_type ch_pos      = 0;
    // initialize the working ranges memory to -1's
    thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
    // process string one character at a time
    while (ch_pos < nchars) {
      // this minimizes the regex-find calls by only calling it for stale patterns
      // -- those that have not previously matched up to this point (ch_pos)
      for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
        if (d_ranges[ptn_idx].first >= ch_pos)  // previously matched here
          continue;                             // or later in the string
        reprog_device prog = progs[ptn_idx];

        auto begin = static_cast<int32_t>(ch_pos);
        auto end   = static_cast<int32_t>(nchars);
        if (!prog.is_empty() && prog.find<stack_size>(idx, d_str, begin, end) > 0)
          d_ranges[ptn_idx] = found_range{begin, end};  // found a match
        else
          d_ranges[ptn_idx] = found_range{nchars, nchars};  // this pattern is done
      }
      // all the ranges have been updated from each regex match;
      // look for any that match at this character position (ch_pos)
      auto itr =
        thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
          return range.first == ch_pos;
        });
      if (itr != d_ranges + number_of_patterns) {
        // match found, compute and replace the string in the output
        size_type ptn_idx  = static_cast<size_type>(itr - d_ranges);
        size_type begin    = d_ranges[ptn_idx].first;
        size_type end      = d_ranges[ptn_idx].second;
        // a single replacement string is broadcast to all patterns
        string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
                                                : d_repls.element<string_view>(0);
        auto spos = d_str.byte_offset(begin);
        auto epos = d_str.byte_offset(end);
        nbytes += d_repl.size_bytes() - (epos - spos);
        if (out_ptr) {  // copy unmodified content plus new replacement string
          out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
          out_ptr = copy_string(out_ptr, d_repl);
          lpos    = epos;
        }
        ch_pos = end - 1;
      }
      ++ch_pos;
    }
    if (out_ptr)  // copy the remainder
      memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
    else
      d_offsets[idx] = static_cast<int32_t>(nbytes);
  }
};

}  // namespace

std::unique_ptr<column> replace_re(
  strings_column_view const& strings,
  std::vector<std::string> const& patterns,
  strings_column_view const& replacements,
  regex_flags const flags,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  auto strings_count = strings.size();
  if (strings_count == 0) return make_empty_column(type_id::STRING);
  if (patterns.empty())  // no patterns; just return a copy
    return std::make_unique<column>(strings.parent(), stream, mr);

  CUDF_EXPECTS(!replacements.has_nulls(), "Parameter replacements must not have any nulls");

  auto d_strings    = column_device_view::create(strings.parent(), stream);
  auto d_repls      = column_device_view::create(replacements.parent(), stream);
  auto d_char_table = get_character_flags_table();

  // compile regexes into device objects
  size_type regex_insts = 0;
  // h_progs owns the compiled programs; progs holds by-value copies for the device
  std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
  std::vector<reprog_device> progs;
  for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
    auto prog   = reprog_device::create(*itr, flags, d_char_table, strings_count, stream);
    regex_insts = std::max(regex_insts, prog->insts_counts());
    progs.push_back(*prog);
    h_progs.emplace_back(std::move(prog));
  }

  // copy all the reprog_device instances to a device memory array
  auto d_progs = cudf::detail::make_device_uvector_async(progs, stream);

  // create working buffer for ranges pairs
  rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
  auto d_found_ranges = found_ranges.data();

  // create child columns
  auto children = [&] {
    // Each invocation is predicated on the stack size which is dependent on the number of regex
    // instructions
    if (regex_insts <= RX_SMALL_INSTS) {
      replace_multi_regex_fn<RX_STACK_SMALL> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else if (regex_insts <= RX_MEDIUM_INSTS) {
      replace_multi_regex_fn<RX_STACK_MEDIUM> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else if (regex_insts <= RX_LARGE_INSTS) {
      replace_multi_regex_fn<RX_STACK_LARGE> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    } else {
      replace_multi_regex_fn<RX_STACK_ANY> fn{*d_strings, d_progs, d_found_ranges, *d_repls};
      return make_strings_children(fn, strings_count, stream, mr);
    }
  }();

  return make_strings_column(strings_count,
                             std::move(children.first),
                             std::move(children.second),
                             strings.null_count(),
                             cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}

}  // namespace detail

// external API

std::unique_ptr<column> replace_re(strings_column_view const& strings,
                                   std::vector<std::string> const& patterns,
                                   strings_column_view const& replacements,
                                   regex_flags const flags,
                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::replace_re(strings, patterns, replacements, flags, rmm::cuda_stream_default, mr);
}

}  // namespace strings
}  // namespace cudf
3744b4593b9a163f4c86e7e400f41f03a65964c0.hip
// !!! This is a file automatically generated by hipify!!!
//
// HIP port of ATen's dilated 2-D max pooling (forward + backward) with
// argmax indices, for both NCHW (Contiguous) and NHWC (ChannelsLast)
// memory formats. Kernel launch configuration is computed in the host
// wrappers below; the flat argmax stored in `indices` is `h * width + w`
// within a single (n, c) plane.
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPApplyUtils.cuh>

namespace at {
namespace native {
namespace {

// Device-side int min; shadows ::min inside this anonymous namespace.
__device__ inline int min(int a, int b) {
  return a <= b ? a : b;
}

#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched

// First pooled-output coordinate whose window covers input position `size`
// (inverse of the window-start formula, accounting for pad and dilation).
static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) {
  return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1;
}

// One past the last pooled-output coordinate whose window covers input
// position `size`, clamped to `pooled_size`.
static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) {
  return min((size + pad) / stride + 1, pooled_size);
}

// kernels borrowed from Caffe

// Forward max pooling, NCHW layout. One thread per output element
// (grid-stride via CUDA_KERNEL_LOOP over `nthreads` = output numel).
// Writes the max value to top_data and its flat h*width+w argmax to
// top_mask. NaN inputs win the comparison so NaN propagates.
template <typename scalar_t, typename accscalar_t>
__global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w, scalar_t* top_data,
    int64_t* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose flat output index into (n, c, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
    int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
    // Step the window start forward by whole dilation units until it is
    // inside the image (skips padded taps).
    while(hstart < 0)
      hstart += dilation_h;
    while(wstart < 0)
      wstart += dilation_w;
    accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity
    int maxidx = hstart * width + wstart;
    bottom_data += (n * channels + c) * height * width;
    for (int h = hstart; h < hend; h += dilation_h) {
      for (int w = wstart; w < wend; w += dilation_w) {
        scalar_t val = bottom_data[h * width + w];
        // NaN check makes NaN sticky: a NaN in the window becomes the max.
        if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) {
          maxidx = h * width + w;
          maxval = ScalarConvert<scalar_t, accscalar_t>::to(val);
        }
      }
    }
    top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval);
    top_mask[index] = maxidx;
  }
}

// Forward max pooling, NHWC (channels_last) layout. Launched (see host
// wrapper) with grid.x = batch, grid.y/grid.z tiling output width/height,
// block.x striding over channels and block.y/block.z over the output tile.
// Dynamic shared memory holds per-(y,z)-thread running max values and
// argmax indices for all channels, to reduce register pressure.
// NOTE(review): in_stride_c is a parameter but is never read in the body —
// channel access uses ptr_input[c], i.e. channels are assumed innermost
// with stride 1; confirm that is intended.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int channels,
    const int height, const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w,
    const int in_stride_c, const int in_stride_h, const int in_stride_w,
    scalar_t* top_data, int64_t* top_mask) {
  extern __shared__ int smem[];
  // Shared-memory layout: [channels * blockDim.y * blockDim.z] ints for
  // argmax, followed by the same count of scalar_t for running max values.
  int *out_mask_cached = smem;
  scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[channels*blockDim.y*blockDim.z]);
  // flattening cta for pre-computation & smem initialization;
  int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
  int block_size = blockDim.x * blockDim.y * blockDim.z;
  // use shared memory to store temporary output value. This is simply to
  // reduce register usage.
  for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) {
    out_cached[i] = at::numeric_limits<scalar_t>::lower_bound();
    out_mask_cached[i] = 0;
  }
  // Barrier before any thread reads the freshly initialized shared buffers.
  __syncthreads();
  // blockIdx.x selects the batch element; offset all base pointers to it.
  top_data = top_data + blockIdx.x * pooled_height * pooled_width * channels;
  top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels;
  bottom_data = bottom_data + blockIdx.x * channels * height * width;
  // Each (threadIdx.y, threadIdx.z) pair owns its own channels-wide slice.
  out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels];
  out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels];
  // Tile the pooled output plane across grid.z (height) and grid.y (width).
  int oH = (pooled_height + gridDim.z-1) / gridDim.z;
  int oW = (pooled_width + gridDim.y-1) / gridDim.y;
  int ostartH = threadIdx.z + blockIdx.z*oH;
  int oendH = ::min(ostartH+oH, pooled_height);
  int ostartW = threadIdx.y + blockIdx.y*oW;
  int oendW = ::min(ostartW+oW, pooled_width);
  for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
    for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
      int hstart = oh * stride_h - pad_h;
      int wstart = ow * stride_w - pad_w;
      int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
      int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
      // Skip padded taps by stepping in dilation units.
      while(hstart < 0)
        hstart += dilation_h;
      while(wstart < 0)
        wstart += dilation_w;
      for (int ih = hstart; ih < hend; ih++) {
        for (int iw = wstart; iw < wend; iw++) {
          const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w;
          // threadIdx.x strides over channels — adjacent lanes touch
          // adjacent channels (coalesced in channels_last layout).
          for(int c = threadIdx.x; c < channels; c+= blockDim.x) {
            scalar_t val = ptr_input[c];
            if ((scalar_cast<accscalar_t>(val) > out_cached[c]) || THCNumerics<scalar_t>::isnan(val)) {
              out_cached[c] = scalar_cast<accscalar_t>(val);
              out_mask_cached[c] = ih * width + iw;
            }
          }
        }
      }
      // Flush this output pixel's per-channel results and reset the cache
      // for the next (oh, ow) handled by the same thread.
      scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels;
      int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels;
      for(int c = threadIdx.x; c < channels; c+= blockDim.x) {
        ptr_output_data[c] = out_cached[c];
        ptr_output_mask[c] = out_mask_cached[c];
        out_cached[c] = at::numeric_limits<scalar_t>::lower_bound();
        out_mask_cached[c] = 0;
      }
    }
  }
}

static const int BLOCK_THREADS = 256;

// Backward max pooling, NCHW layout. Threads cover one input plane
// (height*width); blockIdx.y/z stride over batch and channels. Each input
// position accumulates gradient from every pooled window whose stored
// argmax equals it. The (phstart+1 != phend) test special-cases windows
// that map 1:1 to avoid the inner double loop.
template <typename scalar_t, typename accscalar_t>
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4)
#else
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8)
#endif
__global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff,
    const int64_t* top_mask, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w,
    scalar_t* bottom_diff) {
  // NOTE(review): the loop bound is height*width, not the `nthreads`
  // parameter — nthreads appears unused here; confirm that is intended.
  CUDA_KERNEL_LOOP(index, height*width) {
    int h = index/width;
    int w = index - h * width;
    // Range of pooled outputs whose windows can contain (h, w).
    int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h);
    int phend = p_end(h, pad_h, pooled_height, stride_h);
    int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w);
    int pwend = p_end(w, pad_w, pooled_width, stride_w);
    for (int n = blockIdx.y; n < num; n += gridDim.y)
      for (int c = blockIdx.z; c < channels; c+= gridDim.z) {
        accscalar_t gradient = accscalar_t(0);
        int offset = (n * channels + c) * pooled_height * pooled_width;
        // NOTE(review): top_diff/top_mask are advanced by `offset` each
        // (n, c) iteration rather than indexed from a fixed base — offsets
        // accumulate across loop iterations; verify against the grid
        // striding that each (n, c) pair is visited exactly once per thread.
        top_diff += offset;
        top_mask += offset;
        if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
          for (int ph = phstart; ph < phend; ++ph) {
            for (int pw = pwstart; pw < pwend; ++pw) {
              if (top_mask[ph * pooled_width + pw] == h * width + w) {
                gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]);
              }
            }
          }
        } else {
          // Exactly one candidate window — no loop needed.
          if (top_mask[phstart * pooled_width + pwstart] == h * width + w) {
            gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]);
          }
        }
        bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
      }
  }
}

// Backward max pooling, NHWC layout. Mirrors max_pool_forward_nhwc's
// launch shape: grid.x = batch, grid.y/grid.z tile input width/height,
// block.x strides over channels. Dynamic shared memory caches one
// channels-wide gradient accumulator per (y,z) thread.
// NOTE(review): `nthreads` and `in_stride_*`/`out_stride_c` parameters are
// not all read in the body; channel access assumes stride-1 channels.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff,
    const int64_t* top_mask, const int num, const int channels,
    const int height, const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w,
    const int out_stride_c, const int out_stride_h, const int out_stride_w,
    const int in_stride_c, const int in_stride_h, const int in_stride_w,
    scalar_t* bottom_diff) {
  extern __shared__ int smem[];
  scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem);
  int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
  int block_size = blockDim.x * blockDim.y * blockDim.z;
  // Zero the shared gradient accumulators before use.
  for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) {
    out_cached[i] = scalar_t(0.0);
  }
  __syncthreads();
  out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels];
  // blockIdx.x selects the batch element.
  bottom_diff = bottom_diff + blockIdx.x * height * width * channels;
  top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels;
  top_diff = top_diff + blockIdx.x * pooled_height * pooled_width * channels;
  // Tile the input plane across grid.z (height) and grid.y (width).
  int iH = (height + gridDim.z-1) / gridDim.z;
  int iW = (width + gridDim.y-1) / gridDim.y;
  int istartH = threadIdx.z + blockIdx.z*iH;
  int iendH = ::min(istartH+iH, height);
  int istartW = threadIdx.y + blockIdx.y*iW;
  int iendW = ::min(istartW+iW, width);
  for (int ih = istartH; ih < iendH; ih+=blockDim.z) {
    for (int iw = istartW; iw < iendW; iw+=blockDim.y) {
      // Pooled outputs whose windows can contain input pixel (ih, iw).
      int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h);
      int phend = p_end(ih, pad_h, pooled_height, stride_h);
      int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w);
      int pwend = p_end(iw, pad_w, pooled_width, stride_w);
      if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
        // General case: accumulate in shared memory, then flush and reset.
        for(int oh = phstart; oh < phend; ++oh) {
          for(int ow = pwstart; ow < pwend; ++ow) {
            const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w;
            for (int c = threadIdx.x; c < channels; c += blockDim.x) {
              if (ptr_top_mask[c] == ih * width + iw) {
                out_cached[c] += scalar_cast<scalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c]);
              }
            }
          }
        }
        scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels;
        for (int c = threadIdx.x; c < channels; c += blockDim.x) {
          ptr_bottom_diff[c] = out_cached[c];
          out_cached[c] = scalar_t(0.0);
        }
      } else {
        // Single candidate window: write the gradient straight through.
        const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w;
        scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels;
        for (int c = threadIdx.x; c < channels; c += blockDim.x) {
          if (ptr_top_mask[c] == ih * width + iw) {
            ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c]);
          }
        }
      }
    }
  }
}

// Host-side forward driver: validates arguments, resizes output/indices to
// {nbatch, C, oH, oW} with the input's suggested memory format, and
// dispatches the NHWC or NCHW forward kernel per dtype.
// NOTE(review): on failure the TORCH_CHECK message calls hipGetLastError()
// a second time — the first call already cleared the sticky error, so the
// code printed in the message is hipSuccess; capture the code once instead.
void max_pool2d_with_indices_out_cuda_template(
           Tensor& output,
           Tensor& indices,
           const Tensor& input_,
           IntArrayRef kernel_size,
           IntArrayRef stride,
           IntArrayRef padding,
           IntArrayRef dilation,
           bool ceil_mode)
{
  TensorArg output_arg{ output, "output", 1 };
  TensorArg indices_arg{ indices, "indices", 2 };
  TensorArg input_arg{ input_, "input_", 3 };
  checkAllSameGPU("max_pool2d_with_indices_out_cuda",
                  {output_arg, indices_arg, input_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
    "max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
  const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);

  // NB: stride default is not expressible as an integer constant, so we accept
  // empty stride for this case
  TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
    "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
  const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);

  TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
    "max_pool2d: padding must be either be a single int, or a tuple of two ints");
  const int padH = safe_downcast<int, int64_t>(padding[0]);
  const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);

  TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
    "max_pool2d: dilation must be either a single int, or a tuple of two ints");
  const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
  const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);

  const auto memory_format = input_.suggest_memory_format();
  // channels_last requires an explicit batch dimension; contiguous accepts 3-D.
  if (memory_format == at::MemoryFormat::ChannelsLast) {
    TORCH_CHECK(input_.ndimension() == 4,
      "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
  } else {
    TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");
  }

  const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
  const int64_t nInputPlane = input_.size(-3);
  const int64_t inputHeight = input_.size(-2);
  const int64_t inputWidth = input_.size(-1);

  const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
  const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);

  pool2d_shape_check(
    input_,
    kH, kW, dH, dW, padH, padW, dilationH, dilationW,
    nInputPlane,
    inputHeight, inputWidth,
    outputHeight, outputWidth);

  Tensor input = input_.contiguous(memory_format);

  const int64_t in_stride_c = input.stride(-3);
  const int64_t in_stride_h = input.stride(-2);
  const int64_t in_stride_w = input.stride(-1);

  output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
  indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth});

  output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
  indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);

  // NOTE(review): output.numel() is narrowed to int here — safe_downcast
  // will catch overflow, but very large outputs would fail this downcast.
  const int count = safe_downcast<int, int64_t>(output.numel());

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
    "max_pool2d_with_indices_out_cuda_frame",
    [&] {
      using accscalar_t = acc_type<scalar_t, true>;

      scalar_t *output_data = output.data_ptr<scalar_t>();
      scalar_t *input_data = input.data_ptr<scalar_t>();
      int64_t *indices_data = indices.data_ptr<int64_t>();

      switch (memory_format) {
        case MemoryFormat::ChannelsLast: {
          // Block: x over channels (capped at warp size), y over output
          // width, z over output height; x is then re-widened to use any
          // leftover threads. Grid: x = batch, y/z tile the output plane
          // with BLOCK_STRIDE output pixels per thread.
          const int max_threads = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
          int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
          int block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
          int block_y = std::min<int>(
              maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x));
          int block_z = std::min<int>(
              maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y));
          block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
          const dim3 block(block_x, block_y, block_z);

          int grid_x = nbatch;
          int grid_y = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE));
          int grid_z = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE));
          const dim3 grid(grid_x, grid_y, grid_z);

          // Dynamic smem: per-(y,z)-thread channel caches for the argmax
          // ints plus the scalar max values (matches the kernel's layout).
          hipLaunchKernelGGL(( max_pool_forward_nhwc<scalar_t, scalar_t>)
          , dim3(grid), dim3(block), nInputPlane * block_y * block_z * (sizeof(int) + sizeof(scalar_t)), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              input_data,
                  nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
                  kH, kW, dH, dW, padH, padW, dilationH, dilationW,
                  in_stride_c, in_stride_h, in_stride_w,
                  output_data, indices_data);
          break;
        }
        case MemoryFormat::Contiguous: {
          // One thread per output element, flat 1-D launch.
          const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
                                        BLOCK_THREADS);
          hipLaunchKernelGGL(( max_pool_forward_nchw<scalar_t, scalar_t>)
              , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              count, input_data,
                  nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
                  kH, kW, dH, dW, padH, padW, dilationH, dilationW,
                  output_data, indices_data);
          break;
        }
        default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
      }
    }
  );

  TORCH_CHECK(hipGetLastError() == hipSuccess,
    "max_pool2d_with_indices_out_cuda_frame failed with error code ",
    hipGetLastError());

  // Restore the 3-D shape for unbatched input (nbatch was synthesized as 1).
  if(input.ndimension() == 3) {
    output.resize_({nInputPlane, outputHeight, outputWidth});
  }
}

// Host-side backward driver: validates arguments against the forward
// shapes, resizes gradInput like input, and dispatches the NHWC or NCHW
// backward kernel per dtype.
// NOTE(review): same double hipGetLastError() pattern as the forward
// driver, and the failure message names fractional_max_pool2d — likely a
// copy/paste of the wrong kernel name; confirm before relying on logs.
void max_pool2d_with_indices_backward_out_cuda_template(
           Tensor& gradInput,
           const Tensor& gradOutput_,
           const Tensor& input_,
           const Tensor& indices,
           IntArrayRef kernel_size,
           IntArrayRef stride,
           IntArrayRef padding,
           IntArrayRef dilation,
           bool ceil_mode)
{
  TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
  TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
  TensorArg input_arg{ input_, "input_", 3 };
  TensorArg indices_arg{ indices, "indices", 4 };
  checkAllSameGPU("max_pool2d_with_indices_out_cuda",
                  {gradInput_arg, gradOutput_arg, input_arg, indices_arg});

  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
    "max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
  const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);

  // NB: stride default is not expressible as an integer constant, so we accept
  // empty stride for this case
  TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
    "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
  const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);

  TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
    "max_pool2d: padding must be either be a single int, or a tuple of two ints");
  const int padH = safe_downcast<int, int64_t>(padding[0]);
  const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);

  TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
    "max_pool2d: dilation must be either a single int, or a tuple of two ints");
  const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
  const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);

  const auto memory_format = input_.suggest_memory_format();
  if (memory_format == at::MemoryFormat::ChannelsLast) {
    TORCH_CHECK(input_.ndimension() == 4,
      "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
  } else {
    TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");
  }
  const Tensor input = input_.contiguous(memory_format);

  const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
  const int64_t nInputPlane = input.size(-3);
  const int64_t inputHeight = input.size(-2);
  const int64_t inputWidth = input.size(-1);

  const int64_t in_stride_c = input.stride(-3);
  const int64_t in_stride_h = input.stride(-2);
  const int64_t in_stride_w = input.stride(-1);

  const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
  const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);

  max_pool2d_backward_shape_check(
    input_,
    gradOutput_,
    indices,
    nbatch,
    kH, kW, dH, dW, padH, padW, dilationH, dilationW,
    nInputPlane,
    inputHeight, inputWidth,
    outputHeight, outputWidth,
    /*cuda=*/ true);

  const Tensor gradOutput = gradOutput_.contiguous(memory_format);

  const int64_t out_stride_c = gradOutput.stride(-3);
  const int64_t out_stride_h = gradOutput.stride(-2);
  const int64_t out_stride_w = gradOutput.stride(-1);

  gradInput.resize_as_(input);
  gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);

  int64_t count = input.numel();

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
    "max_pool2d_with_indices_out_cuda_frame",
    [&] {
      using accscalar_t = acc_type<scalar_t, true>;

      scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
      scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
      int64_t *indices_data = indices.data_ptr<int64_t>();

      switch (memory_format) {
        case MemoryFormat::ChannelsLast: {
          // Same block/grid scheme as the forward NHWC launch, but tiled
          // over the INPUT plane (each thread owns input pixels).
          const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
          int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
          int block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
          int block_y = std::min<int>(
              maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x));
          int block_z = std::min<int>(
              maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y));
          block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
          const dim3 block(block_x, block_y, block_z);

          int grid_x = nbatch;
          int grid_y = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE));
          int grid_z = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE));
          const dim3 grid(grid_x, grid_y, grid_z);

          // Dynamic smem: one channels-wide scalar accumulator per (y,z)
          // thread (matches the kernel's out_cached layout).
          hipLaunchKernelGGL(( max_pool_backward_nhwc<scalar_t, accscalar_t>)
          , dim3(grid), dim3(block), nInputPlane * block_y * block_z * sizeof(scalar_t), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                  count, gradOutput_data,
                  indices_data,
                  nbatch,
                  nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
                  kH, kW, dH, dW, padH, padW, dilationH, dilationW,
                  out_stride_c, out_stride_h, out_stride_w,
                  in_stride_c, in_stride_h, in_stride_w,
                  gradInput_data);
          break;
        }
        case MemoryFormat::Contiguous: {
          // grid.x covers one input plane; grid.y/z stride over batch and
          // channels (clamped to device grid limits — the kernel loops to
          // cover the remainder).
          int imgcount = inputWidth * inputHeight;
          dim3 grid;
          const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS;
          grid.x = blocks;
          grid.y = nbatch;
          uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
          if (maxGridY < grid.y) grid.y = maxGridY;
          grid.z = nInputPlane;
          uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];
          if (maxGridZ < grid.z) grid.z = maxGridZ;

          hipLaunchKernelGGL(( max_pool_backward_nchw<scalar_t, accscalar_t>)
          , dim3(grid), dim3(BLOCK_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                  count,
                  gradOutput_data,
                  indices_data,
                  nbatch,
                  nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
                  kH, kW, dH, dW, padH, padW, dilationH, dilationW,
                  gradInput_data);
          break;
        }
        default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
      }
    }
  );

  TORCH_CHECK(hipGetLastError() == hipSuccess,
    "fractional_max_pool2d_backward_out_cuda failed with error code ",
    hipGetLastError());
}

} // namespace

// Public out-variant: forward pooling into caller-provided output/indices.
std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda(
  Tensor& output,
  Tensor& indices,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  max_pool2d_with_indices_out_cuda_template(
    output,
    indices,
    input,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode);
  return std::tuple<Tensor&, Tensor&>(output, indices);
}

// Public allocating variant: creates empty output (input dtype) and int64
// indices tensors, then delegates to the template (which resizes them).
std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda(
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  Tensor output = at::empty({0}, input.options());
  Tensor indices = at::empty({0}, input.options().dtype(kLong));
  max_pool2d_with_indices_out_cuda_template(
    output,
    indices,
    input,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode);
  return std::tuple<Tensor, Tensor>(output, indices);
}

// Public out-variant of the backward pass.
Tensor& max_pool2d_with_indices_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  max_pool2d_with_indices_backward_out_cuda_template(
    gradInput,
    gradOutput_,
    input,
    indices,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode);
  return gradInput;
}

// Public allocating variant of the backward pass: gradInput starts zeroed
// and contiguous, then is resized/restrided by the template.
Tensor max_pool2d_with_indices_backward_cuda(
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  auto gradInput = at::zeros_like(input, at::MemoryFormat::Contiguous);
  max_pool2d_with_indices_backward_out_cuda_template(
    gradInput,
    gradOutput_,
    input,
    indices,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode);
  return gradInput;
}

} // at::native
} // at
3744b4593b9a163f4c86e7e400f41f03a65964c0.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) 
hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = bottom_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[channels*blockDim.y*blockDim.z]); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. 
for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[i] = 0; } __syncthreads(); top_data = top_data + blockIdx.x * pooled_height * pooled_width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; bottom_data = bottom_data + blockIdx.x * channels * height * width; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int hstart = oh * stride_h - pad_h; int wstart = ow * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { scalar_t val = ptr_input[c]; if ((scalar_cast<accscalar_t>(val) > out_cached[c]) || THCNumerics<scalar_t>::isnan(val)) { out_cached[c] = scalar_cast<accscalar_t>(val); out_mask_cached[c] = ih * width + iw; } } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; for(int c = threadIdx.x; c < channels; c+= blockDim.x) { ptr_output_data[c] = out_cached[c]; ptr_output_mask[c] = out_mask_cached[c]; out_cached[c] = 
at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[c] = 0; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index/width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]); } } } } else { if (top_mask[phstart * pooled_width + pwstart] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]); } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int 
nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_c, const int in_stride_h, const int in_stride_w, scalar_t* bottom_diff) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; for (int i = thread_id; i < channels*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * channels]; bottom_diff = bottom_diff + blockIdx.x * height * width * channels; top_mask = top_mask + blockIdx.x * pooled_height * pooled_width * channels; top_diff = top_diff + blockIdx.x * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = 
threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { out_cached[c] += scalar_cast<scalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c]); } } } } scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { ptr_bottom_diff[c] = out_cached[c]; out_cached[c] = scalar_t(0.0); } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + (ih * width + iw) * channels; for (int c = threadIdx.x; c < channels; c += blockDim.x) { if (ptr_top_mask[c] == ih * width + iw) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c]); } } } } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], 
std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); max_pool_forward_nhwc<scalar_t, scalar_t> <<<grid, block, nInputPlane * block_y * block_z * (sizeof(int) + sizeof(scalar_t)), at::cuda::getCurrentCUDAStream()>>>( input_data, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_c, in_stride_h, in_stride_w, output_data, indices_data); break; } case MemoryFormat::Contiguous: { const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); max_pool_forward_nchw<scalar_t, scalar_t> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "max_pool2d_with_indices_out_cuda_frame failed with error code ", cudaGetLastError()); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? 
padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.resize_as_(input); gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using 
accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int grid_x = nbatch; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); max_pool_backward_nhwc<scalar_t, accscalar_t> <<<grid, block, nInputPlane * block_y * block_z * sizeof(scalar_t), at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_c, in_stride_h, in_stride_w, gradInput_data); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = 
at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; max_pool_backward_nchw<scalar_t, accscalar_t> <<<grid, BLOCK_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fractional_max_pool2d_backward_out_cuda failed with error code ", cudaGetLastError()); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, 
kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input, at::MemoryFormat::Contiguous); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
678a33a5c264697be792aa3af7e689ce978bbbaa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> __global__ void gpuKernel(const int gpuID) { } __host__ void gpuKernelWrapper(void * number, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, hipStream_t stream) { }
678a33a5c264697be792aa3af7e689ce978bbbaa.cu
#include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> __global__ void gpuKernel(const int gpuID) { } __host__ void gpuKernelWrapper(void * number, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, cudaStream_t stream) { }
f49b57125202252963635ef13d533c5d372e47d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _TTPROJECTRAY_KERNEL_CU_ #define _TTPROJECTRAY_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> #include <tt_project_ray_gpu.h> #define MAX_STEPS 10000 hipArray *d_volumeArray = 0; texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_tt_project_ray(float *d_output, float3 sourcePosition, float3 volumeSize, uint imageWidthPixels, uint imageHeightPixels, float tStep) { const uint image_width_pixels = imageWidthPixels; const uint image_height_pixels = 
imageHeightPixels; const float3 volume_size = volumeSize; const float3 source_position = sourcePosition; const float tstep = tStep; const int maxSteps = MAX_STEPS; //(volume_size.x^2+volume_size.y^2+volume_size.z^2)^0.5f/tStep; //diagonal of the bounding box const float3 boxMin = make_float3(0.0f, 0.0f, 0.0f); const float3 boxMax = make_float3(volume_size.x, volume_size.y, volume_size.z); const float3 rec_volume_size = 1.0f/volume_size; //x and y index detector pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= image_width_pixels) || (y >= image_height_pixels)) return; //u and v are in normalized detector pixel [0,0]->[1,1] float u = (x / (float) image_width_pixels); float v = (y / (float) image_height_pixels); // calculate eye ray in world space Ray eyeRay; eyeRay.o = source_position; //transform and normalize direction vector eyeRay.d = normalize(make_float3(mul(c_invViewMatrix, make_float4(u,v,0.0f,1.0f)))-eyeRay.o); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating float sum; float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates //float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); float sample = tex3D(tex, pos.x*rec_volume_size.x, pos.y*rec_volume_size.y, pos.z*rec_volume_size.z); sum = sum + sample; t += tstep; if (t > tfar) break; pos += step; } d_output[y*image_width_pixels + x] = sum; } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? 
hipFilterModeLinear : hipFilterModePoint; } extern "C" void initCuda(void *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); cutilSafeCall( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture cutilSafeCall(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); } extern "C" void freeCudaBuffers() { cutilSafeCall(hipFreeArray(d_volumeArray)); } extern "C" void tt_project_ray_kernel(dim3 gridSize, dim3 blockSize, float *d_output, float3 source_position, float3 volume_size, uint imageW, uint imageH, float t_step) { hipLaunchKernelGGL(( d_tt_project_ray), dim3(gridSize), dim3(blockSize), 0, 0, d_output, source_position, volume_size, imageW, imageH, t_step); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { /* fprintf(stderr,"\nMatrix:"); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[0],invViewMatrix[1],invViewMatrix[3],invViewMatrix[3]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[4],invViewMatrix[5],invViewMatrix[6],invViewMatrix[7]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[8],invViewMatrix[9],invViewMatrix[10],invViewMatrix[11]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[12],invViewMatrix[13],invViewMatrix[14],invViewMatrix[15]);*/ cutilSafeCall( 
hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _TTPROJECTRAY_KERNEL_CU_
f49b57125202252963635ef13d533c5d372e47d4.cu
#ifndef _TTPROJECTRAY_KERNEL_CU_ #define _TTPROJECTRAY_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> #include <tt_project_ray_gpu.h> #define MAX_STEPS 10000 cudaArray *d_volumeArray = 0; texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __global__ void d_tt_project_ray(float *d_output, float3 sourcePosition, float3 volumeSize, uint imageWidthPixels, uint imageHeightPixels, float tStep) { const uint image_width_pixels = imageWidthPixels; const uint image_height_pixels = imageHeightPixels; const float3 volume_size = volumeSize; const float3 source_position = 
sourcePosition; const float tstep = tStep; const int maxSteps = MAX_STEPS; //(volume_size.x^2+volume_size.y^2+volume_size.z^2)^0.5f/tStep; //diagonal of the bounding box const float3 boxMin = make_float3(0.0f, 0.0f, 0.0f); const float3 boxMax = make_float3(volume_size.x, volume_size.y, volume_size.z); const float3 rec_volume_size = 1.0f/volume_size; //x and y index detector pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= image_width_pixels) || (y >= image_height_pixels)) return; //u and v are in normalized detector pixel [0,0]->[1,1] float u = (x / (float) image_width_pixels); float v = (y / (float) image_height_pixels); // calculate eye ray in world space Ray eyeRay; eyeRay.o = source_position; //transform and normalize direction vector eyeRay.d = normalize(make_float3(mul(c_invViewMatrix, make_float4(u,v,0.0f,1.0f)))-eyeRay.o); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating float sum; float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for(int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates //float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); float sample = tex3D(tex, pos.x*rec_volume_size.x, pos.y*rec_volume_size.y, pos.z*rec_volume_size.z); sum = sum + sample; t += tstep; if (t > tfar) break; pos += step; } d_output[y*image_width_pixels + x] = sum; } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? 
cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); cutilSafeCall( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture cutilSafeCall(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); } extern "C" void freeCudaBuffers() { cutilSafeCall(cudaFreeArray(d_volumeArray)); } extern "C" void tt_project_ray_kernel(dim3 gridSize, dim3 blockSize, float *d_output, float3 source_position, float3 volume_size, uint imageW, uint imageH, float t_step) { d_tt_project_ray<<<gridSize, blockSize>>>( d_output, source_position, volume_size, imageW, imageH, t_step); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { /* fprintf(stderr,"\nMatrix:"); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[0],invViewMatrix[1],invViewMatrix[3],invViewMatrix[3]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[4],invViewMatrix[5],invViewMatrix[6],invViewMatrix[7]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[8],invViewMatrix[9],invViewMatrix[10],invViewMatrix[11]); fprintf(stderr,"\n %4.2f %4.2f %4.2f %4.2f ",invViewMatrix[12],invViewMatrix[13],invViewMatrix[14],invViewMatrix[15]);*/ cutilSafeCall( cudaMemcpyToSymbol(c_invViewMatrix, 
invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _TTPROJECTRAY_KERNEL_CU_
36d88871765f2b39a96ed1c72548c14a19181138.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * DataReader.cpp * * Created on: 06.06.2016 * Author: Sebastian Reinhart */ #include "DataReader.cuh" DataReader::DataReader() { hipError_t error; hipStreamCreate(&stream1); hipStreamCreate(&stream0); error = hipHostMalloc((void**) &h_data, NUMBER_LASERRAYS*sizeof(laserdata_raw), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_data, NUMBER_LASERRAYS*sizeof(laserdata_raw)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &dist, NUMBER_LASERRAYS*sizeof(float), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_dist, NUMBER_LASERRAYS*sizeof(float)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_relMeas, MAX_SEGMENTS*3*sizeof(laserdata_cartesian), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_relMeas, MAX_SEGMENTS*3*sizeof(laserdata_cartesian)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &thresh, NUMBER_LASERRAYS*sizeof(float), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_thresh, NUMBER_LASERRAYS*sizeof(float)); if (error != hipSuccess) { printf("hipGetDevice 
returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &raw_segments, (MAX_SEGMENTS+1)*sizeof(raw_segment), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_rawSegs, (MAX_SEGMENTS+1)*sizeof(raw_segment)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &car_segments, (MAX_SEGMENTS+1)*sizeof(cartesian_segment), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_carSegs, (MAX_SEGMENTS+1)*sizeof(cartesian_segment)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error =hipHostMalloc((void **) &h_minDistance, MAX_SEGMENTS*sizeof(unsigned long long), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void**) &d_minDistance, MAX_SEGMENTS*sizeof(unsigned long long)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } } DataReader::~DataReader() { // Free device memory hipHostFree(h_data); hipHostFree(dist); hipHostFree(thresh); hipHostFree(h_minDistance); hipHostFree(raw_segments); hipHostFree(car_segments); hipHostFree(h_relMeas); hipFree(d_data); hipFree(d_dist); hipFree(d_thresh); hipFree(d_minDistance); hipFree(d_rawSegs); hipFree(d_relMeas); hipFree(d_carSegs); hipStreamDestroy(stream1); hipStreamDestroy(stream0); } __global__ void getRelevantMeas(cartesian_segment* carSegs, laserdata_cartesian* d_laser, unsigned long 
long* dist) { int index = blockIdx.x*3; d_laser[index] = carSegs[blockIdx.x].measures[0]; d_laser[index+2] = carSegs[blockIdx.x].measures[carSegs[blockIdx.x].numberOfMeasures-1]; unsigned long long tmp; if(threadIdx.x < carSegs[blockIdx.x].numberOfMeasures) { float x = carSegs[blockIdx.x].measures[threadIdx.x].x; float y = carSegs[blockIdx.x].measures[threadIdx.x].y; tmp = sqrtf(x*x + y*y)*10000; atomicMin(&(dist[blockIdx.x]), tmp); __syncthreads(); if(dist[blockIdx.x] == tmp) { d_laser[index+1] = carSegs[blockIdx.x].measures[threadIdx.x]; } } } __device__ float computeEuclideanDistance(laserdata_raw p1, laserdata_raw p2) { float square1 = p1.distance*p1.distance; float square2 = p2.distance*p2.distance; float deltaAlpha = p2.angle-p1.angle; deltaAlpha = (deltaAlpha * ((float)M_PI) / 180.0f); return sqrtf(square1+square2-2*p1.distance*p2.distance*cosf(deltaAlpha)); } __device__ float computeThreshold(laserdata_raw p1, laserdata_raw p2) { float C0 = 1.0f; float C1; float min_distance = p2.distance; float deltaAlpha; deltaAlpha = (0.25f * ((float)M_PI) / 180.0f); if(p1.distance < p2.distance) { min_distance = p1.distance; } C1 = sqrtf(2*(1-cosf(deltaAlpha))); return C0 + C1*min_distance; } __device__ void doCoordinateTransformDevice(raw_segment* rawSegs, cartesian_segment* carSegs, int segIndex, int laserIndex) { carSegs[segIndex].numberOfMeasures = rawSegs[segIndex].numberOfMeasures; float angleInRadians; if(laserIndex < rawSegs[segIndex].numberOfMeasures) { angleInRadians = rawSegs[segIndex].measures[laserIndex].angle * ((float)M_PI) / 180.0f; carSegs[segIndex].measures[laserIndex].x = rawSegs[segIndex].measures[laserIndex].distance*cosf(angleInRadians); carSegs[segIndex].measures[laserIndex].y = rawSegs[segIndex].measures[laserIndex].distance*sinf(angleInRadians); } } __global__ void coordinateTransform(raw_segment* rawSegs, cartesian_segment* carSegs) { doCoordinateTransformDevice(rawSegs, carSegs, blockIdx.x, threadIdx.x); } __global__ void 
processDist(laserdata_raw* data, float* distance) { distance[threadIdx.x] = computeEuclideanDistance(data[threadIdx.x], data[threadIdx.x + 1]); } __global__ void processThresh(laserdata_raw* data, float* threshold) { threshold[threadIdx.x] = computeThreshold(data[threadIdx.x], data[threadIdx.x + 1]); } /* * Reads the data out of the specified file and writes it to given array * @param * data: array for raw laserdata * @return * 1 if an error occurs * 0 if everything is ok */ int DataReader::getLaserData(laserdata_raw_array data, std::string number) { std::ostringstream measurePath; measurePath << MEASUREPATH << number << ".txt"; #ifdef PRINT std::cout << measurePath.str() << std::endl; #endif std::ifstream input(measurePath.str().c_str()); std::string line; int counter = 0; std::string segment; /* * Laserscanner range 145 with resolution of 0.25 -> 580 values + Zero value = 581 *-72,5 0 72,5 * \ | / * \ | / * \ | / * \ | / * \|/ * Laser */ double angle = -72.5; //now read the data we are interested in counter = 0; int lineCounter = 0; while( std::getline( input, line ) && lineCounter < NUMBER_LASERRAYS ) { //std::cout<<line<<'\n'; std::stringstream ss; ss << line; int datacnt = 1; //extract relevant data from line int valid = 0; while(std::getline(ss, segment, ' ')) { if(segment.size() > 0) { if(segment.at(0) < 48 || segment.at(0) > 57) { continue; } else { if(datacnt == 5) { valid = atoi(segment.c_str()); if(valid == 1) { data[counter].valid = valid; } } else if(datacnt == 6 && valid == 1) { data[counter].distance = atof(segment.c_str()); break; } ++datacnt; } } } if(valid) { data[counter].angle = angle; ++counter; } angle += 0.25; ++lineCounter; } #ifdef PRINT std::cout << counter << std::endl; #endif return counter; } /** * Runs over all laser points and tries to group them to segments, regarding to their euclidean distance to their neighbor point * going from left to right */ int DataReader::processLaserData(std::string number, double currentSpeed, double 
currentYawRate, PointCellDevice* h_vehicles) { this->currentSpeed = currentSpeed; this->currentYawRate = currentYawRate; //read new data from file int numElements = getLaserData(h_data, number); if(numElements < 3) { std::vector<PointCellDevice> vehicles; return 0; } hipMemcpyAsync(d_data, h_data, numElements*sizeof(laserdata_raw), hipMemcpyHostToDevice,stream1); hipStreamSynchronize(stream1); hipLaunchKernelGGL(( processDist), dim3(1),dim3(numElements-1),0,stream1, d_data, d_dist); hipLaunchKernelGGL(( processThresh), dim3(1), dim3(numElements-1),0, stream0, d_data,d_thresh); for(int i=0; i<MAX_SEGMENTS; i++) { h_minDistance[i] = INT_MAX; } int segment_counter = 0; int data_counter = 0; //first point automatically is part of the first segment; raw_segments[MAX_SEGMENTS].numberOfMeasures = 1; raw_segments[MAX_SEGMENTS].measures[0] = h_data[0]; hipMemcpyAsync(dist, d_dist, (numElements-1)*sizeof(float), hipMemcpyDeviceToHost, stream1); hipMemcpyAsync(thresh, d_thresh, (numElements-1)*sizeof(float), hipMemcpyDeviceToHost, stream0); hipStreamSynchronize(stream1); hipStreamSynchronize(stream0); hipMemcpyAsync(d_minDistance, h_minDistance, MAX_SEGMENTS*sizeof(unsigned long long), hipMemcpyHostToDevice,stream0); //iterate over all measures for(int i=1; i<numElements; i++) { if(dist[i-1] <= thresh[i-1]) { //add current point in existing segment raw_segments[MAX_SEGMENTS].numberOfMeasures++; raw_segments[MAX_SEGMENTS].measures[++data_counter] = h_data[i]; } else { //point belongs to other object -> store current Segment and reset tmp-segment object //only keep segments with at least 3 points if(raw_segments[MAX_SEGMENTS].numberOfMeasures >= 3) { raw_segments[segment_counter].numberOfMeasures = raw_segments[MAX_SEGMENTS].numberOfMeasures; for(int j=0; j<raw_segments[MAX_SEGMENTS].numberOfMeasures; j++) { raw_segments[segment_counter].measures[j] = raw_segments[MAX_SEGMENTS].measures[j]; } segment_counter++; } raw_segments[MAX_SEGMENTS].numberOfMeasures = 1; 
raw_segments[MAX_SEGMENTS].measures[0] = h_data[i]; data_counter = 0; } } if(raw_segments[MAX_SEGMENTS].numberOfMeasures >= 3) { raw_segments[segment_counter].numberOfMeasures = raw_segments[MAX_SEGMENTS].numberOfMeasures; for(int j=0; j<raw_segments[MAX_SEGMENTS].numberOfMeasures; j++) { raw_segments[segment_counter].measures[j] = raw_segments[MAX_SEGMENTS].measures[j]; } ++segment_counter; } #ifdef PRINT printf("Extracted %d Objects from Laserdata\n", segment_counter); #endif hipMemcpyAsync(d_rawSegs, raw_segments, segment_counter*sizeof(raw_segment), hipMemcpyHostToDevice,stream1); hipLaunchKernelGGL(( coordinateTransform), dim3(segment_counter),dim3(NUMBER_LASERRAYS),0,stream1, d_rawSegs, d_carSegs); hipStreamSynchronize(stream1); hipMemcpyAsync(car_segments, d_carSegs, segment_counter*sizeof(cartesian_segment), hipMemcpyDeviceToHost,stream1); hipLaunchKernelGGL(( getRelevantMeas), dim3(segment_counter), dim3(NUMBER_LASERRAYS),0,stream0, d_carSegs,d_relMeas,d_minDistance); #ifdef VISUALIZE visualizer.visualizeSegmentsAsPointCloud(car_segments, number, segment_counter); #endif hipMemcpyAsync(h_relMeas, d_relMeas, segment_counter*3*sizeof(laserdata_cartesian), hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); hipStreamSynchronize(stream1); int vehicles = computeVehicleState(car_segments, segment_counter, number, h_vehicles); return vehicles; } int DataReader::computeVehicleState(cartesian_segment* segments, int segmentCounter, std::string number, PointCellDevice* h_vehicles) { std::vector<PointCellDevice> vehicles; int counter = 0; laserdata_cartesian* relevantPoints; std::vector<std::vector<laserdata_cartesian> > toPlot; for(uint i=0; i<segmentCounter; i++) { relevantPoints = &h_relMeas[i*3]; //we have three different points, compute bounds int left = 0; //right point - left point double width = fabs(relevantPoints[2].y - relevantPoints[0].y); double length = fabs(relevantPoints[2].x - relevantPoints[0].x); double nearestLengthLeft = 
fabs(relevantPoints[1].x - relevantPoints[0].x); double nearestLengthRight = fabs(relevantPoints[1].x - relevantPoints[2].x); double nearestWidthLeft = fabs(relevantPoints[1].y - relevantPoints[0].y); double nearestWidthRight = fabs(relevantPoints[1].y - relevantPoints[2].y); //compute orientation of object regarding to the driving direction of our own car //own direction vector(x,y): (1,0) if(length > 2) { length = fabs(relevantPoints[1].x - relevantPoints[0].x); width = fabs(relevantPoints[1].y - relevantPoints[0].y); } double theta = acos(length/(1*sqrt(width*width + length*length))) * 180.0 / M_PI; double thetaLeft = acos(nearestLengthLeft/(1*sqrt(nearestWidthLeft*nearestWidthLeft + nearestLengthLeft*nearestLengthLeft))) * 180.0 / M_PI; double thetaRight = acos(nearestLengthRight/(1*sqrt(nearestWidthRight*nearestWidthRight + nearestLengthRight*nearestLengthRight))) * 180.0 / M_PI; //objects should not be classified as vehicle if their orientation is bigger than 45 //real vehicles should never be rotated over that value //the detected car probably is defined with the points that form the biggest angle and are wider than 1m int points = 0; if(thetaLeft + 5 > theta && nearestWidthLeft > 1) { theta = thetaLeft; length = nearestLengthLeft; width = nearestWidthLeft; points = 1; } if(thetaRight + 5 > theta && nearestWidthRight > 1) { theta = thetaRight; length = nearestLengthRight; width = nearestWidthRight; left = 1; points = 2; } if(theta > 60 && width > 1) { double y = relevantPoints[left].y + width/2; h_vehicles[counter].initializeMemory(); h_vehicles[counter].setX(relevantPoints[left].x + length/2);//x h_vehicles[counter].setY(relevantPoints[left].y + width/2); // y //now compute theta regarding to movement direction switch(points) { case 0: width = (relevantPoints[2].y - relevantPoints[0].y); length = (relevantPoints[2].x - relevantPoints[0].x); break; case 1: length = (relevantPoints[1].x - relevantPoints[0].x); width = (relevantPoints[1].y - 
relevantPoints[0].y); break; case 2: length = (relevantPoints[1].x - relevantPoints[2].x); width = (relevantPoints[1].y - relevantPoints[2].y); break; } theta = atan(width/length); h_vehicles[counter].setTheta(theta*M_PI / 180.0); //theta //due to prior knowledge on highways, velocitys for diffrent lanes are estimated as below if(y < -4.5) { h_vehicles[counter].setVelocity(currentSpeed + 11.11); //velocity + 40kmh } else if(y < -1.5) { h_vehicles[counter].setVelocity(currentSpeed + 5.55); //velocity + 20kmh } else if(y > 4.5) { h_vehicles[counter].setVelocity(currentSpeed - 11.11); //velocity - 40kmh } else if(y > 1.5) { h_vehicles[counter].setVelocity(currentSpeed - 5.55); //velocity - 20kmh } else { h_vehicles[counter].setVelocity(currentSpeed); //velocity } h_vehicles[counter].setPhi(currentYawRate); //yaw rate h_vehicles[counter].subInvtl = 0.5; ++counter; std::vector<laserdata_cartesian> tmp; tmp.push_back(relevantPoints[0]); tmp.push_back(relevantPoints[1]); tmp.push_back(relevantPoints[2]); toPlot.push_back(tmp); } } #ifdef PRINT std::cout<<"Extracted " << counter << " Vehicles from Data" << std::endl; #endif #ifdef VISUALIZE visualizer.visualizeVehiclesAsRectangle(toPlot, number); #endif return counter; }
36d88871765f2b39a96ed1c72548c14a19181138.cu
/* * DataReader.cpp * * Created on: 06.06.2016 * Author: Sebastian Reinhart */ #include "DataReader.cuh" DataReader::DataReader() { cudaError_t error; cudaStreamCreate(&stream1); cudaStreamCreate(&stream0); error = cudaHostAlloc((void**) &h_data, NUMBER_LASERRAYS*sizeof(laserdata_raw), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_data, NUMBER_LASERRAYS*sizeof(laserdata_raw)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &dist, NUMBER_LASERRAYS*sizeof(float), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_dist, NUMBER_LASERRAYS*sizeof(float)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_relMeas, MAX_SEGMENTS*3*sizeof(laserdata_cartesian), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_relMeas, MAX_SEGMENTS*3*sizeof(laserdata_cartesian)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &thresh, NUMBER_LASERRAYS*sizeof(float), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_thresh, NUMBER_LASERRAYS*sizeof(float)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", 
cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &raw_segments, (MAX_SEGMENTS+1)*sizeof(raw_segment), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_rawSegs, (MAX_SEGMENTS+1)*sizeof(raw_segment)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &car_segments, (MAX_SEGMENTS+1)*sizeof(cartesian_segment), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_carSegs, (MAX_SEGMENTS+1)*sizeof(cartesian_segment)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error =cudaHostAlloc((void **) &h_minDistance, MAX_SEGMENTS*sizeof(unsigned long long), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void**) &d_minDistance, MAX_SEGMENTS*sizeof(unsigned long long)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } } DataReader::~DataReader() { // Free device memory cudaFreeHost(h_data); cudaFreeHost(dist); cudaFreeHost(thresh); cudaFreeHost(h_minDistance); cudaFreeHost(raw_segments); cudaFreeHost(car_segments); cudaFreeHost(h_relMeas); cudaFree(d_data); cudaFree(d_dist); cudaFree(d_thresh); cudaFree(d_minDistance); cudaFree(d_rawSegs); cudaFree(d_relMeas); cudaFree(d_carSegs); cudaStreamDestroy(stream1); cudaStreamDestroy(stream0); } __global__ void getRelevantMeas(cartesian_segment* carSegs, laserdata_cartesian* d_laser, unsigned long long* 
dist) { int index = blockIdx.x*3; d_laser[index] = carSegs[blockIdx.x].measures[0]; d_laser[index+2] = carSegs[blockIdx.x].measures[carSegs[blockIdx.x].numberOfMeasures-1]; unsigned long long tmp; if(threadIdx.x < carSegs[blockIdx.x].numberOfMeasures) { float x = carSegs[blockIdx.x].measures[threadIdx.x].x; float y = carSegs[blockIdx.x].measures[threadIdx.x].y; tmp = sqrtf(x*x + y*y)*10000; atomicMin(&(dist[blockIdx.x]), tmp); __syncthreads(); if(dist[blockIdx.x] == tmp) { d_laser[index+1] = carSegs[blockIdx.x].measures[threadIdx.x]; } } } __device__ float computeEuclideanDistance(laserdata_raw p1, laserdata_raw p2) { float square1 = p1.distance*p1.distance; float square2 = p2.distance*p2.distance; float deltaAlpha = p2.angle-p1.angle; deltaAlpha = (deltaAlpha * ((float)M_PI) / 180.0f); return sqrtf(square1+square2-2*p1.distance*p2.distance*cosf(deltaAlpha)); } __device__ float computeThreshold(laserdata_raw p1, laserdata_raw p2) { float C0 = 1.0f; float C1; float min_distance = p2.distance; float deltaAlpha; deltaAlpha = (0.25f * ((float)M_PI) / 180.0f); if(p1.distance < p2.distance) { min_distance = p1.distance; } C1 = sqrtf(2*(1-cosf(deltaAlpha))); return C0 + C1*min_distance; } __device__ void doCoordinateTransformDevice(raw_segment* rawSegs, cartesian_segment* carSegs, int segIndex, int laserIndex) { carSegs[segIndex].numberOfMeasures = rawSegs[segIndex].numberOfMeasures; float angleInRadians; if(laserIndex < rawSegs[segIndex].numberOfMeasures) { angleInRadians = rawSegs[segIndex].measures[laserIndex].angle * ((float)M_PI) / 180.0f; carSegs[segIndex].measures[laserIndex].x = rawSegs[segIndex].measures[laserIndex].distance*cosf(angleInRadians); carSegs[segIndex].measures[laserIndex].y = rawSegs[segIndex].measures[laserIndex].distance*sinf(angleInRadians); } } __global__ void coordinateTransform(raw_segment* rawSegs, cartesian_segment* carSegs) { doCoordinateTransformDevice(rawSegs, carSegs, blockIdx.x, threadIdx.x); } __global__ void processDist(laserdata_raw* 
data, float* distance) { distance[threadIdx.x] = computeEuclideanDistance(data[threadIdx.x], data[threadIdx.x + 1]); } __global__ void processThresh(laserdata_raw* data, float* threshold) { threshold[threadIdx.x] = computeThreshold(data[threadIdx.x], data[threadIdx.x + 1]); } /* * Reads the data out of the specified file and writes it to given array * @param * data: array for raw laserdata * @return * 1 if an error occurs * 0 if everything is ok */ int DataReader::getLaserData(laserdata_raw_array data, std::string number) { std::ostringstream measurePath; measurePath << MEASUREPATH << number << ".txt"; #ifdef PRINT std::cout << measurePath.str() << std::endl; #endif std::ifstream input(measurePath.str().c_str()); std::string line; int counter = 0; std::string segment; /* * Laserscanner range 145° with resolution of 0.25° -> 580 values + Zero value = 581 *-72,5° 0° 72,5° * \ | / * \ | / * \ | / * \ | / * \|/ * Laser */ double angle = -72.5; //now read the data we are interested in counter = 0; int lineCounter = 0; while( std::getline( input, line ) && lineCounter < NUMBER_LASERRAYS ) { //std::cout<<line<<'\n'; std::stringstream ss; ss << line; int datacnt = 1; //extract relevant data from line int valid = 0; while(std::getline(ss, segment, ' ')) { if(segment.size() > 0) { if(segment.at(0) < 48 || segment.at(0) > 57) { continue; } else { if(datacnt == 5) { valid = atoi(segment.c_str()); if(valid == 1) { data[counter].valid = valid; } } else if(datacnt == 6 && valid == 1) { data[counter].distance = atof(segment.c_str()); break; } ++datacnt; } } } if(valid) { data[counter].angle = angle; ++counter; } angle += 0.25; ++lineCounter; } #ifdef PRINT std::cout << counter << std::endl; #endif return counter; } /** * Runs over all laser points and tries to group them to segments, regarding to their euclidean distance to their neighbor point * going from left to right */ int DataReader::processLaserData(std::string number, double currentSpeed, double currentYawRate, 
PointCellDevice* h_vehicles) { this->currentSpeed = currentSpeed; this->currentYawRate = currentYawRate; //read new data from file int numElements = getLaserData(h_data, number); if(numElements < 3) { std::vector<PointCellDevice> vehicles; return 0; } cudaMemcpyAsync(d_data, h_data, numElements*sizeof(laserdata_raw), cudaMemcpyHostToDevice,stream1); cudaStreamSynchronize(stream1); processDist<<<1,numElements-1,0,stream1>>>(d_data, d_dist); processThresh<<<1, numElements-1,0, stream0>>>(d_data,d_thresh); for(int i=0; i<MAX_SEGMENTS; i++) { h_minDistance[i] = INT_MAX; } int segment_counter = 0; int data_counter = 0; //first point automatically is part of the first segment; raw_segments[MAX_SEGMENTS].numberOfMeasures = 1; raw_segments[MAX_SEGMENTS].measures[0] = h_data[0]; cudaMemcpyAsync(dist, d_dist, (numElements-1)*sizeof(float), cudaMemcpyDeviceToHost, stream1); cudaMemcpyAsync(thresh, d_thresh, (numElements-1)*sizeof(float), cudaMemcpyDeviceToHost, stream0); cudaStreamSynchronize(stream1); cudaStreamSynchronize(stream0); cudaMemcpyAsync(d_minDistance, h_minDistance, MAX_SEGMENTS*sizeof(unsigned long long), cudaMemcpyHostToDevice,stream0); //iterate over all measures for(int i=1; i<numElements; i++) { if(dist[i-1] <= thresh[i-1]) { //add current point in existing segment raw_segments[MAX_SEGMENTS].numberOfMeasures++; raw_segments[MAX_SEGMENTS].measures[++data_counter] = h_data[i]; } else { //point belongs to other object -> store current Segment and reset tmp-segment object //only keep segments with at least 3 points if(raw_segments[MAX_SEGMENTS].numberOfMeasures >= 3) { raw_segments[segment_counter].numberOfMeasures = raw_segments[MAX_SEGMENTS].numberOfMeasures; for(int j=0; j<raw_segments[MAX_SEGMENTS].numberOfMeasures; j++) { raw_segments[segment_counter].measures[j] = raw_segments[MAX_SEGMENTS].measures[j]; } segment_counter++; } raw_segments[MAX_SEGMENTS].numberOfMeasures = 1; raw_segments[MAX_SEGMENTS].measures[0] = h_data[i]; data_counter = 0; } } 
if(raw_segments[MAX_SEGMENTS].numberOfMeasures >= 3) { raw_segments[segment_counter].numberOfMeasures = raw_segments[MAX_SEGMENTS].numberOfMeasures; for(int j=0; j<raw_segments[MAX_SEGMENTS].numberOfMeasures; j++) { raw_segments[segment_counter].measures[j] = raw_segments[MAX_SEGMENTS].measures[j]; } ++segment_counter; } #ifdef PRINT printf("Extracted %d Objects from Laserdata\n", segment_counter); #endif cudaMemcpyAsync(d_rawSegs, raw_segments, segment_counter*sizeof(raw_segment), cudaMemcpyHostToDevice,stream1); coordinateTransform<<<segment_counter,NUMBER_LASERRAYS,0,stream1>>>(d_rawSegs, d_carSegs); cudaStreamSynchronize(stream1); cudaMemcpyAsync(car_segments, d_carSegs, segment_counter*sizeof(cartesian_segment), cudaMemcpyDeviceToHost,stream1); getRelevantMeas<<<segment_counter, NUMBER_LASERRAYS,0,stream0>>>(d_carSegs,d_relMeas,d_minDistance); #ifdef VISUALIZE visualizer.visualizeSegmentsAsPointCloud(car_segments, number, segment_counter); #endif cudaMemcpyAsync(h_relMeas, d_relMeas, segment_counter*3*sizeof(laserdata_cartesian), cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); cudaStreamSynchronize(stream1); int vehicles = computeVehicleState(car_segments, segment_counter, number, h_vehicles); return vehicles; } int DataReader::computeVehicleState(cartesian_segment* segments, int segmentCounter, std::string number, PointCellDevice* h_vehicles) { std::vector<PointCellDevice> vehicles; int counter = 0; laserdata_cartesian* relevantPoints; std::vector<std::vector<laserdata_cartesian> > toPlot; for(uint i=0; i<segmentCounter; i++) { relevantPoints = &h_relMeas[i*3]; //we have three different points, compute bounds int left = 0; //right point - left point double width = fabs(relevantPoints[2].y - relevantPoints[0].y); double length = fabs(relevantPoints[2].x - relevantPoints[0].x); double nearestLengthLeft = fabs(relevantPoints[1].x - relevantPoints[0].x); double nearestLengthRight = fabs(relevantPoints[1].x - relevantPoints[2].x); double 
nearestWidthLeft = fabs(relevantPoints[1].y - relevantPoints[0].y); double nearestWidthRight = fabs(relevantPoints[1].y - relevantPoints[2].y); //compute orientation of object regarding to the driving direction of our own car //own direction vector(x,y): (1,0) if(length > 2) { length = fabs(relevantPoints[1].x - relevantPoints[0].x); width = fabs(relevantPoints[1].y - relevantPoints[0].y); } double theta = acos(length/(1*sqrt(width*width + length*length))) * 180.0 / M_PI; double thetaLeft = acos(nearestLengthLeft/(1*sqrt(nearestWidthLeft*nearestWidthLeft + nearestLengthLeft*nearestLengthLeft))) * 180.0 / M_PI; double thetaRight = acos(nearestLengthRight/(1*sqrt(nearestWidthRight*nearestWidthRight + nearestLengthRight*nearestLengthRight))) * 180.0 / M_PI; //objects should not be classified as vehicle if their orientation is bigger than 45° //real vehicles should never be rotated over that value //the detected car probably is defined with the points that form the biggest angle and are wider than 1m int points = 0; if(thetaLeft + 5 > theta && nearestWidthLeft > 1) { theta = thetaLeft; length = nearestLengthLeft; width = nearestWidthLeft; points = 1; } if(thetaRight + 5 > theta && nearestWidthRight > 1) { theta = thetaRight; length = nearestLengthRight; width = nearestWidthRight; left = 1; points = 2; } if(theta > 60 && width > 1) { double y = relevantPoints[left].y + width/2; h_vehicles[counter].initializeMemory(); h_vehicles[counter].setX(relevantPoints[left].x + length/2);//x h_vehicles[counter].setY(relevantPoints[left].y + width/2); // y //now compute theta regarding to movement direction switch(points) { case 0: width = (relevantPoints[2].y - relevantPoints[0].y); length = (relevantPoints[2].x - relevantPoints[0].x); break; case 1: length = (relevantPoints[1].x - relevantPoints[0].x); width = (relevantPoints[1].y - relevantPoints[0].y); break; case 2: length = (relevantPoints[1].x - relevantPoints[2].x); width = (relevantPoints[1].y - relevantPoints[2].y); break; 
} theta = atan(width/length); h_vehicles[counter].setTheta(theta*M_PI / 180.0); //theta //due to prior knowledge on highways, velocitys for diffrent lanes are estimated as below if(y < -4.5) { h_vehicles[counter].setVelocity(currentSpeed + 11.11); //velocity + 40kmh } else if(y < -1.5) { h_vehicles[counter].setVelocity(currentSpeed + 5.55); //velocity + 20kmh } else if(y > 4.5) { h_vehicles[counter].setVelocity(currentSpeed - 11.11); //velocity - 40kmh } else if(y > 1.5) { h_vehicles[counter].setVelocity(currentSpeed - 5.55); //velocity - 20kmh } else { h_vehicles[counter].setVelocity(currentSpeed); //velocity } h_vehicles[counter].setPhi(currentYawRate); //yaw rate h_vehicles[counter].subInvtl = 0.5; ++counter; std::vector<laserdata_cartesian> tmp; tmp.push_back(relevantPoints[0]); tmp.push_back(relevantPoints[1]); tmp.push_back(relevantPoints[2]); toPlot.push_back(tmp); } } #ifdef PRINT std::cout<<"Extracted " << counter << " Vehicles from Data" << std::endl; #endif #ifdef VISUALIZE visualizer.visualizeVehiclesAsRectangle(toPlot, number); #endif return counter; }
2b328b40fffb93f33e83106f77e4a0065247f20a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dataset_read.h" #include "../common/common.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <algorithm> #include <set> #include <random> #include <hip/hip_fp16.h> #include "OpenGLEngine.hpp" vector<float> getData(vector<float> trainImages, int idx, int size) { vector<float> tempVec; int start = idx*size; for (int i = start; i < start + size; i++) { tempVec.push_back(trainImages[i]); } return tempVec; } void initializeMeans(vector<float> &trainImages, float* meansGPU, int trainSize, int k, int dim) { //initialize kmeans from the training set std::random_device rd; std::mt19937 eng(rd()); std::uniform_int_distribution<> distr(0, trainSize - 1); // define the range set<int> generated; for (int i = 0; i < k;) { int index = distr(eng); if (generated.find(index) != generated.end()) { continue; } generated.insert(index); //printf("Random Index Generated is : %d \n", index); vector<float> tempVec = getData(trainImages, index, dim); CHECK(hipMemcpy(meansGPU + i*dim, tempVec.data(), dim*sizeof(float), hipMemcpyHostToDevice)); i++; } } __device__ float calcDistance(float* p1, half* p2,int c,int len) { float dist = 0.0f; for (int i = 0; i < len; i++) { float pp = (__half2float(p2[c*len+i]) - (*(p1 + i))); dist += pp*pp; } return dist; } __global__ void cluster_assignment(float* trainImagesGPU, int trainSize, float* meansGPU, float* sumMeans, int k, float* counts, int dim,int* labelGPU) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= trainSize) return; // hard coded constant-->change __shared__ half cluster_centers[7840]; for (int i = threadIdx.x; i < k*dim; i += blockDim.x) { cluster_centers[i] = __float2half(meansGPU[i]); } __syncthreads(); float *base_pointer = trainImagesGPU + index*dim; float min_distance = FLT_MAX; int closest_cluster = -1; for (int clstr = 0; clstr < k; clstr++) { float euclid_dist = 
calcDistance(base_pointer,cluster_centers,clstr,dim); if (euclid_dist < min_distance) { min_distance = euclid_dist; closest_cluster = clstr; } } labelGPU[index] = closest_cluster; for (int i = 0; i < dim; i++) { atomicAdd(sumMeans + closest_cluster*dim + i, *(base_pointer + i)); } atomicAdd(counts + closest_cluster, 1.0); } __global__ void compute_means(float* means, float* sum_means, float* counts, int dim) { int cluster = threadIdx.x; float count = max(1.0f, counts[cluster]); //printf(" The count for the cluster : %d is %lf \n", cluster, count); for (int i = 0; i < dim; i++) { means[cluster*dim + i] = (sum_means[cluster*dim + i] / count); } } void read_Data_random(vector<float> &x1, char* fname) { std::cout << fname << " is being read" << std::endl; string line; ifstream myfile(fname); long long num = 3000000; int i = 0; string::size_type sz; if (myfile.is_open()) { while (getline(myfile, line, ',')) { x1.push_back(stof(line, &sz)); i++; if (i == num) { myfile.close(); break; } } myfile.close(); } } int main(int argc, char **argv) { system("dir"); int num = 3000000; char* fname1 = "Data_random2/point1.txt/point1.txt"; char* fname2 = "Data_random2/point2.txt/point2.txt"; char* fname3 = "Data_random2/point3.txt/point3.txt"; printf("There\n"); vector<float>x1; vector<float>x2; vector<float>x3; printf("...\n"); read_Data_random(x1,fname1); read_Data_random(x2,fname2); read_Data_random(x3,fname3); //for (int i = 0; i < num; i++) //cout <<"Value is"<< x1[i]<<"\n"; x1.insert(x1.end(), x2.begin(), x2.end()); x1.insert(x1.end(), x3.begin(), x3.end()); cout << "Value has been read"; printf("There\n"); // use std::vector::data to access the pointer for hipMalloc vector<float> trainImages; vector<float> testImages; // Use absolute path to your data folder here. 
//ReadMNIST("./data/train-images.idx3-ubyte", trainSize, dim, trainImages); //ReadMNIST("./data/t10k-images.idx3-ubyte", testSize, dim , testImages); //vector<short> trainLabels; //vector<short> testLabels; //ReadLabels("./data/train-labels.idx1-ubyte", trainSize, trainLabels); //ReadLabels("./data/t10k-labels.idx1-ubyte", testSize, testLabels); float* trainImagesGPU; float* meansGPU; int* labelGPU; int* labelCPU; float* sumMeans; float* counts; float* meansCPU; CHECK(hipMalloc(&trainImagesGPU, trainSize*dim*sizeof(float))); CHECK(hipMalloc(&labelGPU, trainSize*sizeof(int))); CHECK(hipMemcpy(trainImagesGPU, x1.data(), trainSize*dim*sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMalloc(&meansGPU, k*dim*sizeof(float))); meansCPU = (float*)malloc(k*dim * sizeof(float)); labelCPU = (int*)malloc(trainSize * sizeof(int)); printf("Yay\n"); initializeMeans(x1, meansGPU, trainSize, k, dim); for (int i = 0; i < trainSize*3; i+=3) { dataContainer.push_back(std::make_tuple(x1[i], x1[i + 1], x1[i + 2])); } CHECK(hipMalloc(&sumMeans, k*dim * sizeof(float))); CHECK(hipMalloc(&counts, k*sizeof(float))); dim3 block(1024); dim3 grid((trainSize + block.x - 1) / block.x); clock_t start = clock(); for (int itr = 0; itr < number_of_iterations; itr++) { hipMemset(sumMeans, 0, k*dim*sizeof(float)); hipMemset(counts, 0, k*sizeof(float)); cluster_assignment << <grid, block >> > (trainImagesGPU, trainSize, meansGPU, sumMeans, k, counts, dim,labelGPU); CHECK(hipDeviceSynchronize()); compute_means << <1, k >> > (meansGPU, sumMeans, counts, dim); CHECK(hipDeviceSynchronize()); //if (itr % 10 == 0) //{ CHECK(hipMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(labelCPU, labelGPU, trainSize * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < num; i++) { assignmentContainer.push_back(labelCPU[i]); } printf(" The assignmnet container size is : %lld \n", assignmentContainer.size()); cout << " iteration is" << itr; //} } printf(" The assignmnet 
container size is : %lld \n", assignmentContainer.size()); /* CHECK(hipMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(labelCPU, labelGPU, trainSize * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < num; i++) assignmentContainer.push_back(labelCPU[i]);*/ CHECK(hipMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), hipMemcpyDeviceToHost)); printf("first center is %f %f %f", meansCPU[0], meansCPU[1], meansCPU[2]); printf("second center is %f %f %f", meansCPU[3], meansCPU[4], meansCPU[5]); printf("third center is %f %f %f", meansCPU[6], meansCPU[7], meansCPU[8]); printf("time elapsed:%.8lfs\n\n", (clock() - start) / (double)CLOCKS_PER_SEC); printf("K-means are computed\n"); CHECK(hipDeviceSynchronize()); //computing PCA by SVD with CuSolver printf("Starting up graphics controller\n"); GraphicsController graphics(1920,1080); graphics.initGL(&argc, argv); graphics.run(); CHECK(hipDeviceReset()); printf("Program completed executing\n"); return 0; }
2b328b40fffb93f33e83106f77e4a0065247f20a.cu
#include "dataset_read.h" #include "../common/common.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <algorithm> #include <set> #include <random> #include <cuda_fp16.h> #include "OpenGLEngine.hpp" vector<float> getData(vector<float> trainImages, int idx, int size) { vector<float> tempVec; int start = idx*size; for (int i = start; i < start + size; i++) { tempVec.push_back(trainImages[i]); } return tempVec; } void initializeMeans(vector<float> &trainImages, float* meansGPU, int trainSize, int k, int dim) { //initialize kmeans from the training set std::random_device rd; std::mt19937 eng(rd()); std::uniform_int_distribution<> distr(0, trainSize - 1); // define the range set<int> generated; for (int i = 0; i < k;) { int index = distr(eng); if (generated.find(index) != generated.end()) { continue; } generated.insert(index); //printf("Random Index Generated is : %d \n", index); vector<float> tempVec = getData(trainImages, index, dim); CHECK(cudaMemcpy(meansGPU + i*dim, tempVec.data(), dim*sizeof(float), cudaMemcpyHostToDevice)); i++; } } __device__ float calcDistance(float* p1, half* p2,int c,int len) { float dist = 0.0f; for (int i = 0; i < len; i++) { float pp = (__half2float(p2[c*len+i]) - (*(p1 + i))); dist += pp*pp; } return dist; } __global__ void cluster_assignment(float* trainImagesGPU, int trainSize, float* meansGPU, float* sumMeans, int k, float* counts, int dim,int* labelGPU) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= trainSize) return; // hard coded constant-->change __shared__ half cluster_centers[7840]; for (int i = threadIdx.x; i < k*dim; i += blockDim.x) { cluster_centers[i] = __float2half(meansGPU[i]); } __syncthreads(); float *base_pointer = trainImagesGPU + index*dim; float min_distance = FLT_MAX; int closest_cluster = -1; for (int clstr = 0; clstr < k; clstr++) { float euclid_dist = calcDistance(base_pointer,cluster_centers,clstr,dim); if (euclid_dist < min_distance) { min_distance = euclid_dist; 
closest_cluster = clstr; } } labelGPU[index] = closest_cluster; for (int i = 0; i < dim; i++) { atomicAdd(sumMeans + closest_cluster*dim + i, *(base_pointer + i)); } atomicAdd(counts + closest_cluster, 1.0); } __global__ void compute_means(float* means, float* sum_means, float* counts, int dim) { int cluster = threadIdx.x; float count = max(1.0f, counts[cluster]); //printf(" The count for the cluster : %d is %lf \n", cluster, count); for (int i = 0; i < dim; i++) { means[cluster*dim + i] = (sum_means[cluster*dim + i] / count); } } void read_Data_random(vector<float> &x1, char* fname) { std::cout << fname << " is being read" << std::endl; string line; ifstream myfile(fname); long long num = 3000000; int i = 0; string::size_type sz; if (myfile.is_open()) { while (getline(myfile, line, ',')) { x1.push_back(stof(line, &sz)); i++; if (i == num) { myfile.close(); break; } } myfile.close(); } } int main(int argc, char **argv) { system("dir"); int num = 3000000; char* fname1 = "Data_random2/point1.txt/point1.txt"; char* fname2 = "Data_random2/point2.txt/point2.txt"; char* fname3 = "Data_random2/point3.txt/point3.txt"; printf("There\n"); vector<float>x1; vector<float>x2; vector<float>x3; printf("...\n"); read_Data_random(x1,fname1); read_Data_random(x2,fname2); read_Data_random(x3,fname3); //for (int i = 0; i < num; i++) //cout <<"Value is"<< x1[i]<<"\n"; x1.insert(x1.end(), x2.begin(), x2.end()); x1.insert(x1.end(), x3.begin(), x3.end()); cout << "Value has been read"; printf("There\n"); // use std::vector::data to access the pointer for cudaMalloc vector<float> trainImages; vector<float> testImages; // Use absolute path to your data folder here. 
//ReadMNIST("./data/train-images.idx3-ubyte", trainSize, dim, trainImages); //ReadMNIST("./data/t10k-images.idx3-ubyte", testSize, dim , testImages); //vector<short> trainLabels; //vector<short> testLabels; //ReadLabels("./data/train-labels.idx1-ubyte", trainSize, trainLabels); //ReadLabels("./data/t10k-labels.idx1-ubyte", testSize, testLabels); float* trainImagesGPU; float* meansGPU; int* labelGPU; int* labelCPU; float* sumMeans; float* counts; float* meansCPU; CHECK(cudaMalloc(&trainImagesGPU, trainSize*dim*sizeof(float))); CHECK(cudaMalloc(&labelGPU, trainSize*sizeof(int))); CHECK(cudaMemcpy(trainImagesGPU, x1.data(), trainSize*dim*sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMalloc(&meansGPU, k*dim*sizeof(float))); meansCPU = (float*)malloc(k*dim * sizeof(float)); labelCPU = (int*)malloc(trainSize * sizeof(int)); printf("Yay\n"); initializeMeans(x1, meansGPU, trainSize, k, dim); for (int i = 0; i < trainSize*3; i+=3) { dataContainer.push_back(std::make_tuple(x1[i], x1[i + 1], x1[i + 2])); } CHECK(cudaMalloc(&sumMeans, k*dim * sizeof(float))); CHECK(cudaMalloc(&counts, k*sizeof(float))); dim3 block(1024); dim3 grid((trainSize + block.x - 1) / block.x); clock_t start = clock(); for (int itr = 0; itr < number_of_iterations; itr++) { cudaMemset(sumMeans, 0, k*dim*sizeof(float)); cudaMemset(counts, 0, k*sizeof(float)); cluster_assignment << <grid, block >> > (trainImagesGPU, trainSize, meansGPU, sumMeans, k, counts, dim,labelGPU); CHECK(cudaDeviceSynchronize()); compute_means << <1, k >> > (meansGPU, sumMeans, counts, dim); CHECK(cudaDeviceSynchronize()); //if (itr % 10 == 0) //{ CHECK(cudaMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(labelCPU, labelGPU, trainSize * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < num; i++) { assignmentContainer.push_back(labelCPU[i]); } printf(" The assignmnet container size is : %lld \n", assignmentContainer.size()); cout << " iteration is" << itr; //} } printf(" The 
assignmnet container size is : %lld \n", assignmentContainer.size()); /* CHECK(cudaMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(labelCPU, labelGPU, trainSize * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < num; i++) assignmentContainer.push_back(labelCPU[i]);*/ CHECK(cudaMemcpy(meansCPU, meansGPU, k*dim * sizeof(float), cudaMemcpyDeviceToHost)); printf("first center is %f %f %f", meansCPU[0], meansCPU[1], meansCPU[2]); printf("second center is %f %f %f", meansCPU[3], meansCPU[4], meansCPU[5]); printf("third center is %f %f %f", meansCPU[6], meansCPU[7], meansCPU[8]); printf("time elapsed:%.8lfs\n\n", (clock() - start) / (double)CLOCKS_PER_SEC); printf("K-means are computed\n"); CHECK(cudaDeviceSynchronize()); //computing PCA by SVD with CuSolver printf("Starting up graphics controller\n"); GraphicsController graphics(1920,1080); graphics.initGL(&argc, argv); graphics.run(); CHECK(cudaDeviceReset()); printf("Program completed executing\n"); return 0; }
155b11fd3acaa627e6bf3c9dbf3fc2b8cff312f8.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> //Se definen los valores fijos a utilizar en el programa #define H 288 //Cada bloque manejara 100 datos correspondientes a 5 minutos de mediciones en intervalos de 3 segundos #define B 2 //Se trabajaran 2 bloques, uno para cada dia #define VUELTAS 28800 //Cantidad de datos por arreglo #define N 30 //Varible utilizada en pruebas using namespace std; __global__ void inversion(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = x[N-1-i]; } __global__ void raices(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = sqrt (x[i]); } __global__ void potencia3(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = pow ((double)x[i],3.0); } __global__ void media(float* arreglo) { float sumatoria = 0; float med = 0; //54 for(int i=0;i<VUELTAS;i++){ sumatoria = sumatoria + arreglo[i]; } med = sumatoria/(float) VUELTAS; sumatoria = med; } //Subrutina que calcula cual fue la mayor medicion en el dia con hora a la que fue medida __global__ void mayor(float* arreglo){ float may=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]>may){ may=arreglo[i];} } } //Subrutina que calcula cual fue la menor medicion en el dia con hora a la que fue medida __global__ void menor(float* arreglo){ float men=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]<men){ men=arreglo[i];} } } //Subrutina que calcula la prediccion de datos para un dia siguiente a traves de la regresion lineal de un tipo de medicion hecha por cada 5 minutos en intervalos de 3 segundos __global__ void prediccion(float* arreglo, float* salida){ int i = blockIdx.x*blockDim.x + threadIdx.x; int q = 0; float k = 100.0; float m = 0; float sumatoria = 0; float sumasDif = 0; float potencia = 0; float pendiente = 0; //float nueva = 0; 
q = i*100; for(int j = q; j<q+100; j++){ sumatoria = sumatoria + arreglo[j]; } sumatoria = sumatoria/k; for(int j = q; j<q+100; j++){ sumasDif = arreglo[j] - sumatoria; } potencia = (float)pow((double)sumasDif,2.00); pendiente = potencia/k; for(int j = q; j<q+100; j++){ salida[j] = sumatoria + pendiente*m; m = m + 1; } } //Inicio del programa int main(void) { // declaraciones de componentes CUDA, Streams y memoria hipStream_t stream1, stream2, stream3, stream4, stream5, stream6; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); hipStreamCreate(&stream5); hipStreamCreate(&stream6); //Se abren los archivos y se limpian ofstream ArchivoPrediccion("181113_estCU.csv"); ArchivoPrediccion.close(); ofstream ArchivoPrediccion2("181114_estCU.csv"); ArchivoPrediccion2.close(); //Se crean los vectores que guardaran los string de horas de los archivos .csv string horas[VUELTAS]; string horas2[VUELTAS]; //Se inician las variables que guardaran los tiempos de ejecucion de cada kernel float milliseconds1 = 0; float milliseconds2 = 0; float milliseconds3 = 0; float milliseconds4 = 0; float milliseconds5 = 0; float milliseconds6 = 0; //Se crean las variables de vectores que llevaran datos y compiaran entre el host y el device float *vectorTemperatura1, *vectorHumedad1, *vectorPresion1, *res_stream1, *res_stream2, *res_stream3; float *vectorTemperatura2, *vectorHumedad2, *vectorPresion2, *res_stream4, *res_stream5, *res_stream6; float *dev_res1, *dev_res2, *dev_res3; float *dev_res4, *dev_res5, *dev_res6; // reserva en el host // reserva en el device hipMalloc( (void**)&dev_res1, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res2, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res3, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res4, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res5, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res6, VUELTAS*sizeof(float)); //Asignacion de memoria al host 
hipHostMalloc((void**)&vectorTemperatura1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&vectorHumedad1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&vectorPresion1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&vectorTemperatura2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&vectorHumedad2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&vectorPresion2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream3,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream4,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream5,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostMalloc((void**)&res_stream6,VUELTAS*sizeof(float),hipHostMallocDefault); // se crean los eventos hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); ///////////////////////////////////////////////////////////////////////////////////////////////// // Inicializacion de datos por lectura de archivos .csv // Se leen los datos del dia 1 ifstream datos("181113.csv"); string linea; int contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion1[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } 
//////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 1 for(int i=0;i < H;i++){ // copia de datos hacia el device hipMemcpyAsync(dev_res1, vectorTemperatura1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream1); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorTemperatura1, dev_res1); hipEventRecord(stop); hipMemcpyAsync(res_stream1, dev_res1, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream1); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds1, start, stop); ///////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res2, vectorHumedad1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream2); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorHumedad1, dev_res2); hipEventRecord(stop); hipMemcpyAsync(res_stream2, dev_res2, VUELTAS*sizeof(float), hipMemcpyDeviceToHost, stream2); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds2, start, stop); //////////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res3, vectorPresion1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream3); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorPresion1, dev_res3); hipEventRecord(stop); hipMemcpyAsync(res_stream3, dev_res3, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream3); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds3, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams hipStreamSynchronize(stream1); // wait for 
stream1 to finish hipStreamSynchronize(stream2); // wait for stream2 to finish hipStreamSynchronize(stream3); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // Se leen los datos del dia 2 ifstream datos2("181114.csv"); contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos2,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas2[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion2[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 2 for(int i=0;i < H;i++){ // copia de datos hacia el device hipMemcpyAsync(dev_res4, vectorTemperatura2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream4); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorTemperatura2, dev_res4); hipEventRecord(stop); hipMemcpyAsync(res_stream4, dev_res4, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream4); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds4, start, stop); ///////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res5, vectorHumedad2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream5); //Se hace la medicion del tiempo atraves de 
events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorHumedad2, dev_res5); hipEventRecord(stop); hipMemcpyAsync(res_stream5, dev_res5, VUELTAS*sizeof(float), hipMemcpyDeviceToHost, stream5); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds5, start, stop); //////////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res6, vectorPresion2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream6); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); hipLaunchKernelGGL(( prediccion), dim3(1), dim3(H), 0, 0, vectorPresion2, dev_res6); hipEventRecord(stop); hipMemcpyAsync(res_stream6, dev_res6, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream6); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds6, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams hipStreamSynchronize(stream4); // wait for stream1 to finish hipStreamSynchronize(stream5); // wait for stream2 to finish hipStreamSynchronize(stream6); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////// //Se guardan los datos predecidos en un archivo csv correspondiente ofstream Archivo("181113_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo << horas[i] << ";" << res_stream1[i] << ";" << res_stream2[i] << ";" << res_stream3[i] << ";" << endl; } Archivo.close(); ofstream Archivo2("181114_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo2 << horas2[i] << ";" << res_stream4[i] << ";" << res_stream5[i] << ";" << res_stream6[i] << ";" << endl; } Archivo2.close(); //Se imprimen los tiempos que tardaron cada uno de los kernels printf("Tiempo del kernel para la prediccion de temperaturas del dia 1: %f milisegundos\n", milliseconds1); printf("Tiempo del kernel para la prediccion de humedades del dia 1: %f milisegundos\n", milliseconds2); printf("Tiempo del kernel para la prediccion de 
presiones del dia 1: %f milisegundos\n", milliseconds3); printf("Tiempo del kernel para la prediccion de temperaturas del dia 2: %f milisegundos\n", milliseconds4); printf("Tiempo del kernel para la prediccion de humedades del dia 2: %f milisegundos\n", milliseconds5); printf("Tiempo del kernel para la prediccion de presiones del dia 2: %f milisegundos\n", milliseconds6); //Se destruyen todos los componentes CUDA y se libera la memoria hipEventDestroy(start); hipEventDestroy(stop); hipStreamDestroy(stream1); hipStreamDestroy(stream2); hipStreamDestroy(stream3); hipStreamDestroy(stream4); hipStreamDestroy(stream5); hipStreamDestroy(stream6); hipFree(dev_res1); hipFree(dev_res2); hipFree(dev_res3); hipFree(dev_res4); hipFree(dev_res5); hipFree(dev_res6); // salida printf("\npulsa INTRO para finalizar..."); fflush(stdin); char tecla = getchar(); return 0; }
155b11fd3acaa627e6bf3c9dbf3fc2b8cff312f8.cu
#include <math.h> #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> //Se definen los valores fijos a utilizar en el programa #define H 288 //Cada bloque manejara 100 datos correspondientes a 5 minutos de mediciones en intervalos de 3 segundos #define B 2 //Se trabajaran 2 bloques, uno para cada dia #define VUELTAS 28800 //Cantidad de datos por arreglo #define N 30 //Varible utilizada en pruebas using namespace std; __global__ void inversion(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = x[N-1-i]; } __global__ void raices(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = sqrt (x[i]); } __global__ void potencia3(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = pow ((double)x[i],3.0); } __global__ void media(float* arreglo) { float sumatoria = 0; float med = 0; //54 for(int i=0;i<VUELTAS;i++){ sumatoria = sumatoria + arreglo[i]; } med = sumatoria/(float) VUELTAS; sumatoria = med; } //Subrutina que calcula cual fue la mayor medicion en el dia con hora a la que fue medida __global__ void mayor(float* arreglo){ float may=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]>may){ may=arreglo[i];} } } //Subrutina que calcula cual fue la menor medicion en el dia con hora a la que fue medida __global__ void menor(float* arreglo){ float men=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]<men){ men=arreglo[i];} } } //Subrutina que calcula la prediccion de datos para un dia siguiente a traves de la regresion lineal de un tipo de medicion hecha por cada 5 minutos en intervalos de 3 segundos __global__ void prediccion(float* arreglo, float* salida){ int i = blockIdx.x*blockDim.x + threadIdx.x; int q = 0; float k = 100.0; float m = 0; float sumatoria = 0; float sumasDif = 0; float potencia = 0; float pendiente = 0; //float nueva = 0; q = i*100; for(int j = q; j<q+100; j++){ sumatoria = 
sumatoria + arreglo[j]; } sumatoria = sumatoria/k; for(int j = q; j<q+100; j++){ sumasDif = arreglo[j] - sumatoria; } potencia = (float)pow((double)sumasDif,2.00); pendiente = potencia/k; for(int j = q; j<q+100; j++){ salida[j] = sumatoria + pendiente*m; m = m + 1; } } //Inicio del programa int main(void) { // declaraciones de componentes CUDA, Streams y memoria cudaStream_t stream1, stream2, stream3, stream4, stream5, stream6; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); cudaStreamCreate(&stream5); cudaStreamCreate(&stream6); //Se abren los archivos y se limpian ofstream ArchivoPrediccion("181113_estCU.csv"); ArchivoPrediccion.close(); ofstream ArchivoPrediccion2("181114_estCU.csv"); ArchivoPrediccion2.close(); //Se crean los vectores que guardaran los string de horas de los archivos .csv string horas[VUELTAS]; string horas2[VUELTAS]; //Se inician las variables que guardaran los tiempos de ejecucion de cada kernel float milliseconds1 = 0; float milliseconds2 = 0; float milliseconds3 = 0; float milliseconds4 = 0; float milliseconds5 = 0; float milliseconds6 = 0; //Se crean las variables de vectores que llevaran datos y compiaran entre el host y el device float *vectorTemperatura1, *vectorHumedad1, *vectorPresion1, *res_stream1, *res_stream2, *res_stream3; float *vectorTemperatura2, *vectorHumedad2, *vectorPresion2, *res_stream4, *res_stream5, *res_stream6; float *dev_res1, *dev_res2, *dev_res3; float *dev_res4, *dev_res5, *dev_res6; // reserva en el host // reserva en el device cudaMalloc( (void**)&dev_res1, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res2, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res3, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res4, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res5, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res6, VUELTAS*sizeof(float)); //Asignacion de memoria al host 
cudaHostAlloc((void**)&vectorTemperatura1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorHumedad1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorPresion1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorTemperatura2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorHumedad2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorPresion2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream3,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream4,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream5,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream6,VUELTAS*sizeof(float),cudaHostAllocDefault); // se crean los eventos cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); ///////////////////////////////////////////////////////////////////////////////////////////////// // Inicializacion de datos por lectura de archivos .csv // Se leen los datos del dia 1 ifstream datos("181113.csv"); string linea; int contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion1[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } 
//////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 1 for(int i=0;i < H;i++){ // copia de datos hacia el device cudaMemcpyAsync(dev_res1, vectorTemperatura1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream1); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura1, dev_res1); cudaEventRecord(stop); cudaMemcpyAsync(res_stream1, dev_res1, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream1); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds1, start, stop); ///////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res2, vectorHumedad1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream2); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorHumedad1, dev_res2); cudaEventRecord(stop); cudaMemcpyAsync(res_stream2, dev_res2, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost, stream2); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds2, start, stop); //////////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res3, vectorPresion1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream3); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorPresion1, dev_res3); cudaEventRecord(stop); cudaMemcpyAsync(res_stream3, dev_res3, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream3); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds3, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams cudaStreamSynchronize(stream1); // wait for stream1 to finish cudaStreamSynchronize(stream2); // wait for stream2 to finish 
cudaStreamSynchronize(stream3); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // Se leen los datos del dia 2 ifstream datos2("181114.csv"); contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos2,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas2[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion2[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 2 for(int i=0;i < H;i++){ // copia de datos hacia el device cudaMemcpyAsync(dev_res4, vectorTemperatura2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream4); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura2, dev_res4); cudaEventRecord(stop); cudaMemcpyAsync(res_stream4, dev_res4, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream4); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds4, start, stop); ///////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res5, vectorHumedad2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream5); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorHumedad2, dev_res5); cudaEventRecord(stop); 
cudaMemcpyAsync(res_stream5, dev_res5, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost, stream5); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds5, start, stop); //////////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res6, vectorPresion2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream6); //Se hace la medicion del tiempo atraves de events cudaEventRecord(start); prediccion<<<1, H>>>(vectorPresion2, dev_res6); cudaEventRecord(stop); cudaMemcpyAsync(res_stream6, dev_res6, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream6); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds6, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams cudaStreamSynchronize(stream4); // wait for stream1 to finish cudaStreamSynchronize(stream5); // wait for stream2 to finish cudaStreamSynchronize(stream6); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////// //Se guardan los datos predecidos en un archivo csv correspondiente ofstream Archivo("181113_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo << horas[i] << ";" << res_stream1[i] << ";" << res_stream2[i] << ";" << res_stream3[i] << ";" << endl; } Archivo.close(); ofstream Archivo2("181114_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo2 << horas2[i] << ";" << res_stream4[i] << ";" << res_stream5[i] << ";" << res_stream6[i] << ";" << endl; } Archivo2.close(); //Se imprimen los tiempos que tardaron cada uno de los kernels printf("Tiempo del kernel para la prediccion de temperaturas del dia 1: %f milisegundos\n", milliseconds1); printf("Tiempo del kernel para la prediccion de humedades del dia 1: %f milisegundos\n", milliseconds2); printf("Tiempo del kernel para la prediccion de presiones del dia 1: %f milisegundos\n", milliseconds3); printf("Tiempo del kernel para la prediccion de temperaturas del dia 2: %f milisegundos\n", 
milliseconds4); printf("Tiempo del kernel para la prediccion de humedades del dia 2: %f milisegundos\n", milliseconds5); printf("Tiempo del kernel para la prediccion de presiones del dia 2: %f milisegundos\n", milliseconds6); //Se destruyen todos los componentes CUDA y se libera la memoria cudaEventDestroy(start); cudaEventDestroy(stop); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaStreamDestroy(stream3); cudaStreamDestroy(stream4); cudaStreamDestroy(stream5); cudaStreamDestroy(stream6); cudaFree(dev_res1); cudaFree(dev_res2); cudaFree(dev_res3); cudaFree(dev_res4); cudaFree(dev_res5); cudaFree(dev_res6); // salida printf("\npulsa INTRO para finalizar..."); fflush(stdin); char tecla = getchar(); return 0; }
8aca107235674d5dc86ceb831d224413f544aee9.hip
// !!! This is a file automatically generated by hipify!!! #define CATCH_CONFIG_MAIN #include <catch2/catch.hpp> #include <stdlib.h> #include <iostream> #include <string> #include <cuq.h> #include <hip/hip_runtime.h> using namespace std; int devices[128]; char errorMsg[1000]; TEST_CASE("GPU occupation", "[occupation]") { CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=0", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=\"0\"", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=\"0\""); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES='0'", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES='0'"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=0,1", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0,1"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("A process tries to occupy the same GPU two times", "[occupation]") { CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); //Do it once again to be sure it works if a process which already occupied a GPU requests to occupy it one more time CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); } TEST_CASE("More GPUs than in CUDA_VISIBLE_DEVICES are requested", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0"); CHECK(occupyDevices(2, devices, errorMsg) == -1); 
putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("More GPUs than exists on the machine are requested", "[occupation]") { int realDeviceCount; hipGetDeviceCount(&realDeviceCount); CHECK(occupyDevices(realDeviceCount + 1, devices, errorMsg) == -1); } TEST_CASE("Something unreal is in CUDA_VISIBLE_DEVICES=32", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=32"); CHECK(occupyDevices(1, devices, errorMsg) == -1); putenv((char*)"CUDA_VISIBLE_DEVICES="); }
8aca107235674d5dc86ceb831d224413f544aee9.cu
#define CATCH_CONFIG_MAIN #include <catch2/catch.hpp> #include <stdlib.h> #include <iostream> #include <string> #include <cuq.h> #include <cuda_runtime.h> using namespace std; int devices[128]; char errorMsg[1000]; TEST_CASE("GPU occupation", "[occupation]") { CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=0", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=\"0\"", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=\"0\""); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES='0'", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES='0'"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("GPU occupation with CUDA_VISIBLE_DEVICES=0,1", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0,1"); CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("A process tries to occupy the same GPU two times", "[occupation]") { CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); //Do it once again to be sure it works if a process which already occupied a GPU requests to occupy it one more time CHECK(occupyDevices(1, devices, errorMsg) == 0); CHECK(devices[0] == 0); } TEST_CASE("More GPUs than in CUDA_VISIBLE_DEVICES are requested", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=0"); CHECK(occupyDevices(2, devices, errorMsg) == -1); putenv((char*)"CUDA_VISIBLE_DEVICES="); } TEST_CASE("More GPUs than exists on the machine are 
requested", "[occupation]") { int realDeviceCount; cudaGetDeviceCount(&realDeviceCount); CHECK(occupyDevices(realDeviceCount + 1, devices, errorMsg) == -1); } TEST_CASE("Something unreal is in CUDA_VISIBLE_DEVICES=32", "[occupation][CUDA_VISIBLE_DEVICES]") { putenv((char*)"CUDA_VISIBLE_DEVICES=32"); CHECK(occupyDevices(1, devices, errorMsg) == -1); putenv((char*)"CUDA_VISIBLE_DEVICES="); }
9be9294504762942863e8ce6c20179d148a2644e.hip
// !!! This is a file automatically generated by hipify!!! #include "ATen/Context.h" #include "ATen/hip/HIPContext.h" #include "ATen/Dispatch.h" #include "ATen/NativeFunctions.h" #include "ATen/hip/PinnedMemoryAllocator.h" #include "ATen/hip/HIPApplyUtils.cuh" #include "ATen/native/LinearAlgebraUtils.h" #include "ATen/native/Gesv.h" #include "THH.h" // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaGesvBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { AT_ERROR("gesv only takes float or double Tensors"); } template<> void magmaGesvBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { magma_sgesv_batched( n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, queue); } template<> void magmaGesvBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { magma_dgesv_batched( n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, queue); } static magma_queue_t createMagmaQueue(const Tensor& tensor) { auto& context = at::globalContext(); magma_queue_t magma_queue; magma_queue_create_from_hip( tensor.get_device(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCState_getCurrentBlasHandle(context.getTHCState()), THCState_getCurrentSparseHandle(context.getTHCState()), &magma_queue); return magma_queue; } static inline magma_int_t magma_int_cast(int64_t value, const char* varname) { auto result = 
static_cast<magma_int_t>(value); if (static_cast<int64_t>(result) != value) { AT_ERROR("magma: The value of %s (%lld) is too large to fit into a magma_int_t (%llu bytes)", varname, (long long)value, sizeof(magma_int_t)); } return result; } #endif // Creates an array of size elements of type T, backed by pinned memory // wrapped in a Storage template<class T> static inline Storage pin_memory(int64_t size, Tensor dummy) { int64_t adjusted_size = size * sizeof(T); auto* allocator = cuda::getPinnedMemoryAllocator(); auto& backend = dummy.type().toBackend(Backend::CPU).toScalarType(kByte); return backend.storageWithAllocator(adjusted_size, allocator); } #define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \ auto storage_##name = pin_memory<type>(size, dummy_tensor); \ name = static_cast<type*>(storage_##name.data()); template <typename scalar_t> static void applyGesv(Tensor& b, Tensor& A, std::vector<int64_t> infos) { #ifndef USE_MAGMA AT_ERROR("gesv: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } magmaGesvBatched<scalar_t>( n, nrhs, A_array, n, ipiv_array, b_array, n, info_array, batch_size, createMagmaQueue(b)); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } std::tuple<Tensor,Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) { std::vector<int64_t> infos(batchCount(A), 0); auto A_working_copy = cloneBatchedColumnMajor(A); auto b_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{ applyGesv<scalar_t>(b_working_copy, A_working_copy, infos); }); checkErrors(infos); return std::tuple<Tensor,Tensor>(b_working_copy, A_working_copy); } }} // namespace at::native #undef ALLOCATE_ARRAY
9be9294504762942863e8ce6c20179d148a2644e.cu
#include "ATen/Context.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/Dispatch.h" #include "ATen/NativeFunctions.h" #include "ATen/cuda/PinnedMemoryAllocator.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include "ATen/native/LinearAlgebraUtils.h" #include "ATen/native/Gesv.h" #include "THC.h" // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaGesvBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { AT_ERROR("gesv only takes float or double Tensors"); } template<> void magmaGesvBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { magma_sgesv_batched( n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, queue); } template<> void magmaGesvBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) { magma_dgesv_batched( n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, queue); } static magma_queue_t createMagmaQueue(const Tensor& tensor) { auto& context = at::globalContext(); magma_queue_t magma_queue; magma_queue_create_from_cuda( tensor.get_device(), at::cuda::getCurrentCUDAStream(), THCState_getCurrentBlasHandle(context.getTHCState()), THCState_getCurrentSparseHandle(context.getTHCState()), &magma_queue); return magma_queue; } static inline magma_int_t magma_int_cast(int64_t value, const char* varname) { auto result = static_cast<magma_int_t>(value); if (static_cast<int64_t>(result) != value) { 
AT_ERROR("magma: The value of %s (%lld) is too large to fit into a magma_int_t (%llu bytes)", varname, (long long)value, sizeof(magma_int_t)); } return result; } #endif // Creates an array of size elements of type T, backed by pinned memory // wrapped in a Storage template<class T> static inline Storage pin_memory(int64_t size, Tensor dummy) { int64_t adjusted_size = size * sizeof(T); auto* allocator = cuda::getPinnedMemoryAllocator(); auto& backend = dummy.type().toBackend(Backend::CPU).toScalarType(kByte); return backend.storageWithAllocator(adjusted_size, allocator); } #define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \ auto storage_##name = pin_memory<type>(size, dummy_tensor); \ name = static_cast<type*>(storage_##name.data()); template <typename scalar_t> static void applyGesv(Tensor& b, Tensor& A, std::vector<int64_t> infos) { #ifndef USE_MAGMA AT_ERROR("gesv: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } magmaGesvBatched<scalar_t>( n, nrhs, A_array, n, ipiv_array, b_array, n, info_array, batch_size, createMagmaQueue(b)); for 
(int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } std::tuple<Tensor,Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) { std::vector<int64_t> infos(batchCount(A), 0); auto A_working_copy = cloneBatchedColumnMajor(A); auto b_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{ applyGesv<scalar_t>(b_working_copy, A_working_copy, infos); }); checkErrors(infos); return std::tuple<Tensor,Tensor>(b_working_copy, A_working_copy); } }} // namespace at::native #undef ALLOCATE_ARRAY
e64cf146cc5aa0d5e1bae15f1eb764fe12ac10ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ConnGrowth.h" #include "AllSpikingSynapses.h" #include "Book.h" /* * Update the weight of the Synapses in the simulation. * Note: Platform Dependent. * * @param num_neurons number of neurons to update. * @param neurons the Neuron list to search from. * @param synapses the Synapse list to search from. * @param sim_info SimulationInfo to refer from. * @param m_allNeuronsDevice Reference to the allNeurons struct on device memory. * @param m_allSynapsesDevice Reference to the allSynapses struct on device memory. * @param layout Layout information of the neunal network. */ void ConnGrowth::updateSynapsesWeights(const int num_neurons, IAllNeurons &neurons, IAllSynapses &synapses, const SimulationInfo *sim_info, AllSpikingNeurons* m_allNeuronsDevice, AllSpikingSynapses* m_allSynapsesDevice, Layout *layout) { // For now, we just set the weights to equal the areas. We will later // scale it and set its sign (when we index and get its sign). 
(*W) = (*area); BGFLOAT deltaT = sim_info->deltaT; // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid; // allocate device memories BGSIZE W_d_size = sim_info->totalNeurons * sim_info->totalNeurons * sizeof (BGFLOAT); BGFLOAT* W_h = new BGFLOAT[W_d_size]; BGFLOAT* W_d; HANDLE_ERROR( hipMalloc ( ( void ** ) &W_d, W_d_size ) ); neuronType* neuron_type_map_d; HANDLE_ERROR( hipMalloc( ( void ** ) &neuron_type_map_d, sim_info->totalNeurons * sizeof( neuronType ) ) ); // copy weight data to the device memory for ( int i = 0 ; i < sim_info->totalNeurons; i++ ) for ( int j = 0; j < sim_info->totalNeurons; j++ ) W_h[i * sim_info->totalNeurons + j] = (*W)(i, j); HANDLE_ERROR( hipMemcpy ( W_d, W_h, W_d_size, hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( neuron_type_map_d, layout->neuron_type_map, sim_info->totalNeurons * sizeof( neuronType ), hipMemcpyHostToDevice ) ); fpCreateSynapse_t fpCreateSynapse_h; synapses.getFpCreateSynapse(fpCreateSynapse_h); blocksPerGrid = ( sim_info->totalNeurons + threadsPerBlock - 1 ) / threadsPerBlock; hipLaunchKernelGGL(( updateSynapsesWeightsDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, sim_info->totalNeurons, deltaT, W_d, sim_info->maxSynapsesPerNeuron, m_allNeuronsDevice, m_allSynapsesDevice, (void (*)(AllSpikingSynapses*, const int, const int, int, int, BGFLOAT*, const BGFLOAT, synapseType))fpCreateSynapse_h, neuron_type_map_d ); // free memories HANDLE_ERROR( hipFree( W_d ) ); delete[] W_h; HANDLE_ERROR( hipFree( neuron_type_map_d ) ); // copy device synapse count to host memory synapses.copyDeviceSynapseCountsToHost(m_allSynapsesDevice, sim_info); // copy device synapse summation coordinate to host memory synapses.copyDeviceSynapseSumIdxToHost(m_allSynapsesDevice, sim_info); } /* * Adjust the strength of the synapse or remove it from the synapse map if it has gone below * zero. * * @param[in] num_neurons Number of neurons. * @param[in] deltaT The time step size. 
* @param[in] W_d Array of synapse weight. * @param[in] maxSynapses Maximum number of synapses per neuron. * @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory. * @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory. * @param[in] fpCreateSynapse Function pointer to the createSynapse device function. */ __global__ void updateSynapsesWeightsDevice( int num_neurons, BGFLOAT deltaT, BGFLOAT* W_d, int maxSynapses, AllSpikingNeurons* allNeuronsDevice, AllSpikingSynapses* allSynapsesDevice, void (*fpCreateSynapse)(AllSpikingSynapses*, const int, const int, int, int, BGFLOAT*, const BGFLOAT, synapseType), neuronType* neuron_type_map_d ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= num_neurons ) return; int adjusted = 0; //int could_have_been_removed = 0; // TODO: use this value int removed = 0; int added = 0; // Scale and add sign to the areas // visit each neuron 'a' int src_neuron = idx; // and each destination neuron 'b' for (int dest_neuron = 0; dest_neuron < num_neurons; dest_neuron++) { // visit each synapse at (xa,ya) bool connected = false; synapseType type = synType(neuron_type_map_d, src_neuron, dest_neuron); // for each existing synapse BGSIZE existing_synapses = allSynapsesDevice->synapse_counts[src_neuron]; int existing_synapses_checked = 0; for (BGSIZE synapse_index = 0; (existing_synapses_checked < existing_synapses) && !connected; synapse_index++) { BGSIZE iSyn = maxSynapses * src_neuron + synapse_index; if (allSynapsesDevice->in_use[iSyn] == true) { // if there is a synapse between a and b if (allSynapsesDevice->destNeuronIndex[iSyn] == dest_neuron) { connected = true; adjusted++; // adjust the strength of the synapse or remove // it from the synapse map if it has gone below // zero. 
if (W_d[src_neuron * num_neurons + dest_neuron] < 0) { removed++; eraseSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, maxSynapses); } else { // adjust // g_synapseStrengthAdjustmentConstant is 1.0e-8; allSynapsesDevice->W[iSyn] = W_d[src_neuron * num_neurons + dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT; } } existing_synapses_checked++; } } // if not connected and weight(a,b) > 0, add a new synapse from a to b if (!connected && (W_d[src_neuron * num_neurons + dest_neuron] > 0)) { // locate summation point BGFLOAT* sum_point = &( allNeuronsDevice->summation_map[dest_neuron] ); added++; addSpikingSynapse(allSynapsesDevice, type, src_neuron, dest_neuron, src_neuron, dest_neuron, sum_point, deltaT, W_d, num_neurons, fpCreateSynapse); } } }
e64cf146cc5aa0d5e1bae15f1eb764fe12ac10ac.cu
#include "ConnGrowth.h" #include "AllSpikingSynapses.h" #include "Book.h" /* * Update the weight of the Synapses in the simulation. * Note: Platform Dependent. * * @param num_neurons number of neurons to update. * @param neurons the Neuron list to search from. * @param synapses the Synapse list to search from. * @param sim_info SimulationInfo to refer from. * @param m_allNeuronsDevice Reference to the allNeurons struct on device memory. * @param m_allSynapsesDevice Reference to the allSynapses struct on device memory. * @param layout Layout information of the neunal network. */ void ConnGrowth::updateSynapsesWeights(const int num_neurons, IAllNeurons &neurons, IAllSynapses &synapses, const SimulationInfo *sim_info, AllSpikingNeurons* m_allNeuronsDevice, AllSpikingSynapses* m_allSynapsesDevice, Layout *layout) { // For now, we just set the weights to equal the areas. We will later // scale it and set its sign (when we index and get its sign). (*W) = (*area); BGFLOAT deltaT = sim_info->deltaT; // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid; // allocate device memories BGSIZE W_d_size = sim_info->totalNeurons * sim_info->totalNeurons * sizeof (BGFLOAT); BGFLOAT* W_h = new BGFLOAT[W_d_size]; BGFLOAT* W_d; HANDLE_ERROR( cudaMalloc ( ( void ** ) &W_d, W_d_size ) ); neuronType* neuron_type_map_d; HANDLE_ERROR( cudaMalloc( ( void ** ) &neuron_type_map_d, sim_info->totalNeurons * sizeof( neuronType ) ) ); // copy weight data to the device memory for ( int i = 0 ; i < sim_info->totalNeurons; i++ ) for ( int j = 0; j < sim_info->totalNeurons; j++ ) W_h[i * sim_info->totalNeurons + j] = (*W)(i, j); HANDLE_ERROR( cudaMemcpy ( W_d, W_h, W_d_size, cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( neuron_type_map_d, layout->neuron_type_map, sim_info->totalNeurons * sizeof( neuronType ), cudaMemcpyHostToDevice ) ); fpCreateSynapse_t fpCreateSynapse_h; synapses.getFpCreateSynapse(fpCreateSynapse_h); blocksPerGrid = ( sim_info->totalNeurons + 
threadsPerBlock - 1 ) / threadsPerBlock; updateSynapsesWeightsDevice <<< blocksPerGrid, threadsPerBlock >>> ( sim_info->totalNeurons, deltaT, W_d, sim_info->maxSynapsesPerNeuron, m_allNeuronsDevice, m_allSynapsesDevice, (void (*)(AllSpikingSynapses*, const int, const int, int, int, BGFLOAT*, const BGFLOAT, synapseType))fpCreateSynapse_h, neuron_type_map_d ); // free memories HANDLE_ERROR( cudaFree( W_d ) ); delete[] W_h; HANDLE_ERROR( cudaFree( neuron_type_map_d ) ); // copy device synapse count to host memory synapses.copyDeviceSynapseCountsToHost(m_allSynapsesDevice, sim_info); // copy device synapse summation coordinate to host memory synapses.copyDeviceSynapseSumIdxToHost(m_allSynapsesDevice, sim_info); } /* * Adjust the strength of the synapse or remove it from the synapse map if it has gone below * zero. * * @param[in] num_neurons Number of neurons. * @param[in] deltaT The time step size. * @param[in] W_d Array of synapse weight. * @param[in] maxSynapses Maximum number of synapses per neuron. * @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory. * @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory. * @param[in] fpCreateSynapse Function pointer to the createSynapse device function. 
*/ __global__ void updateSynapsesWeightsDevice( int num_neurons, BGFLOAT deltaT, BGFLOAT* W_d, int maxSynapses, AllSpikingNeurons* allNeuronsDevice, AllSpikingSynapses* allSynapsesDevice, void (*fpCreateSynapse)(AllSpikingSynapses*, const int, const int, int, int, BGFLOAT*, const BGFLOAT, synapseType), neuronType* neuron_type_map_d ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= num_neurons ) return; int adjusted = 0; //int could_have_been_removed = 0; // TODO: use this value int removed = 0; int added = 0; // Scale and add sign to the areas // visit each neuron 'a' int src_neuron = idx; // and each destination neuron 'b' for (int dest_neuron = 0; dest_neuron < num_neurons; dest_neuron++) { // visit each synapse at (xa,ya) bool connected = false; synapseType type = synType(neuron_type_map_d, src_neuron, dest_neuron); // for each existing synapse BGSIZE existing_synapses = allSynapsesDevice->synapse_counts[src_neuron]; int existing_synapses_checked = 0; for (BGSIZE synapse_index = 0; (existing_synapses_checked < existing_synapses) && !connected; synapse_index++) { BGSIZE iSyn = maxSynapses * src_neuron + synapse_index; if (allSynapsesDevice->in_use[iSyn] == true) { // if there is a synapse between a and b if (allSynapsesDevice->destNeuronIndex[iSyn] == dest_neuron) { connected = true; adjusted++; // adjust the strength of the synapse or remove // it from the synapse map if it has gone below // zero. 
if (W_d[src_neuron * num_neurons + dest_neuron] < 0) { removed++; eraseSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, maxSynapses); } else { // adjust // g_synapseStrengthAdjustmentConstant is 1.0e-8; allSynapsesDevice->W[iSyn] = W_d[src_neuron * num_neurons + dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT; } } existing_synapses_checked++; } } // if not connected and weight(a,b) > 0, add a new synapse from a to b if (!connected && (W_d[src_neuron * num_neurons + dest_neuron] > 0)) { // locate summation point BGFLOAT* sum_point = &( allNeuronsDevice->summation_map[dest_neuron] ); added++; addSpikingSynapse(allSynapsesDevice, type, src_neuron, dest_neuron, src_neuron, dest_neuron, sum_point, deltaT, W_d, num_neurons, fpCreateSynapse); } } }
afb0b9468671d508787357b62c2fc64fec2773b8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <pthread.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> /****************************************************************************** Displays two grey scale images. On the left is an image that has come from an image processing pipeline, just after colour thresholding. On the right is the result of applying an edge detection convolution operator to the left image. This program performs that convolution. Things to note: - A single unsigned char stores a pixel intensity value. 0 is black, 256 is white. - The colour mode used is GL_LUMINANCE. This uses a single number to represent a pixel's intensity. In this case we want 256 shades of grey, which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as the pixel data type. To compile adapt the code below wo match your filenames: nvcc -o ip_coursework_006 ip_coursework_006.cu -lglut -lGL -lm Dr Kevan Buckley, University of Wolverhampton, 2018 ******************************************************************************/ #define width 100 #define height 72 unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,0,255,255,255,255,0,0,0,0,0,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0, 0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,255,255,255,255,255,0,0,0,0,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,0,0,0,0,0,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,0,0,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,0,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0, 0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0, 0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,0,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,0,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,0,255,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; unsigned char results[width * height]; __global__ void kernel(unsigned char *in, unsigned char *out) { int i = blockDim.x * blockIdx.x + threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of 
calculate
// ---- continuation of __global__ kernel(in, out): per-pixel edge detector.
// Launched from main() as 100 blocks x 72 threads, one thread per pixel of
// the 100x72 image; blockIdx.x is used as the row (y), threadIdx.x as the
// column (x).
// NOTE(review): this mapping looks transposed — width is 100 and height 72,
// yet y ranges over blockIdx.x in [0,100) and x over threadIdx.x in [0,72).
// Also i = blockDim.x*blockIdx.x + threadIdx.x = 72*blockIdx + threadIdx,
// which is not the row-major pixel index y*width + x. Confirm the intended
// layout before trusting the border test below.
y = blockIdx.x;
x = threadIdx.x;
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
    // border pixel: the full 4-neighbourhood is unavailable, output black
    out[i] = 0;
} else {
    b = i + width;  // index one row after i
    d = i - 1;      // index one element before i
    f = i + 1;      // index one element after i
    h = i - width;  // index one row before i
    // 4-neighbour Laplacian: 4*centre minus the four adjacent samples
    r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1);
    if (r > 0) { // if the result is positive this is an edge pixel
        out[i] = 255;
    } else {
        out[i] = 0;
    }
}
}

// Terminate the program (called on ESC key press or SIGINT).
void tidy_and_exit() {
    exit(0);
}

// SIGINT handler: announce the interrupt and exit cleanly.
void sigint_callback(int signal_number){
    printf("\nInterrupt from keyboard\n");
    tidy_and_exit();
}

// GLUT display callback: draws the source image on the left half of the
// window and the edge-detection result on the right half, both as 8-bit
// luminance rasters.
static void display() {
    glClear(GL_COLOR_BUFFER_BIT);
    glRasterPos4i(-1, -1, 0, 1);
    glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
    glRasterPos4i(0, -1, 0, 1);
    glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
    glFlush();
}

// GLUT keyboard callback: ESC (key code 27) exits; any other key prints a hint.
static void key_pressed(unsigned char key, int x, int y) {
    switch(key){
        case 27: // escape
            tidy_and_exit();
            break;
        default:
            printf("\nPress escape to exit\n");
            break;
    }
}

// Stores finish - start, in nanoseconds, into *difference.
// Returns 0 when the difference is positive, non-zero otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
    long long int ds = finish->tv_sec - start->tv_sec;
    long long int dn = finish->tv_nsec - start->tv_nsec;
    if (dn < 0) { // borrow one second when the nanosecond part underflows
        ds--;
        dn += 1000000000;
    }
    *difference = ds * 1000000000 + dn;
    return !(*difference > 0);
}

// Entry point: uploads the image to the GPU, runs the edge-detection kernel,
// copies the result back, prints every result byte plus the elapsed time,
// then hands control to GLUT to display the two images side by side.
int main(int argc, char **argv) {
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    int i; // NOTE(review): unused
    signal(SIGINT, sigint_callback);
    int n_pixels = width * height; // 100 * 72 = 7200
    // clear the host-side result buffer
    for(int q = 0; q< n_pixels; q++){
        results[q] = 0;
    }
    unsigned char *d_results;
    unsigned char *d_image;
    printf("image dimensions %dx%d\n", width, height);
    // NOTE(review): none of the hip* return codes below are checked.
    hipMalloc((void**) &d_results, sizeof(unsigned char) * n_pixels);
    hipMalloc((void**) &d_image, sizeof(unsigned char) * n_pixels);
    // 7200 is n_pixels written out as a literal
    hipMemcpy(d_image, image, (sizeof(unsigned char) * 7200), hipMemcpyHostToDevice);
    // 100 blocks of 72 threads = one thread per pixel (see the NOTE at the
    // top of the kernel about the block/thread to row/column mapping).
    hipLaunchKernelGGL(( kernel) , dim3(100), dim3(72), 0, 0, d_image, d_results);
    // This memcpy is blocking, so the kernel has completed before results
    // is read back on the host.
    hipMemcpy(results, d_results, (sizeof(unsigned char) * n_pixels), hipMemcpyDeviceToHost);
    for(int q = 0; q< n_pixels; q++){
        printf("%u\n", results[q]);
    }
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed / 1.0e9));
    glutInit(&argc, argv);
    glutInitWindowSize(width * 2,height);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
    glutCreateWindow("6CS005 Image Processing Coursework");
    glutDisplayFunc(display);
    glutKeyboardFunc(key_pressed);
    glClearColor(0.0, 1.0, 0.0, 1.0);
    glutMainLoop();
    // NOTE(review): glutMainLoop() does not return, so everything below is
    // unreachable; tidy_and_exit() would also exit before the hipFree calls,
    // so d_results/d_image are only reclaimed at process teardown.
    tidy_and_exit();
    hipFree(d_results);
    hipFree(d_image);
    return 0;
}
afb0b9468671d508787357b62c2fc64fec2773b8.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <pthread.h> #include <cuda_runtime_api.h> #include <cuda.h> #include <cuda_runtime.h> /****************************************************************************** Displays two grey scale images. On the left is an image that has come from an image processing pipeline, just after colour thresholding. On the right is the result of applying an edge detection convolution operator to the left image. This program performs that convolution. Things to note: - A single unsigned char stores a pixel intensity value. 0 is black, 256 is white. - The colour mode used is GL_LUMINANCE. This uses a single number to represent a pixel's intensity. In this case we want 256 shades of grey, which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as the pixel data type. To compile adapt the code below wo match your filenames: nvcc -o ip_coursework_006 ip_coursework_006.cu -lglut -lGL -lm Dr Kevan Buckley, University of Wolverhampton, 2018 ******************************************************************************/ #define width 100 #define height 72 unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,0,255,255,255,255,0,0,0,0,0,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0, 0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,255, 
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,255,255,255,255,255,0,0,0,0,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,0,0,0,0,0,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,0,0,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,0,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,0,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0, 0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,0,0,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0, 0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,0,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,0,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,0,0,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,0,255,0,0,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; unsigned char results[width * height]; __global__ void kernel(unsigned char *in, unsigned char *out) { int i = blockDim.x * blockIdx.x + threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = blockIdx.x; x = threadIdx.x; 
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int x, int y) { switch(key){ case 27: // escape tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if (dn < 0) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); int i; signal(SIGINT, sigint_callback); int n_pixels = width * height; for(int q = 0; q< n_pixels; q++){ results[q] = 0; } unsigned char *d_results; unsigned char *d_image; printf("image dimensions %dx%d\n", width, height); cudaMalloc((void**) &d_results, sizeof(unsigned char) * n_pixels); cudaMalloc((void**) &d_image, sizeof(unsigned char) * n_pixels); cudaMemcpy(d_image, image, (sizeof(unsigned char) * 7200), cudaMemcpyHostToDevice); kernel <<<100, 72>>>(d_image, d_results); cudaMemcpy(results, d_results, (sizeof(unsigned char) * n_pixels), cudaMemcpyDeviceToHost); for(int q = 0; q< n_pixels; q++){ printf("%u\n", results[q]); } clock_gettime(CLOCK_MONOTONIC, 
&finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed / 1.0e9)); glutInit(&argc, argv); glutInitWindowSize(width * 2,height); glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE); glutCreateWindow("6CS005 Image Processing Coursework"); glutDisplayFunc(display); glutKeyboardFunc(key_pressed); glClearColor(0.0, 1.0, 0.0, 1.0); glutMainLoop(); tidy_and_exit(); cudaFree(d_results); cudaFree(d_image); return 0; }
c278f7ef948d8d38519cfed38e48cc4e464fe8d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {
}

// Scaled AXPY-style update: for every element i < lengthC,
//   c[i] += alpha * a[0] * b[i]
// where a[0] is a device-resident scalar factor.
// Expects a 1-D launch; each thread owns exactly one element, and threads
// that land past the end of the array (grid tail) return immediately.
__global__ void alphaaxpy(const int lengthC, const double alpha, const double *a, const double *b, double *c)
{
    // Flat global index of this thread within the 1-D grid.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= lengthC) {
        return; // tail guard: lengthC need not be a multiple of the block size
    }
    // a[0], not a[1]: arrays are zero-indexed in C.
    c[idx] += alpha * a[0] * b[idx];
}
c278f7ef948d8d38519cfed38e48cc4e464fe8d1.cu
#include "includes.h"

extern "C" {
}

// Element-wise scaled multiply-accumulate:
//   c[i] = c[i] + alpha * a[0] * b[i]   for all i in [0, lengthC)
// a[0] is a scalar stored in device memory (zero-based indexing!).
// Intended for a 1-D launch, one element per thread; the bounds check
// below handles the final partial block of the grid.
__global__ void alphaaxpy(const int lengthC, const double alpha, const double *a, const double *b, double *c)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < lengthC) {
        c[tid] = c[tid] + alpha * a[0] * b[tid];
    }
}
645751610c1f75c9eb00e595ba87153070ae9edf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> #include <stdio.h> #include <iostream> #include <ctime> #include <random> #include "GpuTimer.h" #include "tclap/CmdLine.h" #define EPS 0.03 #define BLOCK_WIDTH 16 #define LATTICE_DATA_TYPE float __global__ void DCSKernel(LATTICE_DATA_TYPE *slice, const float *atomXs, const float *atomYs, const float *atomZs, const float *charges, const unsigned short int z, const unsigned int numOfAtoms, const unsigned short int latticeX, const unsigned short int latticeY, const LATTICE_DATA_TYPE latticeGridSpacing) { unsigned int atomIdx = blockDim.x * blockIdx.x + threadIdx.x; if (atomIdx < numOfAtoms) { float atomX = atomXs[atomIdx]; float atomY = atomYs[atomIdx]; float atomZ = atomZs[atomIdx]; float charge = charges[atomIdx]; const unsigned int latticeSliceGridSize = latticeX * latticeY; unsigned int sliceYOffset; unsigned long int sliceIdx; LATTICE_DATA_TYPE dx, dy, dz, dx2, dy2, dz2, dy2dz2, distance; LATTICE_DATA_TYPE potential; dz = atomZ - z * latticeGridSpacing; dz2 = dz * dz; for (unsigned short int y = 0; y < latticeY; y++) { sliceYOffset = latticeX * y; dy = atomY - y * latticeGridSpacing; dy2 = dy * dy; dy2dz2 = dy2 + dz2; for (unsigned short int x = 0; x < latticeX; x++) { dx = atomX - x * latticeGridSpacing; dx2 = dx * dx; distance = sqrt(dx2 + dy2dz2); potential = charge / distance; sliceIdx = sliceYOffset + x; atomicAdd(&slice[sliceIdx], potential); } } } } void CPU(LATTICE_DATA_TYPE *lattice, const float *atomXs, const float *atomYs, const float *atomZs, const float *charges, const unsigned short int z, const unsigned int numOfAtoms, const unsigned short int latticeX, const unsigned short int latticeY, const LATTICE_DATA_TYPE latticeGridSpacing) { float atomX, atomY, atomZ, charge; const unsigned int latticeSliceGridSize = latticeX * latticeY; unsigned long int latticeZOffset; unsigned int latticeYOffset; 
unsigned long int latticeOffset; unsigned long int latticeIdx; LATTICE_DATA_TYPE dx, dy, dz, dx2, dy2, dz2, dy2dz2, distance; LATTICE_DATA_TYPE potential; for (unsigned int atomIdx = 0; atomIdx < numOfAtoms; atomIdx++) { atomX = atomXs[atomIdx]; atomY = atomYs[atomIdx]; atomZ = atomZs[atomIdx]; charge = charges[atomIdx]; latticeZOffset = latticeSliceGridSize * z; dz = atomZ - z * latticeGridSpacing; dz2 = dz * dz; for (unsigned short int y = 0; y < latticeY; y++) { latticeYOffset = latticeX * y; latticeOffset = latticeZOffset + latticeYOffset; dy = atomY - y * latticeGridSpacing; dy2 = dy * dy; dy2dz2 = dy2 + dz2; for (unsigned short int x = 0; x < latticeX; x++) { dx = atomX - x * latticeGridSpacing; dx2 = dx * dx; distance = sqrt(dx2 + dy2dz2); potential = charge / distance; latticeIdx = latticeOffset + x; lattice[latticeIdx] += potential; } } } } int main(int argc, char *argv[]) { double latticeW; double latticeH; double latticeD; double latticeGridSpacing; unsigned int numOfAtoms; float maxCharge; uint8_t numOfStreams; try { TCLAP::CmdLine cmd("Runs the Direct Couloumb Summation algorithm on the CPU & GPU (CUDA).", ' ', "1.0"); TCLAP::ValueArg<double> latticeWArg("x", "width", "Lattice width", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeHArg("y", "height", "Lattice height", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeDArg("z", "depth", "Lattice depth", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeGridSpacingArg("g", "spacing", "Lattice grid spacing", true, 0.1f, "double"); TCLAP::ValueArg<unsigned int> numOfAtomsArg("a", "atoms", "Number of atoms", true, 1, "int"); TCLAP::ValueArg<double> maxChargeArg("c", "charge", "Maximum charge", true, 1.0f, "double"); TCLAP::ValueArg<unsigned int> numOfStreamsArg("n", "streams", "Number of CUDA streams", false, 2, "int"); cmd.add(numOfStreamsArg); cmd.add(maxChargeArg); cmd.add(numOfAtomsArg); cmd.add(latticeGridSpacingArg); cmd.add(latticeDArg); cmd.add(latticeHArg); cmd.add(latticeWArg); 
cmd.parse(argc, argv); latticeW = latticeWArg.getValue(); latticeH = latticeHArg.getValue(); latticeD = latticeDArg.getValue(); latticeGridSpacing = latticeGridSpacingArg.getValue(); numOfAtoms = numOfAtomsArg.getValue(); maxCharge = maxChargeArg.getValue(); numOfStreams = numOfStreamsArg.getValue(); } catch (TCLAP::ArgException &e) { fprintf(stderr, "Error in argument(s): %s\n", e.what()); return 1; } const unsigned short int latticeX = floor(latticeW / latticeGridSpacing) + 1; const unsigned short int latticeY = floor(latticeH / latticeGridSpacing) + 1; const unsigned short int latticeZ = floor(latticeD / latticeGridSpacing) + 1; const unsigned long int sliceGridSize = latticeX * latticeY; const unsigned long int latticeGridSize = sliceGridSize * latticeZ; float *h_AtomX, *h_AtomY, *h_AtomZ; float *h_Charge; float *d_AtomX, *d_AtomY, *d_AtomZ; float *d_Charge; LATTICE_DATA_TYPE *latticeCPU; LATTICE_DATA_TYPE *h_LatticeDCS; LATTICE_DATA_TYPE **d_SliceDCS; hipError_t cudaStatus; std::default_random_engine generator; std::uniform_real_distribution<float> latticeXDistribution(0, latticeX - 1); std::uniform_real_distribution<float> latticeYDistribution(0, latticeY - 1); std::uniform_real_distribution<float> latticeZDistribution(0, latticeZ - 1); std::uniform_real_distribution<float> chargeDistribution(0, maxCharge); uint8_t numOfRemainingLaunches; uint8_t streamIdx; hipStream_t *stream; unsigned long h_LatticeDCSOffset; clock_t mallocClock; double mallocDuration; GpuTimer cudaMallocTimer; GpuTimer cudaMemcpyHostDeviceTimer; clock_t randomGenerationClock; double randomGenerationDuration; GpuTimer DCSKernelTimer; clock_t CPUClock; double CPUDuration; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } mallocClock = clock(); stream = (hipStream_t*)malloc(numOfStreams * sizeof(hipStream_t)); hipHostMalloc((void**)&h_AtomX, numOfAtoms * sizeof(float), hipHostMallocDefault); hipHostMalloc((void**)&h_AtomY, numOfAtoms * sizeof(float), hipHostMallocDefault); hipHostMalloc((void**)&h_AtomZ, numOfAtoms * sizeof(float), hipHostMallocDefault); hipHostMalloc((void**)&h_Charge, numOfAtoms * sizeof(float), hipHostMallocDefault); hipHostMalloc((void**)&h_LatticeDCS, latticeGridSize * sizeof(LATTICE_DATA_TYPE), hipHostMallocDefault); hipHostMalloc((void**)&d_SliceDCS, numOfStreams * sizeof(LATTICE_DATA_TYPE*), hipHostMallocDefault); latticeCPU = (LATTICE_DATA_TYPE*)malloc(latticeGridSize * sizeof(LATTICE_DATA_TYPE)); memset(latticeCPU, 0, latticeGridSize * sizeof(LATTICE_DATA_TYPE)); mallocDuration = (clock() - mallocClock) / (double)CLOCKS_PER_SEC; printf("Memory allocation (host): %f ms\n", mallocDuration * 1000); cudaMallocTimer.Start(); cudaStatus = hipMalloc((void**)&d_AtomX, numOfAtoms * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc (atomX) failed!"); goto Error; } cudaStatus = hipMalloc((void**)&d_AtomY, numOfAtoms * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc (atomY) failed!"); goto Error; } cudaStatus = hipMalloc((void**)&d_AtomZ, numOfAtoms * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc (atomZ) failed!"); goto Error; } cudaStatus = hipMalloc((void**)&d_Charge, numOfAtoms * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc (charge) failed!"); goto Error; } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaStatus = hipMalloc((void**)&d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc (DCS, slice[%i]) failed!", streamIdx); goto Error; } } cudaMallocTimer.Stop(); printf("Memory allocation (device): %f ms\n", 
cudaMallocTimer.Elapsed()); randomGenerationClock = clock(); for (unsigned int i = 0; i < numOfAtoms; i++) { h_AtomX[i] = latticeXDistribution(generator); h_AtomY[i] = latticeYDistribution(generator); h_AtomZ[i] = latticeZDistribution(generator); h_Charge[i] = chargeDistribution(generator); } randomGenerationDuration = (clock() - randomGenerationClock) / (double)CLOCKS_PER_SEC; printf("Random generation (CPU): %f ms\n", randomGenerationDuration * 1000); cudaMemcpyHostDeviceTimer.Start(); cudaStatus = hipMemcpy(d_AtomX, h_AtomX, numOfAtoms * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (atomX, host -> device) failed!"); goto Error; } cudaStatus = hipMemcpy(d_AtomY, h_AtomY, numOfAtoms * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (atomY, host -> device) failed!"); goto Error; } cudaStatus = hipMemcpy(d_AtomZ, h_AtomZ, numOfAtoms * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (atomZ, host -> device) failed!"); goto Error; } cudaStatus = hipMemcpy(d_Charge, h_Charge, numOfAtoms * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (charge, host -> device) failed!"); goto Error; } cudaMemcpyHostDeviceTimer.Stop(); printf("Memory copy (host -> device): %f ms\n", cudaMemcpyHostDeviceTimer.Elapsed()); dim3 dimBlockDCS(BLOCK_WIDTH * BLOCK_WIDTH, 1, 1); dim3 dimGridDCS((numOfAtoms - 1) / dimBlockDCS.x + 1, 1, 1); for (uint8_t streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { hipStreamCreate(&stream[streamIdx]); } DCSKernelTimer.Start(); h_LatticeDCSOffset = 0; if (latticeZ > 1) { for (unsigned short int z = 0; z < latticeZ; z += numOfStreams) { for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { hipMemsetAsync(d_SliceDCS[streamIdx], 0, sliceGridSize * sizeof(LATTICE_DATA_TYPE), stream[streamIdx]); } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) 
{ DCSKernel << <dimGridDCS, dimBlockDCS, 0, stream[streamIdx] >> >(d_SliceDCS[streamIdx], d_AtomX, d_AtomY, d_AtomZ, d_Charge, z + streamIdx, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { h_LatticeDCSOffset = (z + streamIdx) * sliceGridSize; hipMemcpyAsync(h_LatticeDCS + h_LatticeDCSOffset, d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE), hipMemcpyDeviceToHost, stream[streamIdx]); } } } numOfRemainingLaunches = latticeZ % numOfStreams; if (numOfRemainingLaunches != 0) { unsigned short int z = (latticeZ - numOfStreams); for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { hipMemsetAsync(d_SliceDCS[streamIdx], 0, sliceGridSize * sizeof(LATTICE_DATA_TYPE), stream[streamIdx]); } for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { DCSKernel << <dimGridDCS, dimBlockDCS, 0, stream[streamIdx] >> >(d_SliceDCS[streamIdx], d_AtomX, d_AtomY, d_AtomZ, d_Charge, z + streamIdx, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { h_LatticeDCSOffset = (z + streamIdx) * sliceGridSize; hipMemcpyAsync(h_LatticeDCS + h_LatticeDCSOffset, d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE), hipMemcpyDeviceToHost, stream[streamIdx]); } } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { hipStreamSynchronize(stream[streamIdx]); } DCSKernelTimer.Stop(); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "DCSKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching DCSKernel!\n", cudaStatus); goto Error; } printf("DCSKernel duration: %f ms\n", DCSKernelTimer.Elapsed()); //CPU CPUClock = clock(); memset(latticeCPU, 0, latticeGridSize * sizeof(LATTICE_DATA_TYPE)); for (unsigned short int 
z = 0; z < latticeZ; z++) { CPU(latticeCPU, h_AtomX, h_AtomY, h_AtomZ, h_Charge, z, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } CPUDuration = (clock() - CPUClock) / (double)CLOCKS_PER_SEC; printf("CPU duration: %f ms\n", CPUDuration * 1000); printf("DCS verification started.\n"); for (unsigned int i = 0; i < latticeGridSize; i++) { if (abs(latticeCPU[i] - h_LatticeDCS[i]) > EPS) { fprintf(stderr, "DCS Verification failed at element %i! latticeCPU[%i] = %f, latticeDCS[%i] = %f\n", i, i, latticeCPU[i], i, h_LatticeDCS[i]); return 1; } } printf("DCS verification PASSED.\n"); Error: free(h_AtomX); free(h_AtomY); free(h_AtomZ); free(latticeCPU); free(h_LatticeDCS); hipFree(d_AtomX); hipFree(d_AtomY); hipFree(d_AtomZ); for (uint8_t streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { hipStreamDestroy(stream[streamIdx]); hipFree(d_SliceDCS[streamIdx]); } hipError_t cudaStatusReset = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return cudaStatus; }
645751610c1f75c9eb00e595ba87153070ae9edf.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <stdio.h> #include <iostream> #include <ctime> #include <random> #include "GpuTimer.h" #include "tclap/CmdLine.h" #define EPS 0.03 #define BLOCK_WIDTH 16 #define LATTICE_DATA_TYPE float __global__ void DCSKernel(LATTICE_DATA_TYPE *slice, const float *atomXs, const float *atomYs, const float *atomZs, const float *charges, const unsigned short int z, const unsigned int numOfAtoms, const unsigned short int latticeX, const unsigned short int latticeY, const LATTICE_DATA_TYPE latticeGridSpacing) { unsigned int atomIdx = blockDim.x * blockIdx.x + threadIdx.x; if (atomIdx < numOfAtoms) { float atomX = atomXs[atomIdx]; float atomY = atomYs[atomIdx]; float atomZ = atomZs[atomIdx]; float charge = charges[atomIdx]; const unsigned int latticeSliceGridSize = latticeX * latticeY; unsigned int sliceYOffset; unsigned long int sliceIdx; LATTICE_DATA_TYPE dx, dy, dz, dx2, dy2, dz2, dy2dz2, distance; LATTICE_DATA_TYPE potential; dz = atomZ - z * latticeGridSpacing; dz2 = dz * dz; for (unsigned short int y = 0; y < latticeY; y++) { sliceYOffset = latticeX * y; dy = atomY - y * latticeGridSpacing; dy2 = dy * dy; dy2dz2 = dy2 + dz2; for (unsigned short int x = 0; x < latticeX; x++) { dx = atomX - x * latticeGridSpacing; dx2 = dx * dx; distance = sqrt(dx2 + dy2dz2); potential = charge / distance; sliceIdx = sliceYOffset + x; atomicAdd(&slice[sliceIdx], potential); } } } } void CPU(LATTICE_DATA_TYPE *lattice, const float *atomXs, const float *atomYs, const float *atomZs, const float *charges, const unsigned short int z, const unsigned int numOfAtoms, const unsigned short int latticeX, const unsigned short int latticeY, const LATTICE_DATA_TYPE latticeGridSpacing) { float atomX, atomY, atomZ, charge; const unsigned int latticeSliceGridSize = latticeX * latticeY; unsigned long int latticeZOffset; unsigned int latticeYOffset; unsigned long int latticeOffset; unsigned long int latticeIdx; 
LATTICE_DATA_TYPE dx, dy, dz, dx2, dy2, dz2, dy2dz2, distance; LATTICE_DATA_TYPE potential; for (unsigned int atomIdx = 0; atomIdx < numOfAtoms; atomIdx++) { atomX = atomXs[atomIdx]; atomY = atomYs[atomIdx]; atomZ = atomZs[atomIdx]; charge = charges[atomIdx]; latticeZOffset = latticeSliceGridSize * z; dz = atomZ - z * latticeGridSpacing; dz2 = dz * dz; for (unsigned short int y = 0; y < latticeY; y++) { latticeYOffset = latticeX * y; latticeOffset = latticeZOffset + latticeYOffset; dy = atomY - y * latticeGridSpacing; dy2 = dy * dy; dy2dz2 = dy2 + dz2; for (unsigned short int x = 0; x < latticeX; x++) { dx = atomX - x * latticeGridSpacing; dx2 = dx * dx; distance = sqrt(dx2 + dy2dz2); potential = charge / distance; latticeIdx = latticeOffset + x; lattice[latticeIdx] += potential; } } } } int main(int argc, char *argv[]) { double latticeW; double latticeH; double latticeD; double latticeGridSpacing; unsigned int numOfAtoms; float maxCharge; uint8_t numOfStreams; try { TCLAP::CmdLine cmd("Runs the Direct Couloumb Summation algorithm on the CPU & GPU (CUDA).", ' ', "1.0"); TCLAP::ValueArg<double> latticeWArg("x", "width", "Lattice width", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeHArg("y", "height", "Lattice height", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeDArg("z", "depth", "Lattice depth", true, 1.0f, "double"); TCLAP::ValueArg<double> latticeGridSpacingArg("g", "spacing", "Lattice grid spacing", true, 0.1f, "double"); TCLAP::ValueArg<unsigned int> numOfAtomsArg("a", "atoms", "Number of atoms", true, 1, "int"); TCLAP::ValueArg<double> maxChargeArg("c", "charge", "Maximum charge", true, 1.0f, "double"); TCLAP::ValueArg<unsigned int> numOfStreamsArg("n", "streams", "Number of CUDA streams", false, 2, "int"); cmd.add(numOfStreamsArg); cmd.add(maxChargeArg); cmd.add(numOfAtomsArg); cmd.add(latticeGridSpacingArg); cmd.add(latticeDArg); cmd.add(latticeHArg); cmd.add(latticeWArg); cmd.parse(argc, argv); latticeW = latticeWArg.getValue(); latticeH 
= latticeHArg.getValue(); latticeD = latticeDArg.getValue(); latticeGridSpacing = latticeGridSpacingArg.getValue(); numOfAtoms = numOfAtomsArg.getValue(); maxCharge = maxChargeArg.getValue(); numOfStreams = numOfStreamsArg.getValue(); } catch (TCLAP::ArgException &e) { fprintf(stderr, "Error in argument(s): %s\n", e.what()); return 1; } const unsigned short int latticeX = floor(latticeW / latticeGridSpacing) + 1; const unsigned short int latticeY = floor(latticeH / latticeGridSpacing) + 1; const unsigned short int latticeZ = floor(latticeD / latticeGridSpacing) + 1; const unsigned long int sliceGridSize = latticeX * latticeY; const unsigned long int latticeGridSize = sliceGridSize * latticeZ; float *h_AtomX, *h_AtomY, *h_AtomZ; float *h_Charge; float *d_AtomX, *d_AtomY, *d_AtomZ; float *d_Charge; LATTICE_DATA_TYPE *latticeCPU; LATTICE_DATA_TYPE *h_LatticeDCS; LATTICE_DATA_TYPE **d_SliceDCS; cudaError_t cudaStatus; std::default_random_engine generator; std::uniform_real_distribution<float> latticeXDistribution(0, latticeX - 1); std::uniform_real_distribution<float> latticeYDistribution(0, latticeY - 1); std::uniform_real_distribution<float> latticeZDistribution(0, latticeZ - 1); std::uniform_real_distribution<float> chargeDistribution(0, maxCharge); uint8_t numOfRemainingLaunches; uint8_t streamIdx; cudaStream_t *stream; unsigned long h_LatticeDCSOffset; clock_t mallocClock; double mallocDuration; GpuTimer cudaMallocTimer; GpuTimer cudaMemcpyHostDeviceTimer; clock_t randomGenerationClock; double randomGenerationDuration; GpuTimer DCSKernelTimer; clock_t CPUClock; double CPUDuration; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } mallocClock = clock(); stream = (cudaStream_t*)malloc(numOfStreams * sizeof(cudaStream_t)); cudaHostAlloc((void**)&h_AtomX, numOfAtoms * sizeof(float), cudaHostAllocDefault); cudaHostAlloc((void**)&h_AtomY, numOfAtoms * sizeof(float), cudaHostAllocDefault); cudaHostAlloc((void**)&h_AtomZ, numOfAtoms * sizeof(float), cudaHostAllocDefault); cudaHostAlloc((void**)&h_Charge, numOfAtoms * sizeof(float), cudaHostAllocDefault); cudaHostAlloc((void**)&h_LatticeDCS, latticeGridSize * sizeof(LATTICE_DATA_TYPE), cudaHostAllocDefault); cudaHostAlloc((void**)&d_SliceDCS, numOfStreams * sizeof(LATTICE_DATA_TYPE*), cudaHostAllocDefault); latticeCPU = (LATTICE_DATA_TYPE*)malloc(latticeGridSize * sizeof(LATTICE_DATA_TYPE)); memset(latticeCPU, 0, latticeGridSize * sizeof(LATTICE_DATA_TYPE)); mallocDuration = (clock() - mallocClock) / (double)CLOCKS_PER_SEC; printf("Memory allocation (host): %f ms\n", mallocDuration * 1000); cudaMallocTimer.Start(); cudaStatus = cudaMalloc((void**)&d_AtomX, numOfAtoms * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc (atomX) failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&d_AtomY, numOfAtoms * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc (atomY) failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&d_AtomZ, numOfAtoms * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc (atomZ) failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&d_Charge, numOfAtoms * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc (charge) failed!"); goto Error; } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaStatus = cudaMalloc((void**)&d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc (DCS, slice[%i]) failed!", streamIdx); goto Error; } } cudaMallocTimer.Stop(); printf("Memory allocation 
(device): %f ms\n", cudaMallocTimer.Elapsed()); randomGenerationClock = clock(); for (unsigned int i = 0; i < numOfAtoms; i++) { h_AtomX[i] = latticeXDistribution(generator); h_AtomY[i] = latticeYDistribution(generator); h_AtomZ[i] = latticeZDistribution(generator); h_Charge[i] = chargeDistribution(generator); } randomGenerationDuration = (clock() - randomGenerationClock) / (double)CLOCKS_PER_SEC; printf("Random generation (CPU): %f ms\n", randomGenerationDuration * 1000); cudaMemcpyHostDeviceTimer.Start(); cudaStatus = cudaMemcpy(d_AtomX, h_AtomX, numOfAtoms * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (atomX, host -> device) failed!"); goto Error; } cudaStatus = cudaMemcpy(d_AtomY, h_AtomY, numOfAtoms * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (atomY, host -> device) failed!"); goto Error; } cudaStatus = cudaMemcpy(d_AtomZ, h_AtomZ, numOfAtoms * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (atomZ, host -> device) failed!"); goto Error; } cudaStatus = cudaMemcpy(d_Charge, h_Charge, numOfAtoms * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (charge, host -> device) failed!"); goto Error; } cudaMemcpyHostDeviceTimer.Stop(); printf("Memory copy (host -> device): %f ms\n", cudaMemcpyHostDeviceTimer.Elapsed()); dim3 dimBlockDCS(BLOCK_WIDTH * BLOCK_WIDTH, 1, 1); dim3 dimGridDCS((numOfAtoms - 1) / dimBlockDCS.x + 1, 1, 1); for (uint8_t streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaStreamCreate(&stream[streamIdx]); } DCSKernelTimer.Start(); h_LatticeDCSOffset = 0; if (latticeZ > 1) { for (unsigned short int z = 0; z < latticeZ; z += numOfStreams) { for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaMemsetAsync(d_SliceDCS[streamIdx], 0, sliceGridSize * sizeof(LATTICE_DATA_TYPE), stream[streamIdx]); } for (streamIdx = 0; 
streamIdx < numOfStreams; streamIdx++) { DCSKernel << <dimGridDCS, dimBlockDCS, 0, stream[streamIdx] >> >(d_SliceDCS[streamIdx], d_AtomX, d_AtomY, d_AtomZ, d_Charge, z + streamIdx, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { h_LatticeDCSOffset = (z + streamIdx) * sliceGridSize; cudaMemcpyAsync(h_LatticeDCS + h_LatticeDCSOffset, d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE), cudaMemcpyDeviceToHost, stream[streamIdx]); } } } numOfRemainingLaunches = latticeZ % numOfStreams; if (numOfRemainingLaunches != 0) { unsigned short int z = (latticeZ - numOfStreams); for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { cudaMemsetAsync(d_SliceDCS[streamIdx], 0, sliceGridSize * sizeof(LATTICE_DATA_TYPE), stream[streamIdx]); } for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { DCSKernel << <dimGridDCS, dimBlockDCS, 0, stream[streamIdx] >> >(d_SliceDCS[streamIdx], d_AtomX, d_AtomY, d_AtomZ, d_Charge, z + streamIdx, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } for (streamIdx = 0; streamIdx < numOfRemainingLaunches; streamIdx++) { h_LatticeDCSOffset = (z + streamIdx) * sliceGridSize; cudaMemcpyAsync(h_LatticeDCS + h_LatticeDCSOffset, d_SliceDCS[streamIdx], sliceGridSize * sizeof(LATTICE_DATA_TYPE), cudaMemcpyDeviceToHost, stream[streamIdx]); } } for (streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaStreamSynchronize(stream[streamIdx]); } DCSKernelTimer.Stop(); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "DCSKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching DCSKernel!\n", cudaStatus); goto Error; } printf("DCSKernel duration: %f ms\n", DCSKernelTimer.Elapsed()); //CPU CPUClock = clock(); memset(latticeCPU, 0, latticeGridSize * 
sizeof(LATTICE_DATA_TYPE)); for (unsigned short int z = 0; z < latticeZ; z++) { CPU(latticeCPU, h_AtomX, h_AtomY, h_AtomZ, h_Charge, z, numOfAtoms, latticeX, latticeY, latticeGridSpacing); } CPUDuration = (clock() - CPUClock) / (double)CLOCKS_PER_SEC; printf("CPU duration: %f ms\n", CPUDuration * 1000); printf("DCS verification started.\n"); for (unsigned int i = 0; i < latticeGridSize; i++) { if (abs(latticeCPU[i] - h_LatticeDCS[i]) > EPS) { fprintf(stderr, "DCS Verification failed at element %i! latticeCPU[%i] = %f, latticeDCS[%i] = %f\n", i, i, latticeCPU[i], i, h_LatticeDCS[i]); return 1; } } printf("DCS verification PASSED.\n"); Error: free(h_AtomX); free(h_AtomY); free(h_AtomZ); free(latticeCPU); free(h_LatticeDCS); cudaFree(d_AtomX); cudaFree(d_AtomY); cudaFree(d_AtomZ); for (uint8_t streamIdx = 0; streamIdx < numOfStreams; streamIdx++) { cudaStreamDestroy(stream[streamIdx]); cudaFree(d_SliceDCS[streamIdx]); } cudaError_t cudaStatusReset = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return cudaStatus; }
2a0c8eda04024853c3b2add0da5e64df46660651.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/WrapDimUtils.h> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <c10/macros/Macros.h> #include <ATen/AccumulateType.h> #include <ATen/hip/NumericLimits.cuh> #include <type_traits> #include <ATen/native/hip/PersistentSoftmax.cuh> namespace at { namespace native { namespace { template<typename T, typename AccumT, typename OutT> struct LogSoftMaxForwardEpilogue { __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : logsum(max_input + ::log(sum)) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(input - logsum); } const AccumT logsum; }; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxBackwardEpilogue { __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum); } const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxForwardEpilogue { __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input) , sum(sum) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(::exp(input - max_input) / sum); } const AccumT max_input; const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxBackwardEpilogue { __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} // XXX: gradOutput that we get here is really gradOutput * output // Look for cmul in SoftMax_updateGradInput __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return 
static_cast<T>(gradOutput - output * sum); } const AccumT sum; }; //////////////////////////////////////////////////////////////////////////////// // Spatial kernel (fast with large inner_size and small dim_size) //////////////////////////////////////////////////////////////////////////////// // Let's assume that our input has been flattened to have only three dimension: // outer x dim x inner // The spatial algorithm tries to parallelize along all of them. // Within a 2d block threadIdx.y parallelizes over dim slices, and threads that // share it will speed up reductions over dim (along axis x). // The 2d grid is used to parallelize inner dimension over y axis and outer over x. inline dim3 SpatialSoftMax_getGridSize( dim3 block, uint32_t max_active_blocks, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { // First, tile as many blocks as we can over the y axis uint32_t inner_blocks = (inner_size + block.y - 1) / block.y; if (inner_blocks > max_active_blocks) inner_blocks = max_active_blocks; // Fill the x axis with as many blocks as we can fit (a little more is ok too) uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks; if (outer_blocks > outer_size) outer_blocks = outer_size; return dim3(outer_blocks, inner_blocks); } const int max_threads = 1024; inline dim3 SpatialSoftMax_getBlockSize( uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { uint32_t inner_threads = inner_size; inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads)); uint32_t dim_threads = 1; if (inner_threads <= 64 && dim_size >= 64) { while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) dim_threads *= 2; dim_threads /= 2; } return dim3(dim_threads, inner_threads); } template<typename accscalar_t, typename Kernel> void SpatialSoftMax_getLaunchSizes( Kernel k, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size, dim3& grid, dim3& block, uint32_t& smem_size) { block = 
SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size); uint32_t block_threads = block.x * block.y; smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t); int max_active_blocks; #ifdef __HIP_PLATFORM_HCC__ // XXX HIP function signature is not compatible yet. uint32_t max_blocks; hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks, k, block_threads, smem_size); max_active_blocks = max_blocks; #else hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, k, block_threads, smem_size); #endif max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount; grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size); } inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t block_size = 1; uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads)); while (block_size < max_block_size) block_size *= 2; // Launch at least a single warp - the kernel assumes that. block_size = ::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE)); return dim3(block_size); } template<typename T> struct Add { __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct Max { __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } }; // Note that it's not a complete block-wide reduction. // Only threads that share threadIdx.y reduce values. 
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really eqivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. 
//////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; 
const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum 
+ ::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1; if (threadIdx.x < C10_WARP_SIZE) { int lane = threadIdx.x % C10_WARP_SIZE; if (lane < blockDim.x / C10_WARP_SIZE) { #pragma unroll for (int i = 0; i < C10_WARP_SIZE; ++i) { warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]); } #if TORCH_HIP_VERSION >= 9000 __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = data[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) threadVal = r(threadVal, tmp[j]); } // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> 
__global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = input[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) output[offset + j * blockDim.x] = epilogue(tmp[j]); } for (; offset < classes; offset += blockDim.x) output[offset] = epilogue(input[offset]); } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>( gradOutput, classes, 
AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpGradOutput[ILP]; outscalar_t tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } for (; offset < classes; offset += blockDim.x) gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input, at::MemoryFormat::Contiguous); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. 
// XXX: it assumes that inner_size == 1 if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. 
} else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(hipGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? 
at::empty_like(grad_, grad_.options().dtype(ScalarType::Half)) : at::empty_like(grad_, at::MemoryFormat::Contiguous); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size); } else { hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, 
accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size ); } } }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(hipGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } 
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
2a0c8eda04024853c3b2add0da5e64df46660651.cu
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <type_traits>
#include <ATen/native/cuda/PersistentSoftmax.cuh>

namespace at {
namespace native {
namespace {

// Epilogue functors: given the two reduction results (running max and the sum
// of exponentials) they map each input element to the final output element.
// They are injected into the kernels below as a template template parameter.

// log_softmax(x_i) = x_i - (max + log(sum_j exp(x_j - max)))
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
  __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
    : logsum(max_input + std::log(sum)) {}

  __device__ __forceinline__ OutT operator()(T input) const {
    return static_cast<OutT>(input - logsum);
  }

  const AccumT logsum;
};

// grad_i = gradOutput_i - exp(output_i) * sum(gradOutput)
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
  __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
    : sum(sum) {}

  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
  }

  const AccumT sum;
};

// softmax(x_i) = exp(x_i - max) / sum_j exp(x_j - max)
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
  __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
    : max_input(max_input)
    , sum(sum) {}

  __device__ __forceinline__ OutT operator()(T input) const {
    return static_cast<OutT>(std::exp(input - max_input) / sum);
  }

  const AccumT max_input;
  const AccumT sum;
};

template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
  __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
    : sum(sum) {}

  // XXX: gradOutput that we get here is really gradOutput * output
  // Look for cmul in SoftMax_updateGradInput
  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    return static_cast<T>(gradOutput - output * sum);
  }

  const AccumT sum;
};

////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// The input is viewed as a flattened 3-d shape: outer x dim x inner.
// The spatial algorithm parallelizes along all three axes: within a 2-d block,
// threadIdx.y walks dim slices while the x axis cooperates on reductions over
// dim; the 2-d grid covers inner (grid y) and outer (grid x).

inline dim3 SpatialSoftMax_getGridSize(
    dim3 block, uint32_t max_active_blocks,
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
  // Tile the y axis (inner dimension) first, capped by what can be resident.
  uint32_t grid_y = (inner_size + block.y - 1) / block.y;
  if (grid_y > max_active_blocks)
    grid_y = max_active_blocks;
  // Then fill x (outer dimension) with the remaining blocks; slightly
  // oversubscribing is fine.
  uint32_t grid_x = (max_active_blocks + grid_y - 1) / grid_y;
  if (grid_x > outer_size)
    grid_x = outer_size;
  return dim3(grid_x, grid_y);
}

const int max_threads = 1024;

inline dim3 SpatialSoftMax_getBlockSize(
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
  uint32_t inner_threads = inner_size;
  inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads));
  uint32_t dim_threads = 1;
  // Only spend threads on the dim reduction when inner is small enough to
  // leave room and dim is large enough to benefit.
  if (inner_threads <= 64 && dim_size >= 64) {
    while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
      dim_threads *= 2;
    dim_threads /= 2;
  }
  return dim3(dim_threads, inner_threads);
}

// Picks a block via SpatialSoftMax_getBlockSize, sizes dynamic shared memory
// (one accscalar_t per thread, none when blockDim.x == 1), and derives the
// grid from the occupancy of kernel `k` across all multiprocessors.
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
    Kernel k,
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
    dim3& grid, dim3& block, uint32_t& smem_size) {
  block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
  uint32_t block_threads = block.x * block.y;
  smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
  int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
  // XXX HIP function signature is not compatible yet.
  uint32_t max_blocks;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
                                                k, block_threads, smem_size);
  max_active_blocks = max_blocks;
#else
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
                                                k, block_threads, smem_size);
#endif
  max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  grid = SpatialSoftMax_getGridSize(block, max_active_blocks,
                                    outer_size, dim_size, inner_size);
}

// 1-d block for the "regular" kernels: largest power of two not exceeding
// dim_size / ILP (capped at max_threads), but at least one full warp —
// the kernels assume a whole warp is present.
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
  uint64_t block_size = 1;
  uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
  while (block_size < max_block_size)
    block_size *= 2;
  block_size = std::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE));
  return dim3(block_size);
}

template<typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a + b;
  }
};

template<typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
  }
};

// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really eqivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. 
//////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * 
dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { 
return sum + std::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1; if (threadIdx.x < C10_WARP_SIZE) { int lane = threadIdx.x % C10_WARP_SIZE; if (lane < blockDim.x / C10_WARP_SIZE) { #pragma unroll for (int i = 0; i < C10_WARP_SIZE; ++i) { warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]); } #if CUDA_VERSION >= 9000 __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = data[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) threadVal = r(threadVal, tmp[j]); } // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> 
__global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) tmp[j] = input[offset + j * blockDim.x]; #pragma unroll for (int j = 0; j < ILP; ++j) output[offset + j * blockDim.x] = epilogue(tmp[j]); } for (; offset < classes; offset += blockDim.x) output[offset] = epilogue(input[offset]); } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>( gradOutput, classes, 
AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpGradOutput[ILP]; outscalar_t tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } for (; offset < classes; offset += blockDim.x) gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float)) : at::empty_like(input, at::MemoryFormat::Contiguous); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. 
// XXX: it assumes that inner_size == 1 if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. 
} else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(cudaGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? 
at::empty_like(grad_, grad_.options().dtype(ScalarType::Half)) : at::empty_like(grad_, at::MemoryFormat::Contiguous); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (inner_size == 1) { const int ILP = 2; dim3 grid(outer_size); dim3 block = SoftMax_getBlockSize(ILP, dim_size); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size); } else { cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), 
stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size ); } } }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), outer_size, dim_size, inner_size ); } }); } THCudaCheck(cudaGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return 
host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
6e6dc013b104f4766d02430d0d4d543bfb229ff9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The "wb.h" file(library) has been included in this code.
First compile dataset_generator.cpp. You may use any no. of pixels in the
x and y dimensions. The dataset_generator will output input.ppm and output.ppm.
Compile this file using "./a.out input.ppm output.pbm"
*/
#include "wb.h"

//@@ define error checking macro here.
// Reports a failing HIP API call (statement text + HIP error string) and
// returns -1 from the enclosing function.
// NOTE(review): this macro is defined but never used below -- the hipMalloc /
// hipMemcpy / launch results are currently unchecked.
#define errCheck(stmt) \
  do { \
    hipError_t err = stmt; \
    if (err != hipSuccess) { \
      printErrorLog(ERROR, "Failed to run stmt ", #stmt); \
      printErrorLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
      return -1; \
    } \
  } while (0)

//@@ INSERT CODE HERE
// Converts an interleaved-RGB float image to a single-channel grayscale image.
// Launch layout: 2D grid of 2D blocks, one thread per output pixel.
__global__ void ImageGrayScaleKernel(float* deviceInputImageData,float* deviceOutputImageData,int imageHeight,int imageWidth)
{
  // Global pixel coordinates of this thread.
  int row=blockIdx.y*blockDim.y+threadIdx.y;
  int col=blockIdx.x*blockDim.x+threadIdx.x;
  // The grid is rounded up, so guard threads that fall outside the image.
  if(row<imageHeight && col<imageWidth)
  {
    int idx=row*imageWidth+col;
    // Input stores 3 consecutive floats (R,G,B) per pixel.
    float r=deviceInputImageData[3*idx];
    float g=deviceInputImageData[3*idx+1];
    float b=deviceInputImageData[3*idx+2];
    // Weighted luminance; presumably these weights must match the ones used by
    // dataset_generator.cpp to produce output.ppm -- verify before changing.
    deviceOutputImageData[idx] = (0.21f * r + 0.71f * g + 0.07f * b);
  }
}

// Reads the input image named on the command line (via the wb arg helpers),
// grayscales it on the GPU and hands the result to wbSolution for checking.
int main(int argc, char *argv[]) {
  int imageChannels;
  int imageWidth;
  int imageHeight;
  char *inputImageFile;
  wbImage_t inputImage;
  wbImage_t outputImage;
  float *hostInputImageData;
  float *hostOutputImageData;
  float *deviceInputImageData;
  float *deviceOutputImageData;

  /* parse the input arguments */
  //@@ Insert code here
  wbArg_t args;
  args = wbArg_read(argc, argv);

  inputImageFile = wbArg_getInputFile(args, 0);
  inputImage = wbImport(inputImageFile);
  imageWidth = wbImage_getWidth(inputImage);
  imageHeight = wbImage_getHeight(inputImage);
  // For this lab the value is always 3
  imageChannels = wbImage_getChannels(inputImage);
  // Since the image is monochromatic, it only contains one channel
  outputImage = wbImage_new(imageWidth, imageHeight, 1);

  hostInputImageData = wbImage_getData(inputImage);
  hostOutputImageData = wbImage_getData(outputImage);

  wbTime_start(GPU, "Doing GPU Computation (memory + compute)");

  wbTime_start(GPU, "Doing GPU memory allocation");
  // NOTE(review): return values of the HIP calls below are ignored; consider
  // wrapping them in errCheck(...) as the macro above intends.
  hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
  hipMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * sizeof(float));
  wbTime_stop(GPU, "Doing GPU memory allocation");

  wbTime_start(Copy, "Copying data to the GPU");
  hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice);
  wbTime_stop(Copy, "Copying data to the GPU");

  ///////////////////////////////////////////////////////
  wbTime_start(Compute, "Doing the computation on the GPU");
  //@@ INSERT CODE HERE
  // 32x32 threads per block; grid rounded up to cover the whole image
  // (the kernel bounds-checks the overhang).
  dim3 dimBlock(32,32);
  dim3 dimGrid(ceil(imageWidth/32.0),ceil(imageHeight/32.0));
  hipLaunchKernelGGL(( ImageGrayScaleKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData,deviceOutputImageData,imageHeight,imageWidth);
  // NOTE(review): the launch is asynchronous -- without a hipDeviceSynchronize()
  // here the "Compute" timer below measures only launch overhead, not the kernel.
  wbTime_stop(Compute, "Doing the computation on the GPU");
  ///////////////////////////////////////////////////////

  wbTime_start(Copy, "Copying data from the GPU");
  // The blocking copy also serves as the synchronization point for the kernel.
  hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * sizeof(float), hipMemcpyDeviceToHost);
  wbTime_stop(Copy, "Copying data from the GPU");

  wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");

  wbSolution(args, outputImage);

  hipFree(deviceInputImageData);
  hipFree(deviceOutputImageData);

  wbImage_delete(outputImage);
  wbImage_delete(inputImage);

  return 0;
}
6e6dc013b104f4766d02430d0d4d543bfb229ff9.cu
/*
The "wb.h" file(library) has been included in this code.
First compile dataset_generator.cpp. You may use any no. of pixels in the
x and y dimensions. The dataset_generator will output input.ppm and output.ppm.
Compile this file using "./a.out input.ppm output.ppm"
*/
#include "wb.h"

//@@ define error checking macro here.
// Reports a failing CUDA API call (statement text + CUDA error string) and
// returns -1 from the enclosing function.
#define errCheck(stmt) \
  do { \
    cudaError_t err = stmt; \
    if (err != cudaSuccess) { \
      printErrorLog(ERROR, "Failed to run stmt ", #stmt); \
      printErrorLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
      return -1; \
    } \
  } while (0)

//@@ INSERT CODE HERE
// Converts an interleaved-RGB float image to a single-channel grayscale image.
// Launch layout: 2D grid of 2D blocks, one thread per output pixel.
__global__ void ImageGrayScaleKernel(float *deviceInputImageData, float *deviceOutputImageData, int imageHeight, int imageWidth)
{
  // Global pixel coordinates of this thread.
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  // The grid is rounded up, so guard threads that fall outside the image.
  if (row < imageHeight && col < imageWidth) {
    int idx = row * imageWidth + col;
    // Input stores 3 consecutive floats (R,G,B) per pixel.
    float r = deviceInputImageData[3 * idx];
    float g = deviceInputImageData[3 * idx + 1];
    float b = deviceInputImageData[3 * idx + 2];
    // Weighted luminance; presumably these weights must match the ones used by
    // dataset_generator.cpp to produce output.ppm -- do not "correct" them.
    deviceOutputImageData[idx] = (0.21f * r + 0.71f * g + 0.07f * b);
  }
}

// Reads the input image named on the command line (via the wb arg helpers),
// grayscales it on the GPU and hands the result to wbSolution for checking.
// Returns 0 on success, -1 on any CUDA error (via errCheck).
int main(int argc, char *argv[]) {
  int imageChannels;
  int imageWidth;
  int imageHeight;
  char *inputImageFile;
  wbImage_t inputImage;
  wbImage_t outputImage;
  float *hostInputImageData;
  float *hostOutputImageData;
  float *deviceInputImageData;
  float *deviceOutputImageData;

  /* parse the input arguments */
  //@@ Insert code here
  wbArg_t args;
  args = wbArg_read(argc, argv);

  inputImageFile = wbArg_getInputFile(args, 0);
  inputImage = wbImport(inputImageFile);
  imageWidth = wbImage_getWidth(inputImage);
  imageHeight = wbImage_getHeight(inputImage);
  // For this lab the value is always 3
  imageChannels = wbImage_getChannels(inputImage);
  // Since the output image is monochromatic, it only contains one channel
  outputImage = wbImage_new(imageWidth, imageHeight, 1);

  hostInputImageData = wbImage_getData(inputImage);
  hostOutputImageData = wbImage_getData(outputImage);

  wbTime_start(GPU, "Doing GPU Computation (memory + compute)");

  wbTime_start(GPU, "Doing GPU memory allocation");
  // Check every CUDA call: a failed allocation here would otherwise surface
  // only as a crash (or garbage output) much later.
  errCheck(cudaMalloc((void **)&deviceInputImageData,
                      imageWidth * imageHeight * imageChannels * sizeof(float)));
  errCheck(cudaMalloc((void **)&deviceOutputImageData,
                      imageWidth * imageHeight * sizeof(float)));
  wbTime_stop(GPU, "Doing GPU memory allocation");

  wbTime_start(Copy, "Copying data to the GPU");
  errCheck(cudaMemcpy(deviceInputImageData, hostInputImageData,
                      imageWidth * imageHeight * imageChannels * sizeof(float),
                      cudaMemcpyHostToDevice));
  wbTime_stop(Copy, "Copying data to the GPU");

  ///////////////////////////////////////////////////////
  wbTime_start(Compute, "Doing the computation on the GPU");
  //@@ INSERT CODE HERE
  // 32x32 threads per block; grid rounded up so the whole image is covered
  // (the kernel bounds-checks the overhang).
  dim3 dimBlock(32, 32);
  dim3 dimGrid(ceil(imageWidth / 32.0), ceil(imageHeight / 32.0));
  ImageGrayScaleKernel<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceOutputImageData, imageHeight, imageWidth);
  // Kernel launches are asynchronous: pick up launch-configuration errors,
  // then wait for completion so the "Compute" timer measures the kernel
  // itself rather than just the launch overhead.
  errCheck(cudaGetLastError());
  errCheck(cudaDeviceSynchronize());
  wbTime_stop(Compute, "Doing the computation on the GPU");
  ///////////////////////////////////////////////////////

  wbTime_start(Copy, "Copying data from the GPU");
  errCheck(cudaMemcpy(hostOutputImageData, deviceOutputImageData,
                      imageWidth * imageHeight * sizeof(float),
                      cudaMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying data from the GPU");

  wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");

  wbSolution(args, outputImage);

  errCheck(cudaFree(deviceInputImageData));
  errCheck(cudaFree(deviceOutputImageData));

  wbImage_delete(outputImage);
  wbImage_delete(inputImage);

  return 0;
}
135c39c3dcc6af6a9524e0131806b936603715f4.hip
// !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/

// GPU reduction kernels. `map` / `ind` are precomputed index tables relating
// input elements to output slots; presumably they are built by the host-side
// callers (see the dimGrid/dimBlock hints preserved above each kernel).

#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>

#include "eddl/hardware/gpu/gpu_kernels.h"

// Accumulates A into B through the index table: B[map[t]] += A[t].
// atomicAdd is required because several inputs may map to the same output.
// NOTE(review): despite the name this computes only the sum; the division by
// the element count presumably happens in the caller -- verify.
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
  }
}

// Elementwise gather-and-combine: A[t] += B[map[t]].
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]+=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] -= B[map[t]].
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]-=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] *= B[map[t]].
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]*=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] /= B[map[t]].
// No guard against a zero divisor (float division yields inf/nan).
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]/=B[map[thread_id_x]];
  }
}

//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
// One block per reduced slice: block blockIdx.x walks the rs indices of its
// slice, ind[rs*blockIdx.x .. rs*blockIdx.x+rs-1], and reduces I over them.
// Mode m selects the operation: 0 mean (sum/d), 1 sum, 2 max, 3 min.
// For max/min the winning flat position is also written to S (consumed by
// reduction_back_kernel). With keepdims the result is broadcast back to every
// input position of the slice; otherwise one value per slice goes to
// O[thread_id_x].
// NOTE(review): val/i remain uninitialized when rs==0 -- presumably rs>0 is
// guaranteed by the caller; verify.
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  int j;
  float sum=0;
  float v,val;
  int i;
  int p=rs*blockIdx.x;

  for(j=0;j<rs;j++,p++) {
    v=I[ind[p]];
    if (m==2) {
      // running max with argmax tracking
      if (j==0) {val=v;i=p;}
      else if (v>val) {
        val=v;
        i=p;
      }
    }
    else if (m==3) {
      // running min with argmin tracking
      if (j==0) {val=v;i=p;}
      else if (v<val) {
        val=v;
        i=p;
      }
    }
    else sum+=v;
  }

  p=rs*blockIdx.x;
  // set in Output
  if (m<2) { // mean or sum
    if (m==0) sum/=d;
    if (keepdims) {
      for(j=0;j<rs;j++,p++)
        O[ind[p]]=sum;
    }
    else O[thread_id_x]=sum;
  }
  else { // max or min
    if (keepdims) {
      for(j=0;j<rs;j++,p++) {
        O[ind[p]]=val;
        S[ind[p]]=i;
      }
    }
    else {
      O[thread_id_x]=val;
      S[thread_id_x]=i;
    }
  }
}

//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
// Backward pass of reduction_kernel: I is the incoming delta, O the delta
// being accumulated (+=). For max/min (m>=2) the delta is routed to the
// argmax/argmin position saved in S. For mean/sum the slice delta is taken
// (summed over the slice when keepdims, else read directly), divided by d for
// the mean (m==0), and added to every input position of the slice.
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
  int j;
  float val=0;
  int p;

  // set in Delta
  if (m>=2) {
    int p=S[thread_id_x];
    O[p]+=I[thread_id_x];
  }
  else {
    p=rs*blockIdx.x;
    if(keepdims) {
      for(j=0;j<rs;j++,p++)
        val+=I[ind[p]];
    }
    else val=I[thread_id_x];

    if (m==0) val/=d;

    p=rs*blockIdx.x;
    for(j=0;j<rs;j++,p++)
      O[ind[p]]+=val;
  }
}

////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
// Gather permutation: O[t] = I[ind[t]], one thread per element.
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size)
    O[thread_id_x]=I[ind[thread_id_x]];
}

// Broadcasts each reduced value red[k] to the rsize positions of its slice:
// O[ind[t]] = red[t/rsize], one thread per output element.
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size*rsize) {
    O[ind[thread_id_x]]=red[thread_id_x/rsize];
  }
}

// Same broadcast as reduction_kernel_keep, but accumulating (+=) into O.
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size*rsize) {
    O[ind[thread_id_x]]+=red[thread_id_x/rsize];
  }
}
135c39c3dcc6af6a9524e0131806b936603715f4.cu
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/

// GPU reduction kernels. `map` / `ind` are precomputed index tables relating
// input elements to output slots; presumably they are built by the host-side
// callers (see the dimGrid/dimBlock hints preserved above each kernel).

#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>

#include "eddl/hardware/gpu/gpu_kernels.h"

// Accumulates A into B through the index table: B[map[t]] += A[t].
// atomicAdd is required because several inputs may map to the same output.
// NOTE(review): despite the name this computes only the sum; the division by
// the element count presumably happens in the caller -- verify.
__global__ void reduce_mean(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    atomicAdd(&(B[map[thread_id_x]]),A[thread_id_x]);
  }
}

// Elementwise gather-and-combine: A[t] += B[map[t]].
__global__ void reduce_op_sum(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]+=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] -= B[map[t]].
__global__ void reduce_op_diff(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]-=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] *= B[map[t]].
__global__ void reduce_op_mult(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]*=B[map[thread_id_x]];
  }
}

// Elementwise gather-and-combine: A[t] /= B[map[t]].
// No guard against a zero divisor (float division yields inf/nan).
__global__ void reduce_op_div(float *A,float *B,int *map,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size) {
    A[thread_id_x]/=B[map[thread_id_x]];
  }
}

//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
// One block per reduced slice: block blockIdx.x walks the rs indices of its
// slice, ind[rs*blockIdx.x .. rs*blockIdx.x+rs-1], and reduces I over them.
// Mode m selects the operation: 0 mean (sum/d), 1 sum, 2 max, 3 min.
// For max/min the winning flat position is also written to S (consumed by
// reduction_back_kernel). With keepdims the result is broadcast back to every
// input position of the slice; otherwise one value per slice goes to
// O[thread_id_x].
// NOTE(review): val/i remain uninitialized when rs==0 -- presumably rs>0 is
// guaranteed by the caller; verify.
__global__ void reduction_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  int j;
  float sum=0;
  float v,val;
  int i;
  int p=rs*blockIdx.x;

  for(j=0;j<rs;j++,p++) {
    v=I[ind[p]];
    if (m==2) {
      // running max with argmax tracking
      if (j==0) {val=v;i=p;}
      else if (v>val) {
        val=v;
        i=p;
      }
    }
    else if (m==3) {
      // running min with argmin tracking
      if (j==0) {val=v;i=p;}
      else if (v<val) {
        val=v;
        i=p;
      }
    }
    else sum+=v;
  }

  p=rs*blockIdx.x;
  // set in Output
  if (m<2) { // mean or sum
    if (m==0) sum/=d;
    if (keepdims) {
      for(j=0;j<rs;j++,p++)
        O[ind[p]]=sum;
    }
    else O[thread_id_x]=sum;
  }
  else { // max or min
    if (keepdims) {
      for(j=0;j<rs;j++,p++) {
        O[ind[p]]=val;
        S[ind[p]]=i;
      }
    }
    else {
      O[thread_id_x]=val;
      S[thread_id_x]=i;
    }
  }
}

//dim3 dimGrid(RD->index.size());
//dim3 dimBlock(1);
// Backward pass of reduction_kernel: I is the incoming delta, O the delta
// being accumulated (+=). For max/min (m>=2) the delta is routed to the
// argmax/argmin position saved in S. For mean/sum the slice delta is taken
// (summed over the slice when keepdims, else read directly), divided by d for
// the mean (m==0), and added to every input position of the slice.
__global__ void reduction_back_kernel(float *I,float *O,float *S,int m, int keepdims,int d,int *ind,int rs)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
  int j;
  float val=0;
  int p;

  // set in Delta
  if (m>=2) {
    int p=S[thread_id_x];
    O[p]+=I[thread_id_x];
  }
  else {
    p=rs*blockIdx.x;
    if(keepdims) {
      for(j=0;j<rs;j++,p++)
        val+=I[ind[p]];
    }
    else val=I[thread_id_x];

    if (m==0) val/=d;

    p=rs*blockIdx.x;
    for(j=0;j<rs;j++,p++)
      O[ind[p]]+=val;
  }
}

////////////////////
// FOR SUM and MEAN
// Faster in Conv
///////////////////
//dim3 dimGrid(red_size);
//dim3 dimBlock(RD->index.size());
// Gather permutation: O[t] = I[ind[t]], one thread per element.
__global__ void reduction_permute(float *I,float *O,int *ind,int size)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size)
    O[thread_id_x]=I[ind[thread_id_x]];
}

// Broadcasts each reduced value red[k] to the rsize positions of its slice:
// O[ind[t]] = red[t/rsize], one thread per output element.
__global__ void reduction_kernel_keep(float *red, float *O, int *ind, int size, int rsize)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size*rsize) {
    O[ind[thread_id_x]]=red[thread_id_x/rsize];
  }
}

// Same broadcast as reduction_kernel_keep, but accumulating (+=) into O.
__global__ void reduction_kernel_keep_inc(float *red, float *O, int *ind, int size, int rsize)
{
  long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;

  if (thread_id_x<size*rsize) {
    O[ind[thread_id_x]]+=red[thread_id_x/rsize];
  }
}
cc9129ca362936238efb574545af42414b9ea3ed.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MatrixCopy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch configurations (BLOCKX, BLOCKY) and matrix sizes swept
// by the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmark harness: for the first argv[1] matrix sizes and all 20 block
// shapes, time 1000 launches of MatrixCopy and print
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);   // number of matrix sizes to sweep
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: allocate XSIZE*YSIZE *floats*, not bytes — the original
            // hipMalloc(&in, XSIZE*YSIZE) under-allocated by sizeof(float).
            float *in = NULL;
            hipMalloc(&in, XSIZE * YSIZE * sizeof(float));
            float *out = NULL;
            hipMalloc(&out, XSIZE * YSIZE * sizeof(float));
            int size = XSIZE * YSIZE;
            // Round the grid up so it covers the matrix for this block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);   // force lazy context creation before timing
            hipLaunchKernelGGL(( MatrixCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,size);
            hipDeviceSynchronize();
            // Warm-up launches, excluded from the measurement.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( MatrixCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( MatrixCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,size);
            }
            // BUG FIX: kernel launches are asynchronous; without this sync the
            // loop above only measured launch overhead, not kernel time.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original never freed — it leaked two buffers per
            // configuration across the whole sweep.
            hipFree(in);
            hipFree(out);
        }
    }
}
cc9129ca362936238efb574545af42414b9ea3ed.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "MatrixCopy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); MatrixCopy<<<gridBlock,threadBlock>>>(in,out,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { MatrixCopy<<<gridBlock,threadBlock>>>(in,out,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { MatrixCopy<<<gridBlock,threadBlock>>>(in,out,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d311db899f36e6658555c33f4bda3864697da2ee.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

void checkCUDAError(const char *msg);
__global__ void Kernel(int *dat);

// Interactive demo: reads a grid/block configuration from stdin, launches a
// kernel that records each thread's block and thread coordinates, and prints
// the table.  BUG FIX: `main()` with an implicit int return type is invalid
// C++ (hipcc compiles this as C++), so the return type is now explicit; the
// host and device buffers are also freed and main returns a value.
int main()
{
    int *dat_local, *dat_remote;
    int gx,gy;
    int bx,by,bz;
    int size;
    int numthreads,j;

    int driver_version = 0, runtime_version = 0;
    hipDriverGetVersion(&driver_version);
    hipRuntimeGetVersion(&runtime_version);
    printf("Driver Version: %d\n" "Runtime Version: %d\n", driver_version, runtime_version);

    printf(" %s\n","Enter grid dimensions: gx gy");
    scanf("%d %d",&gx,&gy);
    printf(" %s\n","Enter block dimensions: bx by bz");
    scanf("%d %d %d",&bx,&by,&bz);
    printf(" Grid dimensions: %3d%4d\n",gx,gy);
    printf(" Block dimensions: %3d%4d%4d\n",bx,by,bz);

    dim3 dimGrid(gx,gy);
    dim3 dimBlock(bx,by,bz);
    numthreads=gx*gy*bx*by*bz;
    size=6*sizeof(int)*numthreads;   // 6 ints recorded per thread

    hipMalloc((void**) &dat_remote, size);
    checkCUDAError("hipMalloc");
    dat_local=(int*)malloc(size);

    hipLaunchKernelGGL(( Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dat_remote);
    checkCUDAError("Kernel");

    hipMemcpy(dat_local, dat_remote, size,hipMemcpyDeviceToHost);
    checkCUDAError("copy");

    printf("%s\n","thread blockid(x y) threadid(x y z)");
    for(int i=0;i<numthreads;i++) {
        j=i*6;
        printf("%6d %3d %3d %3d %3d %3d\n",
            dat_local[j], dat_local[j+1],dat_local[j+2],
            dat_local[j+3],dat_local[j+4],dat_local[j+5]);
    }

    // BUG FIX: the original leaked both buffers.
    free(dat_local);
    hipFree(dat_remote);
    return 0;
}

// Each thread computes its global linear id and stores
// (thread, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, threadIdx.z)
// at dat[thread*6 .. thread*6+5].
__global__ void Kernel(int *dat)
{
    /* get my block within a grid */
    int myblock=blockIdx.x+blockIdx.y*gridDim.x;
    /* how big is each block within a grid */
    int blocksize=blockDim.x*blockDim.y*blockDim.z;
    /* get thread within a block */
    int subthread=threadIdx.z*(blockDim.x*blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
    /* find my thread */
    int thread=myblock*blocksize+subthread;

#if __DEVICE_EMULATION__
    printf("gridDim=(%3d %3d) blockIdx=(%3d %3d) blockDim=(%3d %3d %3d) threadIdx=(%3d %3d %3d) %6d\n",
        gridDim.x,gridDim.y, blockIdx.x,blockIdx.y,
        blockDim.x,blockDim.y,blockDim.z,
        threadIdx.x,threadIdx.y,threadIdx.z,thread);
#endif

    /* starting index into array */
    int index=thread*6;
    dat[index]=thread;
    dat[index+1]=blockIdx.x;
    dat[index+2]=blockIdx.y;
    dat[index+3]=threadIdx.x;
    dat[index+4]=threadIdx.y;
    dat[index+5]=threadIdx.z;
}

// Print and abort on any pending HIP error (covers both API return codes and
// asynchronous kernel faults surfaced by hipGetLastError).
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
d311db899f36e6658555c33f4bda3864697da2ee.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

void checkCUDAError(const char *msg);
__global__ void Kernel(int *dat);

// Interactive demo: reads a grid/block configuration from stdin, launches a
// kernel that records each thread's block and thread coordinates, and prints
// the table.  BUG FIX: `main()` with an implicit int return type is invalid
// C++ (nvcc compiles this as C++), so the return type is now explicit; the
// host and device buffers are also freed and main returns a value.
int main()
{
    int *dat_local, *dat_remote;
    int gx,gy;
    int bx,by,bz;
    int size;
    int numthreads,j;

    int driver_version = 0, runtime_version = 0;
    cudaDriverGetVersion(&driver_version);
    cudaRuntimeGetVersion(&runtime_version);
    printf("Driver Version: %d\n" "Runtime Version: %d\n", driver_version, runtime_version);

    printf(" %s\n","Enter grid dimensions: gx gy");
    scanf("%d %d",&gx,&gy);
    printf(" %s\n","Enter block dimensions: bx by bz");
    scanf("%d %d %d",&bx,&by,&bz);
    printf(" Grid dimensions: %3d%4d\n",gx,gy);
    printf(" Block dimensions: %3d%4d%4d\n",bx,by,bz);

    dim3 dimGrid(gx,gy);
    dim3 dimBlock(bx,by,bz);
    numthreads=gx*gy*bx*by*bz;
    size=6*sizeof(int)*numthreads;   // 6 ints recorded per thread

    cudaMalloc((void**) &dat_remote, size);
    checkCUDAError("cudaMalloc");
    dat_local=(int*)malloc(size);

    Kernel<<<dimGrid,dimBlock>>>(dat_remote);
    checkCUDAError("Kernel");

    cudaMemcpy(dat_local, dat_remote, size,cudaMemcpyDeviceToHost);
    checkCUDAError("copy");

    printf("%s\n","thread blockid(x y) threadid(x y z)");
    for(int i=0;i<numthreads;i++) {
        j=i*6;
        printf("%6d %3d %3d %3d %3d %3d\n",
            dat_local[j], dat_local[j+1],dat_local[j+2],
            dat_local[j+3],dat_local[j+4],dat_local[j+5]);
    }

    // BUG FIX: the original leaked both buffers.
    free(dat_local);
    cudaFree(dat_remote);
    return 0;
}

// Each thread computes its global linear id and stores
// (thread, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, threadIdx.z)
// at dat[thread*6 .. thread*6+5].
__global__ void Kernel(int *dat)
{
    /* get my block within a grid */
    int myblock=blockIdx.x+blockIdx.y*gridDim.x;
    /* how big is each block within a grid */
    int blocksize=blockDim.x*blockDim.y*blockDim.z;
    /* get thread within a block */
    int subthread=threadIdx.z*(blockDim.x*blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
    /* find my thread */
    int thread=myblock*blocksize+subthread;

#if __DEVICE_EMULATION__
    printf("gridDim=(%3d %3d) blockIdx=(%3d %3d) blockDim=(%3d %3d %3d) threadIdx=(%3d %3d %3d) %6d\n",
        gridDim.x,gridDim.y, blockIdx.x,blockIdx.y,
        blockDim.x,blockDim.y,blockDim.z,
        threadIdx.x,threadIdx.y,threadIdx.z,thread);
#endif

    /* starting index into array */
    int index=thread*6;
    dat[index]=thread;
    dat[index+1]=blockIdx.x;
    dat[index+2]=blockIdx.y;
    dat[index+3]=threadIdx.x;
    dat[index+4]=threadIdx.y;
    dat[index+5]=threadIdx.z;
}

// Print and abort on any pending CUDA error (covers both API return codes and
// asynchronous kernel faults surfaced by cudaGetLastError).
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
ef1bf3750a55d0a8357540cfb10f623fe44af73b.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <iostream>   // FIX: std::cout is used below — don't rely on transitive includes
#include <cstring>    // FIX: strerror is used below
#include "CudaParser.cuh"

// Device-global parser instance, constructed by thread 0 of the kernel.
// NOTE(review): it is never deleted — acceptable in a one-shot test program,
// but a leak if this kernel is ever launched repeatedly.
__device__ cu::Mathexpr<thrust::complex<double>>* g_pExpr;

// Thread 0 parses the expression string, then every thread i < n evaluates
// it at the point (i, i) and stores the value into vec[i].  The __syncthreads
// barrier publishes g_pExpr (and the status in *pCode) to the whole block;
// this only works because the kernel is launched with a single block.
__global__ void memset_expr(cu::CudaParserErrorCodes* pCode, thrust::complex<double>* vec, std::size_t n, const char* pStr, std::size_t cbStr)
{
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i == 0)
        g_pExpr = new cu::Mathexpr<thrust::complex<double>>(pCode, pStr, cbStr);
    __syncthreads();
    if (*pCode == cu::CudaParserErrorCodes::Success && i < n)
    {
        auto& m = *g_pExpr;
        auto rv = m(thrust::complex<double>(i, i));
        *pCode = rv.return_code();
        if (bool(rv))
            vec[i] = rv.value();
    }
}

// Test driver: parse and evaluate a jinc-like expression on the device and
// print the 2048 resulting complex values, or the parser error code.
int main()
{
    hipError_t cudaStatus;
    const char pStr[] = "f(x) = 2*j1(0.1*3.14*sin(x)) / (0.1*3.14*sin(x))";
    thrust::complex<double> V[2048];

    // The recursive-descent parser needs a deeper device stack than default.
    std::size_t cbStack;
    cudaStatus = hipDeviceGetLimit(&cbStack, hipLimitStackSize);
    if (cudaStatus != 0)
        return -6;
    cudaStatus = hipDeviceSetLimit(hipLimitStackSize, 1 << 13);
    if (cudaStatus != 0)
        return -5;

    auto pStr_d = make_cuda_unique_ptr<char>(sizeof(pStr));
    if (!pStr_d)
        return -100;
    auto V_d = make_cuda_unique_ptr<thrust::complex<double>>(sizeof(V) / sizeof(thrust::complex<double>));
    if (!V_d)
        return -100;
    auto pCode = make_cuda_unique_ptr<cu::CudaParserErrorCodes>();
    if (!pCode)
        return -100;

    cudaStatus = hipMemcpy(pStr_d.get(), pStr, sizeof(pStr) - 1, hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess)
        return -1;

    // One block of 2048 threads — memset_expr's barrier relies on this.
    hipLaunchKernelGGL(( memset_expr), dim3(1), dim3(sizeof(V) / sizeof(thrust::complex<double>)), 0, 0, pCode.get(), V_d.get(),
        sizeof(V) / sizeof(thrust::complex<double>), pStr_d.get(), sizeof(pStr) - 1);

    /*cuda_string expression = "f(x, y) = min(x, 5, y) + min(y, 5, x) + max(x, 5, y) + max(y, 5, x)";
    Mathexpr<double> mathexpr(expression);
    cuda_vector<double> v;
    v.push_back(1);
    v.push_back(10);
    mathexpr.init_variables(v);*/
    //std::cout << "Value: " << mathexpr.compute() << "\n";
    //cuda_list<double> l;

    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize failed!");
        return -2;
    }
    cu::CudaParserErrorCodes errc;
    cudaStatus = hipMemcpy(&errc, pCode.get(), sizeof(cu::CudaParserErrorCodes), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess)
        return -3;
    if (errc == cu::CudaParserErrorCodes::Success)
    {
        cudaStatus = hipMemcpy(V, V_d.get(), sizeof(V), hipMemcpyDeviceToHost);
        if (cudaStatus != hipSuccess)
            return -3;
        for (auto elem:V)
            std::cout << elem << " ";
        std::cout << "\n";
    }else
    {
        // NOTE(review): strerror maps errno values, not parser codes — the
        // string printed here is presumably meaningless; verify against the
        // CudaParser API for a proper message function.
        printf("CUDA kernel returned code %d (%s)", int(errc), strerror(errc));
        return -50;
    }
    //printf("%d", l.front());

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return -4;
    }

    return 0;
}
ef1bf3750a55d0a8357540cfb10f623fe44af73b.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <iostream>   // FIX: std::cout is used below — don't rely on transitive includes
#include <cstring>    // FIX: strerror is used below
#include "CudaParser.cuh"

// Device-global parser instance, constructed by thread 0 of the kernel.
// NOTE(review): it is never deleted — acceptable in a one-shot test program,
// but a leak if this kernel is ever launched repeatedly.
__device__ cu::Mathexpr<thrust::complex<double>>* g_pExpr;

// Thread 0 parses the expression string, then every thread i < n evaluates
// it at the point (i, i) and stores the value into vec[i].  The __syncthreads
// barrier publishes g_pExpr (and the status in *pCode) to the whole block;
// this only works because the kernel is launched with a single block.
__global__ void memset_expr(cu::CudaParserErrorCodes* pCode, thrust::complex<double>* vec, std::size_t n, const char* pStr, std::size_t cbStr)
{
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i == 0)
        g_pExpr = new cu::Mathexpr<thrust::complex<double>>(pCode, pStr, cbStr);
    __syncthreads();
    if (*pCode == cu::CudaParserErrorCodes::Success && i < n)
    {
        auto& m = *g_pExpr;
        auto rv = m(thrust::complex<double>(i, i));
        *pCode = rv.return_code();
        if (bool(rv))
            vec[i] = rv.value();
    }
}

// Test driver: parse and evaluate a jinc-like expression on the device and
// print the 2048 resulting complex values, or the parser error code.
int main()
{
    cudaError_t cudaStatus;
    const char pStr[] = "f(x) = 2*j1(0.1*3.14*sin(x)) / (0.1*3.14*sin(x))";
    thrust::complex<double> V[2048];

    // The recursive-descent parser needs a deeper device stack than default.
    std::size_t cbStack;
    cudaStatus = cudaDeviceGetLimit(&cbStack, cudaLimitStackSize);
    if (cudaStatus != 0)
        return -6;
    cudaStatus = cudaDeviceSetLimit(cudaLimitStackSize, 1 << 13);
    if (cudaStatus != 0)
        return -5;

    auto pStr_d = make_cuda_unique_ptr<char>(sizeof(pStr));
    if (!pStr_d)
        return -100;
    auto V_d = make_cuda_unique_ptr<thrust::complex<double>>(sizeof(V) / sizeof(thrust::complex<double>));
    if (!V_d)
        return -100;
    auto pCode = make_cuda_unique_ptr<cu::CudaParserErrorCodes>();
    if (!pCode)
        return -100;

    cudaStatus = cudaMemcpy(pStr_d.get(), pStr, sizeof(pStr) - 1, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
        return -1;

    // One block of 2048 threads — memset_expr's barrier relies on this.
    memset_expr<<<1, sizeof(V) / sizeof(thrust::complex<double>)>>>(pCode.get(), V_d.get(),
        sizeof(V) / sizeof(thrust::complex<double>), pStr_d.get(), sizeof(pStr) - 1);

    /*cuda_string expression = "f(x, y) = min(x, 5, y) + min(y, 5, x) + max(x, 5, y) + max(y, 5, x)";
    Mathexpr<double> mathexpr(expression);
    cuda_vector<double> v;
    v.push_back(1);
    v.push_back(10);
    mathexpr.init_variables(v);*/
    //std::cout << "Value: " << mathexpr.compute() << "\n";
    //cuda_list<double> l;

    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize failed!");
        return -2;
    }
    cu::CudaParserErrorCodes errc;
    cudaStatus = cudaMemcpy(&errc, pCode.get(), sizeof(cu::CudaParserErrorCodes), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
        return -3;
    if (errc == cu::CudaParserErrorCodes::Success)
    {
        cudaStatus = cudaMemcpy(V, V_d.get(), sizeof(V), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess)
            return -3;
        for (auto elem:V)
            std::cout << elem << " ";
        std::cout << "\n";
    }else
    {
        // NOTE(review): strerror maps errno values, not parser codes — the
        // string printed here is presumably meaningless; verify against the
        // CudaParser API for a proper message function.
        printf("CUDA kernel returned code %d (%s)", int(errc), strerror(errc));
        return -50;
    }
    //printf("%d", l.front());

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return -4;
    }

    return 0;
}
42259bdd189fbdc1ec0b9939d75bc84ca4a41702.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> __global__ void init(int n, float *x, float* y) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { x[i] = 1.0f; y[i] = 2.0f; } } // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { // threadIdx.x contains the index of the current thread within its block, // blockDim.x contains the number of threads in the block int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N * sizeof(float)); hipMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the device int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; init << < numBlocks, blockSize >> > (N, x, y); add << < numBlocks, blockSize >> > (N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
42259bdd189fbdc1ec0b9939d75bc84ca4a41702.cu
#include <iostream> #include <math.h> __global__ void init(int n, float *x, float* y) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { x[i] = 1.0f; y[i] = 2.0f; } } // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { // threadIdx.x contains the index of the current thread within its block, // blockDim.x contains the number of threads in the block int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the device int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; init << < numBlocks, blockSize >> > (N, x, y); add << < numBlocks, blockSize >> > (N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
2b0cc9b506e6176a5a54732ae89a796dca125013.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.hpp"

#include "gemm.hpp"
#include "size.hpp"

namespace CudaMM
{

namespace Kernel
{

Matrix2D<0, 0>::Matrix2D(CudaMM::Matrix2D<0, 0> const& mat)
    : pitch{mat.pitch}, rows{mat.rows}, cols{mat.cols}
{
}

// Tiled GEMM kernel.
// Grid: (result.cols / BlockCols, result.rows / BlockRows, lhs.cols / MaxAccumulation);
// block: (BlockColThreads, BlockRowThreads).
// Needs sizeof(Element) * SharedSize bytes of dynamic shared memory, carved
// into an lhs tile, an rhs tile, and a partial-result tile.  Blocks along
// gridDim.z compute partial dot products and combine them into `result`
// with atomicAdd, so `result` must be zeroed before launch.
template <>
__global__ __launch_bounds__(BlockSize, MinBlocksPerMultiprocessor) void gemm_impl<LhsRows / Devices, LhsCols, RhsCols>(
    ProblemData<LhsRows / Devices, LhsCols, RhsCols> problem,
    const Element* __restrict__ lhs, const Element* __restrict__ rhs, Element* __restrict__ result)
{
    extern __shared__ Element shared_memory[];

    // Carve the dynamic shared buffer into the three logical tiles:
    // Element lhs_shared[LhsSharedRows][LhsSharedCols];
    // Element rhs_shared[RhsSharedRows][RhsSharedCols];
    // Element result_shared[ResultSharedRows][ResultSharedCols];
    Element(*__restrict__ lhs_shared)[LhsSharedCols] = reinterpret_cast<Element(*)[LhsSharedCols]>(shared_memory);
    Element(*__restrict__ rhs_shared)[RhsSharedCols] = reinterpret_cast<Element(*)[RhsSharedCols]>(lhs_shared + LhsSharedRows);
    Element(*__restrict__ result_shared)[ResultSharedCols] = reinterpret_cast<Element(*)[ResultSharedCols]>(rhs_shared + RhsSharedRows);

    // Per-thread accumulators that do not fit in the shared result tile.
    Element result_reg[ThreadRows][ThreadCols] = {};

    auto const block_offset_row = BlockRows * blockIdx.y, block_offset_col = BlockCols * blockIdx.x;
    auto const &row_in_block = threadIdx.y, &col_in_block = threadIdx.x;
    auto const thread_idx = BlockColThreads * row_in_block + col_in_block;

    // storing to lhs_shared is divided into (LhsLoadUnitRows x LhsLoadUnitCols (= BlockSize))
    constexpr uint32_t LhsLoadUnitCols = gcd(Stride, BlockSize), LhsLoadUnitRows = BlockSize / LhsLoadUnitCols;
    // storing to rhs_shared is divided into (RhsLoadUnitRows x RhsLoadUnitCols (= BlockSize))
    constexpr uint32_t RhsLoadUnitCols = BlockColThreads * gcd(ThreadCols, BlockRowThreads), RhsLoadUnitRows = BlockSize / RhsLoadUnitCols;

    auto const lhs_load_row = thread_idx / LhsLoadUnitCols, lhs_load_col = thread_idx % LhsLoadUnitCols;
    auto const rhs_load_row = thread_idx / RhsLoadUnitCols, rhs_load_col = thread_idx % RhsLoadUnitCols;

    // Advance the global pointers to this thread's first load position.
    lhs += (block_offset_row + lhs_load_row * (BlockRows / LhsLoadUnitRows)) * problem.lhs.pitch
        + blockIdx.z * MaxAccumulation + lhs_load_col;
    rhs += (blockIdx.z * MaxAccumulation + rhs_load_row * (Stride / RhsLoadUnitRows)) * problem.rhs.pitch
        + block_offset_col + rhs_load_col;

    // initialize result_shared
    {
#pragma unroll
        for (uint32_t col_begin{0}; col_begin < (BlockColThreads * ThreadSharedCols); col_begin += BlockColThreads) {
#pragma unroll
            for (uint32_t row_begin{0}; row_begin < (BlockRowThreads * ThreadSharedRows); row_begin += BlockRowThreads) {
                result_shared[row_begin + row_in_block][col_begin + col_in_block] = 0;
            }
        }
    }

#pragma unroll
    for (uint32_t lhs_stride_col{0}; lhs_stride_col < MaxAccumulation; lhs_stride_col += Stride) {
        auto const& rhs_stride_row = lhs_stride_col;

        // Everyone must be done reading the previous tiles before they are reused.
        if (lhs_stride_col != 0) {
            __syncthreads();
        }

        // load lhs
        {
#pragma unroll
            for (uint32_t offset_col{0}; offset_col < Stride; offset_col += LhsLoadUnitCols) {
#pragma unroll
                for (uint32_t offset_row{0}; offset_row < (BlockRows / LhsLoadUnitRows); offset_row++) {
                    lhs_shared[offset_row + lhs_load_row * (BlockRows / LhsLoadUnitRows)][offset_col + lhs_load_col]
                        = lhs[offset_row * problem.lhs.pitch + (lhs_stride_col + offset_col)];
                }
            }
        }

        // load rhs
        {
#pragma unroll
            for (uint32_t offset_col{0}; offset_col < BlockCols; offset_col += RhsLoadUnitCols) {
#pragma unroll
                for (uint32_t offset_row{0}; offset_row < (Stride / RhsLoadUnitRows); offset_row++) {
                    rhs_shared[offset_row + rhs_load_row * (Stride / RhsLoadUnitRows)][offset_col + rhs_load_col]
                        = rhs[(rhs_stride_row + offset_row) * problem.rhs.pitch + offset_col];
                }
            }
        }
        __syncthreads();

#pragma unroll
        for (uint32_t in_stride{0}; in_stride < Stride; in_stride++) {
            if (ThreadRows < ThreadCols) {
                // rows is less than cols, so load lhs first
                Element lhs_cache[ThreadRows];
#pragma unroll
                for (uint32_t row{0}; row < ThreadRows; row++) {
                    lhs_cache[row] = lhs_shared[row * BlockRowThreads + row_in_block][in_stride];
                }

                // then, load each of rhs and calc
#pragma unroll
                for (uint32_t col{0}; col < ThreadCols; col++) {
                    auto rhs_cache = rhs_shared[in_stride][col * BlockColThreads + col_in_block];
#pragma unroll
                    for (uint32_t row{0}; row < ThreadRows; row++) {
                        auto const result = lhs_cache[row] * rhs_cache;
                        if (row < ThreadSharedRows) {
                            if (col < ThreadSharedCols) {
                                result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block] += result;
                            } else {
                                result_reg[row][col] += result;
                            }
                        } else {
                            result_reg[row][col] += result;
                        }
                    }
                }
            } else {
                // cols is less than rows, so load rhs first
                Element rhs_cache[ThreadCols];
#pragma unroll
                for (uint32_t col{0}; col < ThreadCols; col++) {
                    rhs_cache[col] = rhs_shared[in_stride][col * BlockColThreads + col_in_block];
                }

                // then, load each of lhs and calc
#pragma unroll
                for (uint32_t row{0}; row < ThreadRows; row++) {
                    auto lhs_cache = lhs_shared[row * BlockRowThreads + row_in_block][in_stride];
#pragma unroll
                    for (uint32_t col{0}; col < ThreadCols; col++) {
                        auto const result = lhs_cache * rhs_cache[col];
                        if (row < ThreadSharedRows) {
                            if (col < ThreadSharedCols) {
                                result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block] += result;
                            } else {
                                result_reg[row][col] += result;
                            }
                        } else {
                            result_reg[row][col] += result;
                        }
                    }
                }
            }
        }
    }

    // Publish the per-thread partial sums.  atomicAdd is required because
    // blocks along gridDim.z accumulate into the same output elements.
#pragma unroll
    for (uint32_t col{0}; col < ThreadCols; col++) {
#pragma unroll
        for (uint32_t row{0}; row < ThreadRows; row++) {
            if (row < ThreadSharedRows) {
                if (col < ThreadSharedCols) {
                    atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads],
                        result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block]);
                } else {
                    atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads],
                        result_reg[row][col]);
                }
            } else {
                atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads],
                    result_reg[row][col]);
            }
        }
    }
}

// Host-side driver for device 0: validate the tiling at compile time, zero
// the result, stage both inputs, launch the tiled kernel, and copy the
// product back.  All transfers run on the null stream.
template <>
void gemm<Devices, 0, LhsRows / Devices, LhsCols, RhsCols>(
    CudaMM::ProblemData<LhsRows / Devices, LhsCols, RhsCols>& problem,
    const Element* lhs, const Element* rhs, Element* result)
{
    // for 128-byte transactions on loading from global memory
    static_assert(BlockCols % WarpThreads == 0, "BlockCols % WarpThreads == 0");
    static_assert(Stride % WarpThreads == 0, "Stride % WarpThreads == 0");

    // loading from global memory should be unrolled loops
    static_assert((ThreadRows * Stride) % BlockColThreads == 0, "(ThreadRows * Stride) % BlockColThreads == 0");
    static_assert((ThreadCols * Stride) % BlockRowThreads == 0, "(ThreadCols * Stride) % BlockRowThreads == 0");

    // block size should be a multiple of warp size
    static_assert(BlockSize % WarpThreads == 0, "BlockSize % WarpThreads == 0");

    // result stored in shared memory should be a part
    static_assert(ThreadSharedRows <= ThreadRows, "ThreadSharedRows <= ThreadRows");
    static_assert(ThreadSharedCols <= ThreadCols, "ThreadSharedCols <= ThreadCols");
    static_assert(ThreadSharedRows != 0 || ThreadSharedCols == 0, "ThreadSharedRows != 0 || ThreadSharedCols == 0");
    static_assert(ThreadSharedRows == 0 || ThreadSharedCols != 0, "ThreadSharedRows == 0 || ThreadSharedCols != 0");

    // restriction of hardware
    static_assert(BlockSize <= 1024, "BlockSize <= 1024");
    static_assert(MinBlocksPerMultiprocessor <= 32, "MinBlocksPerMultiprocessor <= 32");
    static_assert(BlockSize * MinBlocksPerMultiprocessor <= 2048, "BlockSize * MinBlocksPerMultiprocessor <= 2048");
    static_assert(sizeof(Element) * SharedSize * MinBlocksPerMultiprocessor <= 0x18000,
        "sizeof(Element) * SharedSize * MinBlocksPerMultiprocessor <= 0x18000");

    static_assert(problem.result.rows % BlockRows == 0, "problem.result.rows % BlockRows == 0");
    static_assert(problem.result.cols % BlockCols == 0, "problem.result.cols % BlockCols == 0");
    static_assert(problem.lhs.cols % MaxAccumulation == 0, "problem.lhs.cols % MaxAccumulation == 0");
    static_assert(MaxAccumulation % Stride == 0, "MaxAccumulation % Stride == 0");

    // The kernel accumulates with atomicAdd, so the result must start at zero.
    CUDA_CHECK(hipMemset2DAsync(
        problem.result.data.get(), sizeof(Element) * problem.result.pitch,
        0,
        sizeof(Element) * problem.result.cols, problem.result.rows, 0));
    CUDA_CHECK(hipMemcpy2DAsync(
        problem.lhs.data.get(), sizeof(Element) * problem.lhs.pitch,
        lhs, sizeof(Element) * problem.lhs.cols,
        sizeof(Element) * problem.lhs.cols, problem.lhs.rows, hipMemcpyDefault, 0));
    CUDA_CHECK(hipMemcpy2DAsync(
        problem.rhs.data.get(), sizeof(Element) * problem.rhs.pitch,
        rhs, sizeof(Element) * problem.rhs.cols,
        sizeof(Element) * problem.rhs.cols, problem.rhs.rows, hipMemcpyDefault, 0));

    // Opt in to the >48 KB dynamic shared memory the kernel may need.
    CUDA_CHECK(hipFuncSetAttribute(gemm_impl<LhsRows / Devices, LhsCols, RhsCols>,
        hipFuncAttributeMaxDynamicSharedMemorySize, sizeof(Element) * SharedSize));
    CUDA_CHECK(hipFuncSetAttribute(gemm_impl<LhsRows / Devices, LhsCols, RhsCols>,
        hipFuncAttributePreferredSharedMemoryCarveout, 100));

    // BUG FIX: the hipify-generated launch here was syntactically malformed
    // (CUDA brace-initializers were spliced into dim3(...) constructors) and
    // omitted hipLaunchKernelGGL's stream argument, shifting every kernel
    // argument by one position.
    hipLaunchKernelGGL((gemm_impl<LhsRows / Devices, LhsCols, RhsCols>),
        dim3(problem.result.cols / BlockCols, problem.result.rows / BlockRows, problem.lhs.cols / MaxAccumulation),
        dim3(BlockColThreads, BlockRowThreads),
        sizeof(Element) * SharedSize, 0,
        toKernel(problem),
        problem.lhs.data.get(), problem.rhs.data.get(), problem.result.data.get());

    CUDA_CHECK(hipMemcpy2DAsync(
        result, sizeof(Element) * problem.result.cols,
        problem.result.data.get(), sizeof(Element) * problem.result.pitch,
        sizeof(Element) * problem.result.cols, problem.result.rows, hipMemcpyDefault, 0));
}

// Driver for device 1 — delegates to the device-0 implementation.
template <>
void gemm<Devices, 1, LhsRows / Devices, LhsCols, RhsCols>(
    CudaMM::ProblemData<LhsRows / Devices, LhsCols, RhsCols>& problem,
    const Element* lhs, const Element* rhs, Element* result)
{
    gemm<Devices, 0, LhsRows / Devices, LhsCols, RhsCols>(
        problem, lhs, rhs, result);
}

}  // namespace Kernel

}  // namespace CudaMM

template void CudaMM::gemm<LhsRows, LhsCols, RhsCols>(
    CudaMM::DeviceData<LhsRows, LhsCols, RhsCols>&,
    const Element* lhs, const Element* rhs, Element* result);
2b0cc9b506e6176a5a54732ae89a796dca125013.cu
#include "kernel.hpp" #include "gemm.hpp" #include "size.hpp" namespace CudaMM { namespace Kernel { Matrix2D<0, 0>::Matrix2D(CudaMM::Matrix2D<0, 0> const& mat) : pitch{mat.pitch}, rows{mat.rows}, cols{mat.cols} { } template <> __global__ __launch_bounds__(BlockSize, MinBlocksPerMultiprocessor) void gemm_impl<LhsRows / Devices, LhsCols, RhsCols>( ProblemData<LhsRows / Devices, LhsCols, RhsCols> problem, const Element* __restrict__ lhs, const Element* __restrict__ rhs, Element* __restrict__ result) { extern __shared__ Element shared_memory[]; // Element lhs_shared[LhsSharedRows][LhsSharedCols]; // Element rhs_shared[RhsSharedRows][RhsSharedCols]; // Element result_shared[ResultSharedRows][ResultSharedCols]; Element(*__restrict__ lhs_shared)[LhsSharedCols] = reinterpret_cast<Element(*)[LhsSharedCols]>(shared_memory); Element(*__restrict__ rhs_shared)[RhsSharedCols] = reinterpret_cast<Element(*)[RhsSharedCols]>(lhs_shared + LhsSharedRows); Element(*__restrict__ result_shared)[ResultSharedCols] = reinterpret_cast<Element(*)[ResultSharedCols]>(rhs_shared + RhsSharedRows); Element result_reg[ThreadRows][ThreadCols] = {}; auto const block_offset_row = BlockRows * blockIdx.y, block_offset_col = BlockCols * blockIdx.x; auto const &row_in_block = threadIdx.y, &col_in_block = threadIdx.x; auto const thread_idx = BlockColThreads * row_in_block + col_in_block; // storing to lhs_shared is divided into (LhsLoadUnitRows x LhsLoadUnitCols (= BlockSize)) constexpr uint32_t LhsLoadUnitCols = gcd(Stride, BlockSize), LhsLoadUnitRows = BlockSize / LhsLoadUnitCols; // storing to rhs_shared is divided into (RhsLoadUnitRows x RhsLoadUnitCols (= BlockSize)) constexpr uint32_t RhsLoadUnitCols = BlockColThreads * gcd(ThreadCols, BlockRowThreads), RhsLoadUnitRows = BlockSize / RhsLoadUnitCols; auto const lhs_load_row = thread_idx / LhsLoadUnitCols, lhs_load_col = thread_idx % LhsLoadUnitCols; auto const rhs_load_row = thread_idx / RhsLoadUnitCols, rhs_load_col = thread_idx % RhsLoadUnitCols; 
lhs += (block_offset_row + lhs_load_row * (BlockRows / LhsLoadUnitRows)) * problem.lhs.pitch + blockIdx.z * MaxAccumulation + lhs_load_col; rhs += (blockIdx.z * MaxAccumulation + rhs_load_row * (Stride / RhsLoadUnitRows)) * problem.rhs.pitch + block_offset_col + rhs_load_col; // initialize result_shared { #pragma unroll for (uint32_t col_begin{0}; col_begin < (BlockColThreads * ThreadSharedCols); col_begin += BlockColThreads) { #pragma unroll for (uint32_t row_begin{0}; row_begin < (BlockRowThreads * ThreadSharedRows); row_begin += BlockRowThreads) { result_shared[row_begin + row_in_block][col_begin + col_in_block] = 0; } } } #pragma unroll for (uint32_t lhs_stride_col{0}; lhs_stride_col < MaxAccumulation; lhs_stride_col += Stride) { auto const& rhs_stride_row = lhs_stride_col; if (lhs_stride_col != 0) { __syncthreads(); } // load lhs { #pragma unroll for (uint32_t offset_col{0}; offset_col < Stride; offset_col += LhsLoadUnitCols) { #pragma unroll for (uint32_t offset_row{0}; offset_row < (BlockRows / LhsLoadUnitRows); offset_row++) { lhs_shared[offset_row + lhs_load_row * (BlockRows / LhsLoadUnitRows)][offset_col + lhs_load_col] = lhs[offset_row * problem.lhs.pitch + (lhs_stride_col + offset_col)]; } } } // load rhs { #pragma unroll for (uint32_t offset_col{0}; offset_col < BlockCols; offset_col += RhsLoadUnitCols) { #pragma unroll for (uint32_t offset_row{0}; offset_row < (Stride / RhsLoadUnitRows); offset_row++) { rhs_shared[offset_row + rhs_load_row * (Stride / RhsLoadUnitRows)][offset_col + rhs_load_col] = rhs[(rhs_stride_row + offset_row) * problem.rhs.pitch + offset_col]; } } } __syncthreads(); #pragma unroll for (uint32_t in_stride{0}; in_stride < Stride; in_stride++) { if (ThreadRows < ThreadCols) { // rows is less than cols, so load lhs first Element lhs_cache[ThreadRows]; #pragma unroll for (uint32_t row{0}; row < ThreadRows; row++) { lhs_cache[row] = lhs_shared[row * BlockRowThreads + row_in_block][in_stride]; } // then, load each of rhs and calc 
#pragma unroll for (uint32_t col{0}; col < ThreadCols; col++) { auto rhs_cache = rhs_shared[in_stride][col * BlockColThreads + col_in_block]; #pragma unroll for (uint32_t row{0}; row < ThreadRows; row++) { auto const result = lhs_cache[row] * rhs_cache; if (row < ThreadSharedRows) { if (col < ThreadSharedCols) { result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block] += result; } else { result_reg[row][col] += result; } } else { result_reg[row][col] += result; } } } } else { // cols is less than rows, so load rhs first Element rhs_cache[ThreadCols]; #pragma unroll for (uint32_t col{0}; col < ThreadCols; col++) { rhs_cache[col] = rhs_shared[in_stride][col * BlockColThreads + col_in_block]; } // then, load each of lhs and calc #pragma unroll for (uint32_t row{0}; row < ThreadRows; row++) { auto lhs_cache = lhs_shared[row * BlockRowThreads + row_in_block][in_stride]; #pragma unroll for (uint32_t col{0}; col < ThreadCols; col++) { auto const result = lhs_cache * rhs_cache[col]; if (row < ThreadSharedRows) { if (col < ThreadSharedCols) { result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block] += result; } else { result_reg[row][col] += result; } } else { result_reg[row][col] += result; } } } } } } #pragma unroll for (uint32_t col{0}; col < ThreadCols; col++) { #pragma unroll for (uint32_t row{0}; row < ThreadRows; row++) { if (row < ThreadSharedRows) { if (col < ThreadSharedCols) { atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads], result_shared[row * BlockRowThreads + row_in_block][col * BlockColThreads + col_in_block]); } else { atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads], result_reg[row][col]); } } else { atomicAdd(&result[(block_offset_row + row_in_block + row * BlockRowThreads) * 
problem.result.pitch + block_offset_col + col_in_block + col * BlockColThreads], result_reg[row][col]); } } } } template <> void gemm<Devices, 0, LhsRows / Devices, LhsCols, RhsCols>( CudaMM::ProblemData<LhsRows / Devices, LhsCols, RhsCols>& problem, const Element* lhs, const Element* rhs, Element* result) { // for 128-byte transactions on loading from global memory static_assert(BlockCols % WarpThreads == 0, "BlockCols % WarpThreads == 0"); static_assert(Stride % WarpThreads == 0, "Stride % WarpThreads == 0"); // loading from global memory should be unrolled loops static_assert((ThreadRows * Stride) % BlockColThreads == 0, "(ThreadRows * Stride) % BlockColThreads == 0"); static_assert((ThreadCols * Stride) % BlockRowThreads == 0, "(ThreadCols * Stride) % BlockRowThreads == 0"); // block size should be a multiple of warp size static_assert(BlockSize % WarpThreads == 0, "BlockSize % WarpThreads == 0"); // result stored in shared memory should be a part static_assert(ThreadSharedRows <= ThreadRows, "ThreadSharedRows <= ThreadRows"); static_assert(ThreadSharedCols <= ThreadCols, "ThreadSharedCols <= ThreadCols"); static_assert(ThreadSharedRows != 0 || ThreadSharedCols == 0, "ThreadSharedRows != 0 || ThreadSharedCols == 0"); static_assert(ThreadSharedRows == 0 || ThreadSharedCols != 0, "ThreadSharedRows == 0 || ThreadSharedCols != 0"); // restriction of hardware static_assert(BlockSize <= 1024, "BlockSize <= 1024"); static_assert(MinBlocksPerMultiprocessor <= 32, "MinBlocksPerMultiprocessor <= 32"); static_assert(BlockSize * MinBlocksPerMultiprocessor <= 2048, "BlockSize * MinBlocksPerMultiprocessor <= 2048"); static_assert(sizeof(Element) * SharedSize * MinBlocksPerMultiprocessor <= 0x18000, "sizeof(Element) * SharedSize * MinBlocksPerMultiprocessor <= 0x18000"); static_assert(problem.result.rows % BlockRows == 0, "problem.result.rows % BlockRows == 0"); static_assert(problem.result.cols % BlockCols == 0, "problem.result.cols % BlockCols == 0"); 
static_assert(problem.lhs.cols % MaxAccumulation == 0, "problem.lhs.cols % MaxAccumulation == 0"); static_assert(MaxAccumulation % Stride == 0, "MaxAccumulation % Stride == 0"); CUDA_CHECK(cudaMemset2DAsync( problem.result.data.get(), sizeof(Element) * problem.result.pitch, 0, sizeof(Element) * problem.result.cols, problem.result.rows, 0)); CUDA_CHECK(cudaMemcpy2DAsync( problem.lhs.data.get(), sizeof(Element) * problem.lhs.pitch, lhs, sizeof(Element) * problem.lhs.cols, sizeof(Element) * problem.lhs.cols, problem.lhs.rows, cudaMemcpyDefault, 0)); CUDA_CHECK(cudaMemcpy2DAsync( problem.rhs.data.get(), sizeof(Element) * problem.rhs.pitch, rhs, sizeof(Element) * problem.rhs.cols, sizeof(Element) * problem.rhs.cols, problem.rhs.rows, cudaMemcpyDefault, 0)); CUDA_CHECK(cudaFuncSetAttribute(gemm_impl<LhsRows / Devices, LhsCols, RhsCols>, cudaFuncAttributeMaxDynamicSharedMemorySize, sizeof(Element) * SharedSize)); CUDA_CHECK(cudaFuncSetAttribute(gemm_impl<LhsRows / Devices, LhsCols, RhsCols>, cudaFuncAttributePreferredSharedMemoryCarveout, 100)); gemm_impl<<<{problem.result.cols / BlockCols, problem.result.rows / BlockRows, problem.lhs.cols / MaxAccumulation}, {BlockColThreads, BlockRowThreads}, sizeof(Element) * SharedSize>>>( toKernel(problem), problem.lhs.data.get(), problem.rhs.data.get(), problem.result.data.get()); CUDA_CHECK(cudaMemcpy2DAsync( result, sizeof(Element) * problem.result.cols, problem.result.data.get(), sizeof(Element) * problem.result.pitch, sizeof(Element) * problem.result.cols, problem.result.rows, cudaMemcpyDefault, 0)); } template <> void gemm<Devices, 1, LhsRows / Devices, LhsCols, RhsCols>( CudaMM::ProblemData<LhsRows / Devices, LhsCols, RhsCols>& problem, const Element* lhs, const Element* rhs, Element* result) { gemm<Devices, 0, LhsRows / Devices, LhsCols, RhsCols>( problem, lhs, rhs, result); } } // namespace Kernel } // namespace CudaMM template void CudaMM::gemm<LhsRows, LhsCols, RhsCols>( CudaMM::DeviceData<LhsRows, LhsCols, RhsCols>&, 
const Element* lhs, const Element* rhs, Element* result);
92b7ce2a2c7f2115cb322e81129000539d74a4da.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> namespace at { namespace native { // We manually overload angle because std::arg does not work with types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) { if (at::_isnan(v)){ return v; } return v < 0 ? M_PI : 0; } template<typename T> __host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T> v) { return std::arg(v); } void angle_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.common_dtype(), "angle_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return angle_wrapper(a); }); }); } // We manually overload real because std::real does not work types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t real_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline c10::complex<T> real_wrapper(c10::complex<T> v) { return v.real(); } void real_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return real_wrapper(a); }); }); } // We manually overload imag because std::imag does not work types other than c10::complex. 
template<typename scalar_t> __host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline c10::complex<T> imag_wrapper(c10::complex<T> v) { return v.imag(); } void imag_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return imag_wrapper(a); }); }); } // We manually overload conj because std::conj does not work types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline c10::complex<T> conj_wrapper(c10::complex<T> v) { return std::conj(v); } // NB: Ignores the negative bit on tensors const char conj_name[] = "conj_kernel"; void conj_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (common_dtype == kComplexHalf) { using scalar_t = c10::complex<at::Half>; #if AT_USE_JITERATOR() static const auto conj_string = jiterator_stringify( template <typename T> T conj_kernel(T z) { return std::conj(z); } ); jitted_gpu_kernel<conj_name, scalar_t, scalar_t, 1>(iter, conj_string); #else gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return conj_wrapper(a); }); #endif } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kBool, kBFloat16, kHalf, iter.common_dtype(), "conj_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return conj_wrapper(a); }); }); } } REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda); REGISTER_DISPATCH(real_stub, &real_kernel_cuda); REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda); REGISTER_DISPATCH(conj_physical_stub, &conj_kernel_cuda); }} // namespace at::native
92b7ce2a2c7f2115cb322e81129000539d74a4da.cu
#define TORCH_ASSERT_NO_OPERATORS #include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> namespace at { namespace native { // We manually overload angle because std::arg does not work with types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) { if (at::_isnan(v)){ return v; } return v < 0 ? M_PI : 0; } template<typename T> __host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T> v) { return std::arg(v); } void angle_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.common_dtype(), "angle_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return angle_wrapper(a); }); }); } // We manually overload real because std::real does not work types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t real_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline c10::complex<T> real_wrapper(c10::complex<T> v) { return v.real(); } void real_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return real_wrapper(a); }); }); } // We manually overload imag because std::imag does not work types other than c10::complex. 
template<typename scalar_t> __host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) { return 0; } template<typename T> __host__ __device__ static inline c10::complex<T> imag_wrapper(c10::complex<T> v) { return v.imag(); } void imag_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return imag_wrapper(a); }); }); } // We manually overload conj because std::conj does not work types other than c10::complex. template<typename scalar_t> __host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) { return v; } template<typename T> __host__ __device__ static inline c10::complex<T> conj_wrapper(c10::complex<T> v) { return std::conj(v); } // NB: Ignores the negative bit on tensors const char conj_name[] = "conj_kernel"; void conj_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (common_dtype == kComplexHalf) { using scalar_t = c10::complex<at::Half>; #if AT_USE_JITERATOR() static const auto conj_string = jiterator_stringify( template <typename T> T conj_kernel(T z) { return std::conj(z); } ); jitted_gpu_kernel<conj_name, scalar_t, scalar_t, 1>(iter, conj_string); #else gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return conj_wrapper(a); }); #endif } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kBool, kBFloat16, kHalf, iter.common_dtype(), "conj_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return conj_wrapper(a); }); }); } } REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda); REGISTER_DISPATCH(real_stub, &real_kernel_cuda); REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda); REGISTER_DISPATCH(conj_physical_stub, &conj_kernel_cuda); }} // namespace at::native
2a26d6899a2447d61e04f066ee2e6d5dc4e35c35.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <hip/hip_runtime.h> #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_white_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_pointcloud_normal_functor { copy_pointcloud_normal_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, float line_length) : points_(points), normals_(normals), line_length_(line_length){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const float line_length_; __device__ Eigen::Vector3f operator()(size_t idx) { int i = idx / 2; int j = idx % 2; if (j == 0) { return points_[i]; } else { return points_[i] + normals_[i] * line_length_; } } }; struct copy_mesh_wireflame_functor { copy_mesh_wireflame_functor(const Eigen::Vector3f *vertices, const int *triangles) : vertices_(vertices), triangles_(triangles){}; const Eigen::Vector3f *vertices_; const int *triangles_; __device__ Eigen::Vector3f operator()(size_t k) { int vi = triangles_[k]; return vertices_[vi]; } }; } // namespace bool SimpleWhiteShader::Compile() { if (CompileShaders(simple_white_vertex_shader, NULL, simple_white_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleWhiteShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleWhiteShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. 
If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(1, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(1); bound_ = true; return true; } bool SimpleWhiteShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); return true; } void SimpleWhiteShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[0])); 
glDeleteBuffers(1, &vertex_position_buffer_); bound_ = false; } } bool SimpleWhiteShaderForPointCloudNormal::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleWhiteShaderForPointCloudNormal::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } float line_length = option.point_size_ * 0.01 * view.GetBoundingBox().GetMaxExtent(); copy_pointcloud_normal_functor func( thrust::raw_pointer_cast(pointcloud.points_.data()), thrust::raw_pointer_cast(pointcloud.normals_.data()), line_length); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(pointcloud.points_.size() * 2), points, func); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(pointcloud.points_.size() * 2); return true; } size_t SimpleWhiteShaderForPointCloudNormal::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size() * 2; } bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); 
return false; } glLineWidth(1.0f); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glDisable(GL_POLYGON_OFFSET_FILL); return true; } bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty geometry::TriangleMesh."); return false; } copy_mesh_wireflame_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data()))); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), points, func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleWhiteShaderForTriangleMeshWireFrame::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
2a26d6899a2447d61e04f066ee2e6d5dc4e35c35.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <cuda_runtime.h> #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_white_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_pointcloud_normal_functor { copy_pointcloud_normal_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, float line_length) : points_(points), normals_(normals), line_length_(line_length){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const float line_length_; __device__ Eigen::Vector3f operator()(size_t idx) { int i = idx / 2; int j = idx % 2; if (j == 0) { return points_[i]; } else { return points_[i] + normals_[i] * line_length_; } } }; struct copy_mesh_wireflame_functor { copy_mesh_wireflame_functor(const Eigen::Vector3f *vertices, const int *triangles) : vertices_(vertices), triangles_(triangles){}; const Eigen::Vector3f *vertices_; const int *triangles_; __device__ Eigen::Vector3f operator()(size_t k) { int vi = triangles_[k]; return vertices_[vi]; } }; } // namespace bool SimpleWhiteShader::Compile() { if (CompileShaders(simple_white_vertex_shader, NULL, simple_white_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleWhiteShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleWhiteShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. 
If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(1, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(1); bound_ = true; return true; } bool SimpleWhiteShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); return true; } void SimpleWhiteShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) cudaSafeCall(cudaGraphicsUnregisterResource( 
cuda_graphics_resources_[0])); glDeleteBuffers(1, &vertex_position_buffer_); bound_ = false; } } bool SimpleWhiteShaderForPointCloudNormal::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleWhiteShaderForPointCloudNormal::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } float line_length = option.point_size_ * 0.01 * view.GetBoundingBox().GetMaxExtent(); copy_pointcloud_normal_functor func( thrust::raw_pointer_cast(pointcloud.points_.data()), thrust::raw_pointer_cast(pointcloud.normals_.data()), line_length); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(pointcloud.points_.size() * 2), points, func); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(pointcloud.points_.size() * 2); return true; } size_t SimpleWhiteShaderForPointCloudNormal::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size() * 2; } bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is 
not geometry::TriangleMesh."); return false; } glLineWidth(1.0f); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glDisable(GL_POLYGON_OFFSET_FILL); return true; } bool SimpleWhiteShaderForTriangleMeshWireFrame::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty geometry::TriangleMesh."); return false; } copy_mesh_wireflame_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data()))); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), points, func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleWhiteShaderForTriangleMeshWireFrame::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
GPUSorting0.hip
// !!! This is a file automatically generated by hipify!!! #include <Aquila/Thrust_interop.hpp> #include "Aquila/utilities/GPUSortingPriv.hpp" namespace cv { namespace cuda { namespace detail { template AQUILA_EXPORTS void sortAscending<uchar>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortDescending<uchar>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortAscendingEachRow<uchar>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortDescendingEachRow<uchar>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortAscending<char>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortDescending<char>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortAscendingEachRow<char>(cv::cuda::GpuMat&, hipStream_t); template AQUILA_EXPORTS void sortDescendingEachRow<char>(cv::cuda::GpuMat&, hipStream_t); } } }
GPUSorting0.cu
#include <Aquila/Thrust_interop.hpp> #include "Aquila/utilities/GPUSortingPriv.hpp" namespace cv { namespace cuda { namespace detail { template AQUILA_EXPORTS void sortAscending<uchar>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortDescending<uchar>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortAscendingEachRow<uchar>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortDescendingEachRow<uchar>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortAscending<char>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortDescending<char>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortAscendingEachRow<char>(cv::cuda::GpuMat&, cudaStream_t); template AQUILA_EXPORTS void sortDescendingEachRow<char>(cv::cuda::GpuMat&, cudaStream_t); } } }
544530d08a7469fd91dc725dbd5d174da0f9391c.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertex, triangle_normals_[i]); } else { return thrust::make_tuple(vertex, vertex_normals_[vi]); } } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const 
ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool NormalShader::RenderGeometry(const geometry::Geometry 
&geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry() { if (bound_) { glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if 
(pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == 
false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
544530d08a7469fd91dc725dbd5d174da0f9391c.cu
#include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertex, triangle_normals_[i]); } else { return thrust::make_tuple(vertex, vertex_normals_[vi]); } } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind 
it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool NormalShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if 
(PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry() { if (bound_) { glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding 
failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no 
normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
89f8bdfe43e0d9afdb99f42b0fb2f36e74166e6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cuda_kernels.h" #include "../include/parse-args.h" #include <hiprand/hiprand_kernel.h> #define typedef uint unsigned long //__device__ int dummy = 0; __device__ int final_block_idx_dev = -1; __device__ int final_thread_idx_dev = -1; __device__ double final_gather_data_dev = -1; template<int v> __global__ void scatter_t(double* target, double* source, long* ti, long* si) { extern __shared__ char space[]; int gid = v*(blockIdx.x * blockDim.x + threadIdx.x); double buf[v]; long idx[v]; for(int i = 0; i < v; i++){ buf[i] = source[gid+i]; } for(int i = 0; i < v; i++){ idx[i] = ti[gid+i]; } for(int i = 0; i < v; i++){ target[idx[i]] = buf[i]; } } template<int v> __global__ void gather_t(double* target, double* source, long* ti, long* si) { extern __shared__ char space[]; int gid = v*(blockIdx.x * blockDim.x + threadIdx.x); double buf[v]; for(int i = 0; i < v; i++){ buf[i] = source[si[gid+i]]; } for(int i = 0; i < v; i++){ target[gid+i] = buf[i]; } } //__global__ void gather_new(double *target, template<int v> __global__ void sg_t(double* target, double* source, long* ti, long* si) { extern __shared__ char space[]; int gid = v*(blockIdx.x * blockDim.x + threadIdx.x); long sidx[v]; long tidx[v]; for(int i = 0; i < v; i++){ sidx[i] = si[gid+i]; } for(int i = 0; i < v; i++){ tidx[i] = ti[gid+i]; } for(int i = 0; i < v; i++){ target[tidx[i]] = source[sidx[i]]; } } #define INSTANTIATE(V)\ template __global__ void scatter_t<V>(double* target, double* source, long* ti, long* si);\ template __global__ void gather_t<V>(double* target, double* source, long* ti, long* si); \ template __global__ void sg_t<V>(double* target, double* source, long* ti, long* si); INSTANTIATE(1); INSTANTIATE(2); INSTANTIATE(4); INSTANTIATE(5); INSTANTIATE(8); INSTANTIATE(16); INSTANTIATE(32); INSTANTIATE(64); INSTANTIATE(128); INSTANTIATE(256); INSTANTIATE(512); INSTANTIATE(1024); 
INSTANTIATE(2048); INSTANTIATE(4096); extern "C" int translate_args(unsigned int dim, unsigned int* grid, unsigned int* block, dim3 *grid_dim, dim3 *block_dim){ if (!grid || !block || dim == 0 || dim > 3) { return 1; } if (dim == 1) { *grid_dim = dim3(grid[0]); *block_dim = dim3(block[0]); }else if (dim == 2) { *grid_dim = dim3(grid[0], grid[1]); *block_dim = dim3(block[0], block[1]); }else if (dim == 3) { *grid_dim = dim3(grid[0], grid[1], grid[2]); *block_dim = dim3(block[0], block[1], block[2]); } return 0; } extern "C" float cuda_sg_wrapper(enum sg_kernel kernel, size_t vector_len, uint dim, uint* grid, uint* block, double* target, double *source, long* ti, long* si, unsigned int shmem){ dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); if(kernel == SCATTER) { if (vector_len == 1) hipLaunchKernelGGL(( scatter_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( scatter_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( scatter_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( scatter_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 8) hipLaunchKernelGGL(( scatter_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( scatter_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( scatter_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( scatter_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 128) 
hipLaunchKernelGGL(( scatter_t<128>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 256) hipLaunchKernelGGL(( scatter_t<256>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 512) hipLaunchKernelGGL(( scatter_t<512>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 1024) hipLaunchKernelGGL(( scatter_t<1024>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2048) hipLaunchKernelGGL(( scatter_t<2048>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4096) hipLaunchKernelGGL(( scatter_t<4096>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else if(kernel == GATHER) { if (vector_len == 1) hipLaunchKernelGGL(( gather_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( gather_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( gather_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( gather_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 8) hipLaunchKernelGGL(( gather_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( gather_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( gather_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( gather_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 128) hipLaunchKernelGGL(( gather_t<128>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, 
source, ti, si); else if (vector_len == 256) hipLaunchKernelGGL(( gather_t<256>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 512) hipLaunchKernelGGL(( gather_t<512>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 1024) hipLaunchKernelGGL(( gather_t<1024>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2048) hipLaunchKernelGGL(( gather_t<2048>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4096) hipLaunchKernelGGL(( gather_t<4096>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else if(kernel == GS) { if (vector_len == 1) hipLaunchKernelGGL(( sg_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( sg_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( sg_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( sg_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 8) hipLaunchKernelGGL(( sg_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( sg_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( sg_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( sg_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 128) hipLaunchKernelGGL(( sg_t<128>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 256) hipLaunchKernelGGL(( sg_t<256>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, 
ti, si); else if (vector_len == 512) hipLaunchKernelGGL(( sg_t<512>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 1024) hipLaunchKernelGGL(( sg_t<1024>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2048) hipLaunchKernelGGL(( sg_t<2048>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4096) hipLaunchKernelGGL(( sg_t<4096>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else { printf("ERROR UNRECOGNIZED KERNEL\n"); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } //assume block size >= index buffer size //assume index buffer size divides block size template<int V> __global__ void scatter_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate) { __shared__ int idx_shared[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx_shared[tid] = idx[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *src_loc = src + (bid*ngatherperblock+gatherid)*delta; //for (int i = 0; i < wpb; i++) { src_loc[idx_shared[tid%V]] = idx_shared[tid%V]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += delta; //} } //assume block size >= index buffer size //assume index buffer size divides block size template<int V> __global__ void scatter_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n) { __shared__ int idx_shared[V]; int tid = threadIdx.x; int bid = blockIdx.x; int ngatherperblock = blockDim.x / V; int gatherid = tid / V; unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence unsigned long long offset = gatherid; hiprandState_t state; 
hiprand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc int random_gatherid = (int)(n * hiprand_uniform(&state)); if (tid < V) { idx_shared[tid] = idx[tid]; } double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta; //for (int i = 0; i < wpb; i++) { src_loc[idx_shared[tid%V]] = idx_shared[tid%V]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += delta; //} } //V2 = 8 //assume block size >= index buffer size //assume index buffer size divides block size template<int V> __global__ void gather_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate) { __shared__ int idx_shared[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx_shared[tid] = idx[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *src_loc = src + (bid*ngatherperblock+gatherid)*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = src_loc[idx_shared[tid%V]]; return; } #endif double x; //for (int i = 0; i < wpb; i++) { x = src_loc[idx_shared[tid%V]]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += delta; //} if (x==0.5) src[0] = x; } template<int V> __global__ void gather_block_morton(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, uint32_t *order, char validate) { __shared__ int idx_shared[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx_shared[tid] = idx[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *src_loc = src + (bid*ngatherperblock+order[gatherid])*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = src_loc[idx_shared[tid%V]]; return; } #endif double x; //for (int i = 0; i < wpb; i++) { x = src_loc[idx_shared[tid%V]]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += 
delta; //} if (x==0.5) src[0] = x; } template<int V> __global__ void gather_block_stride(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, int stride, char validate) { int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *src_loc = src + (bid*ngatherperblock+gatherid)*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = src_loc[stride*(tid%V)]; } #endif double x; //for (int i = 0; i < wpb; i++) { x = src_loc[stride*(tid%V)]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += delta; //} if (x==0.5) src[0] = x; } template<int V> __global__ void gather_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n) { __shared__ int idx_shared[V]; int tid = threadIdx.x; int bid = blockIdx.x; int ngatherperblock = blockDim.x / V; int gatherid = tid / V; unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence unsigned long long offset = gatherid; hiprandState_t state; hiprand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc int random_gatherid = (int)(n * hiprand_uniform(&state)); if (tid < V) { idx_shared[tid] = idx[tid]; } double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta; double x; //for (int i = 0; i < wpb; i++) { x = src_loc[idx_shared[tid%V]]; //src_loc[idx_shared[tid%V]] = 1337.; //src_loc += delta; //} if (x==0.5) src[0] = x; } //todo -- add WRAP template<int V> __global__ void gather_new(double* source, sgIdx_t* idx, size_t delta, int dummy, int wpt) { __shared__ int idx_shared[V]; int tid = threadIdx.x; //int bid = blockIdx.x; //int nblk = blockDim.x; if (tid < V) { idx_shared[tid] = idx[tid]; } int gid = (blockIdx.x * blockDim.x + threadIdx.x); double *sl = source + wpt*gid*delta; double buf[V]; for (int j = 0; j < wpt; j++) { for (int i = 0; i < 
V; i++) { buf[i] = sl[idx_shared[i]]; //source[i+gid*delta] = 8; //sl[i] = sl[idx[i]]; } sl = sl + delta; } if (dummy) { sl[idx_shared[0]] = buf[dummy]; } /* for (int i = 0; i < V; i++) { if (buf[i] == 199402) { printf("oop\n"); } } */ //printf("idx[1]: %d\n", idx[1]); /* for (int i = 0; i < V; i++) { printf("idx %d is %zu", i, idx[i]); } printf("\n"); */ } #define INSTANTIATE2(V)\ template __global__ void gather_new<V>(double* source, sgIdx_t* idx, size_t delta, int dummy, int wpt); \ template __global__ void gather_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate);\ template __global__ void gather_block_morton<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, uint32_t *order, char validate);\ template __global__ void gather_block_stride<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, int stride, char validate);\ template __global__ void scatter_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate); \ template __global__ void gather_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n); \ template __global__ void scatter_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n); //INSTANTIATE2(1); //INSTANTIATE2(2); //INSTANTIATE2(4); //INSTANTIATE2(5); INSTANTIATE2(8); INSTANTIATE2(16); INSTANTIATE2(32); INSTANTIATE2(64); INSTANTIATE2(73); INSTANTIATE2(128); INSTANTIATE2(256); INSTANTIATE2(512); INSTANTIATE2(1024); INSTANTIATE2(2048); INSTANTIATE2(4096); extern "C" float cuda_block_wrapper(uint dim, uint* grid, uint* block, enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat, size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt, size_t morton, uint32_t *order, uint32_t *order_dev, int stride, int *final_block_idx, int *final_thread_idx, double *final_gather_data, char validate) { dim3 grid_dim, block_dim; hipEvent_t start, stop; 
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice); hipMemcpy(order_dev, order, sizeof(uint32_t)*n, hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (kernel == GATHER) { if (morton) { if (pat_len == 8) { hipLaunchKernelGGL(( gather_block_morton<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( gather_block_morton<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( gather_block_morton<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( gather_block_morton<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( gather_block_morton<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( gather_block_morton<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( gather_block_morton<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( gather_block_morton<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( gather_block_morton<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( gather_block_morton<2048>), 
dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( gather_block_morton<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, order_dev, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } } else if (stride >= 0) { if (pat_len == 8) { hipLaunchKernelGGL(( gather_block_stride<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( gather_block_stride<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( gather_block_stride<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( gather_block_stride<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( gather_block_stride<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( gather_block_stride<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( gather_block_stride<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( gather_block_stride<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( gather_block_stride<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( gather_block_stride<2048>), dim3(grid_dim), 
dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( gather_block_stride<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, stride, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } } else { if (pat_len == 8) { hipLaunchKernelGGL(( gather_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( gather_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( gather_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( gather_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( gather_block<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( gather_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( gather_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( gather_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( gather_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( gather_block<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( gather_block<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, 
pat_dev, pat_len, delta, wpt, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } } hipMemcpyFromSymbol(final_gather_data, final_gather_data_dev, sizeof(double), 0, hipMemcpyDeviceToHost); } else if (kernel == SCATTER) { if (pat_len == 8) { hipLaunchKernelGGL(( scatter_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( scatter_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( scatter_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( scatter_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( scatter_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len ==256) { hipLaunchKernelGGL(( scatter_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( scatter_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( scatter_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( scatter_block<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else if (pat_len ==4096) { hipLaunchKernelGGL(( scatter_block<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, validate); }else { printf("ERROR NOT SUPPORTED, %zu\n", pat_len); } } hipEventRecord(stop); hipEventSynchronize(stop); hipMemcpyFromSymbol(final_block_idx, final_block_idx_dev, sizeof(int), 0, 
hipMemcpyDeviceToHost); hipMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } extern "C" float cuda_block_random_wrapper(uint dim, uint* grid, uint* block, enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat, size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt, size_t seed) { dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (kernel == GATHER) { if (pat_len == 8) { hipLaunchKernelGGL(( gather_block_random<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 16) { hipLaunchKernelGGL(( gather_block_random<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 32) { hipLaunchKernelGGL(( gather_block_random<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 64) { hipLaunchKernelGGL(( gather_block_random<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 128) { hipLaunchKernelGGL(( gather_block_random<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len ==256) { hipLaunchKernelGGL(( gather_block_random<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 512) { hipLaunchKernelGGL(( gather_block_random<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 1024) { hipLaunchKernelGGL(( gather_block_random<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, 
source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 2048) { hipLaunchKernelGGL(( gather_block_random<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len ==4096) { hipLaunchKernelGGL(( gather_block_random<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } } else if (kernel == SCATTER) { if (pat_len == 8) { hipLaunchKernelGGL(( scatter_block_random<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 16) { hipLaunchKernelGGL(( scatter_block_random<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 32) { hipLaunchKernelGGL(( scatter_block_random<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 64) { hipLaunchKernelGGL(( scatter_block_random<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 128) { hipLaunchKernelGGL(( scatter_block_random<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len ==256) { hipLaunchKernelGGL(( scatter_block_random<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 512) { hipLaunchKernelGGL(( scatter_block_random<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 1024) { hipLaunchKernelGGL(( scatter_block_random<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len == 2048) { hipLaunchKernelGGL(( scatter_block_random<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else if (pat_len ==4096) { hipLaunchKernelGGL(( 
scatter_block_random<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n); }else { printf("ERROR NOT SUPPORTED, %zu\n", pat_len); } } hipEventRecord(stop); hipEventSynchronize(stop); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } extern "C" float cuda_new_wrapper(uint dim, uint* grid, uint* block, enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat, size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt) { dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (pat_len == 8) { hipLaunchKernelGGL(( gather_new<8>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 16) { hipLaunchKernelGGL(( gather_new<16>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 64) { hipLaunchKernelGGL(( gather_new<64>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 256) { hipLaunchKernelGGL(( gather_new<256>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 512) { hipLaunchKernelGGL(( gather_new<512>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 1024) { hipLaunchKernelGGL(( gather_new<1024>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 2048) { hipLaunchKernelGGL(( gather_new<2048>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else if (pat_len == 4096) { hipLaunchKernelGGL(( gather_new<4096>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt); }else { printf("ERROR NOT SUPPORTED\n"); } 
hipEventRecord(stop); hipEventSynchronize(stop); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } /* dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); if(kernel == SCATTER) { if (vector_len == 1) hipLaunchKernelGGL(( scatter_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( scatter_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( scatter_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( scatter_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 8) hipLaunchKernelGGL(( scatter_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( scatter_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( scatter_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( scatter_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else if(kernel == GATHER) { if (vector_len == 1) hipLaunchKernelGGL(( gather_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( gather_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( gather_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( gather_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else 
if (vector_len == 8) hipLaunchKernelGGL(( gather_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( gather_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( gather_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( gather_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else if(kernel == GS) { if (vector_len == 1) hipLaunchKernelGGL(( sg_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 2) hipLaunchKernelGGL(( sg_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 4) hipLaunchKernelGGL(( sg_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 5) hipLaunchKernelGGL(( sg_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 8) hipLaunchKernelGGL(( sg_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 16) hipLaunchKernelGGL(( sg_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 32) hipLaunchKernelGGL(( sg_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else if (vector_len == 64) hipLaunchKernelGGL(( sg_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si); else { printf("ERROR: UNSUPPORTED VECTOR LENGTH\n"); exit(1); } } else { printf("ERROR UNRECOGNIZED KERNEL\n"); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; }*/ template<int V> __global__ void sg_block(double *source, double* target, sgIdx_t* pat_gath, sgIdx_t* pat_scat, spSize_t pat_len, size_t delta_gather, size_t 
delta_scatter, int wpt, char validate) { __shared__ int idx_gath[V]; __shared__ int idx_scat[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx_gath[tid] = pat_gath[tid]; idx_scat[tid] = pat_scat[tid]; } int ngatherperblock = blockDim.x / V; int nscatterperblock = ngatherperblock; int gatherid = tid / V; int scatterid = gatherid; double *source_loc = source + (bid*ngatherperblock+gatherid)*delta_gather; double *target_loc = target + (bid*nscatterperblock+scatterid)*delta_scatter; #ifdef VALIDATE if (validate) { final_gather_data_dev = source_loc[idx_gath[tid%V]]; return; } #endif target_loc[idx_scat[tid%V]] = source_loc[idx_gath[tid%V]]; } #define INSTANTIATE3(V)\ template __global__ void sg_block<V>(double* source, double* target, sgIdx_t* pat_gath, sgIdx_t* pat_scat, spSize_t pat_len, size_t delta_gather, size_t delta_scatter, int wpt, char validate); //INSTANTIATE3(1); //INSTANTIATE3(2); //INSTANTIATE3(4); //INSTANTIATE3(5); INSTANTIATE3(8); INSTANTIATE3(16); INSTANTIATE3(32); INSTANTIATE3(64); INSTANTIATE3(73); INSTANTIATE3(128); INSTANTIATE3(256); INSTANTIATE3(512); INSTANTIATE3(1024); INSTANTIATE3(2048); INSTANTIATE3(4096); extern "C" float cuda_block_sg_wrapper(uint dim, uint* grid, uint* block, double *source, double *target, struct run_config* rc, sgIdx_t* pat_gath_dev, sgIdx_t* pat_scat_dev, int wpt, int *final_block_idx, int *final_thread_idx, double *final_gather_data, char validate) { dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(pat_gath_dev, rc->pattern_gather, sizeof(sgIdx_t)*rc->pattern_gather_len, hipMemcpyHostToDevice); hipMemcpy(pat_scat_dev, rc->pattern_scatter, sizeof(sgIdx_t)*rc->pattern_scatter_len, hipMemcpyHostToDevice); size_t delta_gather = rc->delta_gather; size_t delta_scatter = rc->delta_scatter; spSize_t pat_len = 
rc->pattern_gather_len; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (pat_len == 8) { hipLaunchKernelGGL(( sg_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( sg_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( sg_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( sg_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( sg_block<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( sg_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( sg_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( sg_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( sg_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( sg_block<2048>), 
dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( sg_block<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, pat_gath_dev, pat_scat_dev, pat_len, delta_gather, delta_scatter, wpt, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } hipEventRecord(stop); hipEventSynchronize(stop); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpyFromSymbol(final_block_idx, final_block_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } template<int V> __global__ void multiscatter_block(double *source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate) { __shared__ int idx[V]; __shared__ int idx_scat[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx[tid] = outer_pat[tid]; idx_scat[tid] = inner_pat[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *source_loc = source + (bid*ngatherperblock+gatherid)*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = source_loc[tid%V]; return; } #endif source_loc[idx[idx_scat[tid%V]]] = target[tid%V]; } #define INSTANTIATE4(V)\ template __global__ void multiscatter_block<V>(double* source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate); //INSTANTIATE4(1); //INSTANTIATE4(2); //INSTANTIATE4(4); //INSTANTIATE4(5); INSTANTIATE4(8); INSTANTIATE4(16); INSTANTIATE4(32); INSTANTIATE4(64); INSTANTIATE4(73); INSTANTIATE4(128); INSTANTIATE4(256); INSTANTIATE4(512); 
INSTANTIATE4(1024); INSTANTIATE4(2048); INSTANTIATE4(4096); extern "C" float cuda_block_multiscatter_wrapper(uint dim, uint* grid, uint* block, double *source, double *target, struct run_config* rc, sgIdx_t* outer_pat, sgIdx_t* inner_pat, int wpt, int *final_block_idx, int *final_thread_idx, double *final_gather_data, char validate) { dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(outer_pat, rc->pattern, sizeof(sgIdx_t)*rc->pattern_len, hipMemcpyHostToDevice); hipMemcpy(inner_pat, rc->pattern_scatter, sizeof(sgIdx_t)*rc->pattern_scatter_len, hipMemcpyHostToDevice); size_t delta = rc->delta; spSize_t pat_len = rc->pattern_scatter_len; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (pat_len == 8) { hipLaunchKernelGGL(( multiscatter_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( multiscatter_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( multiscatter_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( multiscatter_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( multiscatter_block<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( multiscatter_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( multiscatter_block<256>), 
dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( multiscatter_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( multiscatter_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( multiscatter_block<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( multiscatter_block<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } hipEventRecord(stop); hipEventSynchronize(stop); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpyFromSymbol(final_block_idx, final_block_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; } template<int V> __global__ void multigather_block(double *source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate) { __shared__ int idx[V]; __shared__ int idx_gath[V]; int tid = threadIdx.x; int bid = blockIdx.x; #ifdef VALIDATE if (validate) { final_block_idx_dev = blockIdx.x; final_thread_idx_dev = threadIdx.x; } #endif if (tid < V) { idx[tid] = outer_pat[tid]; idx_gath[tid] = inner_pat[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *source_loc = source + (bid*ngatherperblock+gatherid)*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = 
source_loc[idx[idx_gath[tid%V]]]; return; } #endif target[tid%V] = source_loc[idx[idx_gath[tid%V]]]; } #define INSTANTIATE5(V)\ template __global__ void multigather_block<V>(double* source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate); //INSTANTIATE5(1); //INSTANTIATE5(2); //INSTANTIATE5(4); //INSTANTIATE5(5); INSTANTIATE5(8); INSTANTIATE5(16); INSTANTIATE5(32); INSTANTIATE5(64); INSTANTIATE5(73); INSTANTIATE5(128); INSTANTIATE5(256); INSTANTIATE5(512); INSTANTIATE5(1024); INSTANTIATE5(2048); INSTANTIATE5(4096); extern "C" float cuda_block_multigather_wrapper(uint dim, uint* grid, uint* block, double *source, double *target, struct run_config* rc, sgIdx_t* outer_pat, sgIdx_t* inner_pat, int wpt, int *final_block_idx, int *final_thread_idx, double *final_gather_data, char validate) { dim3 grid_dim, block_dim; hipEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; hipMemcpy(outer_pat, rc->pattern, sizeof(sgIdx_t)*rc->pattern_len, hipMemcpyHostToDevice); hipMemcpy(inner_pat, rc->pattern_gather, sizeof(sgIdx_t)*rc->pattern_gather_len, hipMemcpyHostToDevice); size_t delta = rc->delta; spSize_t pat_len = rc->pattern_gather_len; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start); // KERNEL if (pat_len == 8) { hipLaunchKernelGGL(( multigather_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 16) { hipLaunchKernelGGL(( multigather_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 32) { hipLaunchKernelGGL(( multigather_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 64) { hipLaunchKernelGGL(( multigather_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, 
source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 73) { hipLaunchKernelGGL(( multigather_block<73>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 128) { hipLaunchKernelGGL(( multigather_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 256) { hipLaunchKernelGGL(( multigather_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 512) { hipLaunchKernelGGL(( multigather_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 1024) { hipLaunchKernelGGL(( multigather_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 2048) { hipLaunchKernelGGL(( multigather_block<2048>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 4096) { hipLaunchKernelGGL(( multigather_block<4096>), dim3(grid_dim), dim3(block_dim), 0, 0, source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } hipEventRecord(stop); hipEventSynchronize(stop); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpyFromSymbol(final_block_idx, final_block_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev, sizeof(int), 0, hipMemcpyDeviceToHost); float time_ms = 0; hipEventElapsedTime(&time_ms, start, stop); return time_ms; }
89f8bdfe43e0d9afdb99f42b0fb2f36e74166e6b.cu
#include <stdio.h>
#include <stdlib.h> /* exit(), used by the dispatch wrappers below (was only reached transitively before) */
#include "cuda_kernels.h"
#include "../include/parse-args.h"
#include <curand_kernel.h>

// NOTE(review): the original source had `#define typedef uint unsigned long` here.
// That defined a macro literally named `typedef` which was never expanded in this
// translation unit (any later use of the `typedef` keyword would have produced a
// syntax error), so it has been removed. `uint` itself continues to come from the
// system/project headers, as before.

//__device__ int dummy = 0;

// Device-side cells used by VALIDATE builds to report which block/thread executed
// and what value it gathered; read back on the host via cudaMemcpyFromSymbol.
__device__ int final_block_idx_dev = -1;
__device__ int final_thread_idx_dev = -1;
__device__ double final_gather_data_dev = -1;

// Scatter kernel: each thread moves `v` consecutive doubles from `source` to the
// positions named by the target-index array `ti`. `si` is unused on this path.
// Launch: 1-D grid; each thread owns elements [gid, gid+v).
template<int v>
__global__ void scatter_t(double* target, double* source, long* ti, long* si)
{
    extern __shared__ char space[];

    int gid = v * (blockIdx.x * blockDim.x + threadIdx.x);

    double buf[v];
    long   idx[v];

    // Stage values and indices in registers first, then perform the writes.
    for (int i = 0; i < v; i++) {
        buf[i] = source[gid + i];
    }
    for (int i = 0; i < v; i++) {
        idx[i] = ti[gid + i];
    }
    for (int i = 0; i < v; i++) {
        target[idx[i]] = buf[i];
    }
}

// Gather kernel: each thread reads `v` doubles from `source` at the positions
// named by the source-index array `si` and writes them contiguously to `target`.
// `ti` is unused on this path.
template<int v>
__global__ void gather_t(double* target, double* source, long* ti, long* si)
{
    extern __shared__ char space[];

    int gid = v * (blockIdx.x * blockDim.x + threadIdx.x);

    double buf[v];

    for (int i = 0; i < v; i++) {
        buf[i] = source[si[gid + i]];
    }
    for (int i = 0; i < v; i++) {
        target[gid + i] = buf[i];
    }
}

//__global__ void gather_new(double *target,

// Combined scatter+gather kernel: target[ti[k]] = source[si[k]] for each of the
// `v` elements owned by a thread.
template<int v>
__global__ void sg_t(double* target, double* source, long* ti, long* si)
{
    extern __shared__ char space[];

    int gid = v * (blockIdx.x * blockDim.x + threadIdx.x);

    long sidx[v];
    long tidx[v];

    for (int i = 0; i < v; i++) {
        sidx[i] = si[gid + i];
    }
    for (int i = 0; i < v; i++) {
        tidx[i] = ti[gid + i];
    }
    for (int i = 0; i < v; i++) {
        target[tidx[i]] = source[sidx[i]];
    }
}

// Explicit instantiations for every vector length cuda_sg_wrapper dispatches on.
#define INSTANTIATE(V)\
template __global__ void scatter_t<V>(double* target, double* source, long* ti, long* si);\
template __global__ void gather_t<V>(double* target, double* source, long* ti, long* si); \
template __global__ void sg_t<V>(double* target, double* source, long* ti, long* si);
INSTANTIATE(1);
INSTANTIATE(2);
INSTANTIATE(4);
INSTANTIATE(5);
INSTANTIATE(8);
INSTANTIATE(16);
INSTANTIATE(32);
INSTANTIATE(64);
INSTANTIATE(128);
INSTANTIATE(256);
INSTANTIATE(512);
INSTANTIATE(1024);
INSTANTIATE(2048);
INSTANTIATE(4096);

// Convert a dimensionality (1..3) plus per-axis grid/block extents into dim3
// launch dimensions. Returns 0 on success, 1 on invalid arguments.
extern "C" int translate_args(unsigned int dim, unsigned int* grid,
                              unsigned int* block, dim3 *grid_dim, dim3 *block_dim)
{
    if (!grid || !block || dim == 0 || dim > 3) {
        return 1;
    }
    if (dim == 1) {
        *grid_dim  = dim3(grid[0]);
        *block_dim = dim3(block[0]);
    } else if (dim == 2) {
        *grid_dim  = dim3(grid[0], grid[1]);
        *block_dim = dim3(block[0], block[1]);
    } else if (dim == 3) {
        *grid_dim  = dim3(grid[0], grid[1], grid[2]);
        *block_dim = dim3(block[0], block[1], block[2]);
    }
    return 0;
}

// Launch the requested scatter/gather/scatter-gather kernel at the requested
// vector length and return the elapsed kernel time in milliseconds (0 on bad
// launch configuration). `shmem` is forwarded as dynamic shared memory bytes.
// Exits the process on an unsupported vector length or kernel enum.
extern "C" float cuda_sg_wrapper(enum sg_kernel kernel, size_t vector_len,
                                 uint dim, uint* grid, uint* block,
                                 double* target, double *source,
                                 long* ti, long* si, unsigned int shmem)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // Map the runtime vector length onto the matching template instantiation.
    #define SG_DISPATCH(KERN)                                                   \
        do {                                                                    \
            switch (vector_len) {                                               \
            case 1:    KERN<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);    break; \
            case 2:    KERN<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);    break; \
            case 4:    KERN<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);    break; \
            case 5:    KERN<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);    break; \
            case 8:    KERN<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);    break; \
            case 16:   KERN<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);   break; \
            case 32:   KERN<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);   break; \
            case 64:   KERN<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);   break; \
            case 128:  KERN<128><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);  break; \
            case 256:  KERN<256><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);  break; \
            case 512:  KERN<512><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);  break; \
            case 1024: KERN<1024><<<grid_dim,block_dim,shmem>>>(target, source, ti, si); break; \
            case 2048: KERN<2048><<<grid_dim,block_dim,shmem>>>(target, source, ti, si); break; \
            case 4096: KERN<4096><<<grid_dim,block_dim,shmem>>>(target, source, ti, si); break; \
            default:                                                            \
                printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");                   \
                exit(1);                                                        \
            }                                                                   \
        } while (0)

    if (kernel == SCATTER) {
        SG_DISPATCH(scatter_t);
    } else if (kernel == GATHER) {
        SG_DISPATCH(gather_t);
    } else if (kernel == GS) {
        SG_DISPATCH(sg_t);
    } else {
        printf("ERROR UNRECOGNIZED KERNEL\n");
        exit(1);
    }
    #undef SG_DISPATCH

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);

    // Bug fix: the events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return time_ms;
}

//assume block size >= index buffer size
//assume index buffer size divides block size
// Block-based scatter: the first V threads of a block stage the index pattern in
// shared memory; each group of V threads then scatters the indices (as values)
// into its own window of `src`, windows being `delta` doubles apart.
// `idx_len` and `wpb` are currently unused (work-per-block loop is disabled).
template<int V>
__global__ void scatter_block(double *src, sgIdx_t* idx, int idx_len,
                              size_t delta, int wpb, char validate)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: when blockDim.x exceeds the warp size, warps other than the
    // first could previously read idx_shared before it was populated.
    __syncthreads();

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    double *src_loc = src + (bid * ngatherperblock + gatherid) * delta;

    //for (int i = 0; i < wpb; i++) {
        src_loc[idx_shared[tid % V]] = idx_shared[tid % V];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
}

//assume block size >= index buffer size
//assume index buffer size divides block size
// Randomized block scatter: like scatter_block, but each group of V threads
// picks a pseudo-random window id via curand instead of its own group id.
// All threads with the same gatherid derive the same curand sequence/offset,
// so they agree on the window.
template<int V>
__global__ void scatter_block_random(double *src, sgIdx_t* idx, int idx_len,
                                     size_t delta, int wpb, size_t seed, size_t n)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
    unsigned long long offset = gatherid;
    curandState_t state;
    curand_init(seed, sequence, offset, &state); //everyone with same gather id should get same src_loc
    // NOTE(review): curand_uniform returns a value in (0,1], so this can be
    // exactly n; whether that stays in bounds depends on the allocation — verify.
    int random_gatherid = (int)(n * curand_uniform(&state));

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffer.
    __syncthreads();

    double *src_loc = src + (bid * ngatherperblock + random_gatherid) * delta;

    //for (int i = 0; i < wpb; i++) {
        src_loc[idx_shared[tid % V]] = idx_shared[tid % V];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
}

//V2 = 8
//assume block size >= index buffer size
//assume index buffer size divides block size
// Block-based gather: each group of V threads gathers from its own window of
// `src` using the shared index pattern. The `x == 0.5` store exists only to
// keep the gather from being optimized away. Under VALIDATE the gathered value
// is published to final_gather_data_dev instead.
template<int V>
__global__ void gather_block(double *src, sgIdx_t* idx, int idx_len,
                             size_t delta, int wpb, char validate)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffer.
    __syncthreads();

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    double *src_loc = src + (bid * ngatherperblock + gatherid) * delta;

#ifdef VALIDATE
    if (validate) {
        final_gather_data_dev = src_loc[idx_shared[tid % V]];
        return;
    }
#endif

    double x;
    //for (int i = 0; i < wpb; i++) {
        x = src_loc[idx_shared[tid % V]];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
    if (x == 0.5) src[0] = x; // defeats dead-code elimination of the gather
}

// Morton-ordered variant of gather_block: the per-block window is selected
// through the `order` permutation array instead of the natural group id.
template<int V>
__global__ void gather_block_morton(double *src, sgIdx_t* idx, int idx_len,
                                    size_t delta, int wpb, uint32_t *order,
                                    char validate)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffer.
    __syncthreads();

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    double *src_loc = src + (bid * ngatherperblock + order[gatherid]) * delta;

#ifdef VALIDATE
    if (validate) {
        final_gather_data_dev = src_loc[idx_shared[tid % V]];
        return;
    }
#endif

    double x;
    //for (int i = 0; i < wpb; i++) {
        x = src_loc[idx_shared[tid % V]];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
    if (x == 0.5) src[0] = x; // defeats dead-code elimination of the gather
}

// Strided variant of gather_block: reads at stride*(tid%V) inside the window
// and ignores the `idx` pattern entirely (no shared staging needed).
// NOTE(review): unlike gather_block, the VALIDATE branch does not return early
// here — confirm whether that asymmetry is intentional.
template<int V>
__global__ void gather_block_stride(double *src, sgIdx_t* idx, int idx_len,
                                    size_t delta, int wpb, int stride,
                                    char validate)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    double *src_loc = src + (bid * ngatherperblock + gatherid) * delta;

#ifdef VALIDATE
    if (validate) {
        final_gather_data_dev = src_loc[stride * (tid % V)];
    }
#endif

    double x;
    //for (int i = 0; i < wpb; i++) {
        x = src_loc[stride * (tid % V)];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
    if (x == 0.5) src[0] = x; // defeats dead-code elimination of the gather
}

// Randomized block gather: window id drawn via curand, mirroring
// scatter_block_random; see the out-of-range note there.
template<int V>
__global__ void gather_block_random(double *src, sgIdx_t* idx, int idx_len,
                                    size_t delta, int wpb, size_t seed, size_t n)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
    unsigned long long offset = gatherid;
    curandState_t state;
    curand_init(seed, sequence, offset, &state); //everyone with same gather id should get same src_loc
    int random_gatherid = (int)(n * curand_uniform(&state));

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffer.
    __syncthreads();

    double *src_loc = src + (bid * ngatherperblock + random_gatherid) * delta;

    double x;
    //for (int i = 0; i < wpb; i++) {
        x = src_loc[idx_shared[tid % V]];
        //src_loc[idx_shared[tid%V]] = 1337.;
        //src_loc += delta;
    //}
    if (x == 0.5) src[0] = x; // defeats dead-code elimination of the gather
}

//todo -- add WRAP
// One-thread-per-gather variant: each thread performs `wpt` gathers of V
// elements, advancing its window by `delta` per iteration. The conditional
// final store (dummy != 0) keeps the loop from being optimized away; the
// wrappers currently always pass dummy == 0.
template<int V>
__global__ void gather_new(double* source, sgIdx_t* idx, size_t delta,
                           int dummy, int wpt)
{
    __shared__ int idx_shared[V];
    int tid = threadIdx.x;
    //int bid = blockIdx.x;
    //int nblk = blockDim.x;

    if (tid < V) {
        idx_shared[tid] = idx[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffer.
    __syncthreads();

    int gid = (blockIdx.x * blockDim.x + threadIdx.x);
    double *sl = source + wpt * gid * delta;

    double buf[V];
    for (int j = 0; j < wpt; j++) {
        for (int i = 0; i < V; i++) {
            buf[i] = sl[idx_shared[i]];
            //source[i+gid*delta] = 8;
            //sl[i] = sl[idx[i]];
        }
        sl = sl + delta;
    }

    if (dummy) {
        sl[idx_shared[0]] = buf[dummy];
    }
    // (stale commented-out debug prints removed)
}

#define INSTANTIATE2(V)\
template __global__ void gather_new<V>(double* source, sgIdx_t* idx, size_t delta, int dummy, int wpt); \
template __global__ void gather_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate);\
template __global__ void gather_block_morton<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, uint32_t *order, char validate);\
template __global__ void gather_block_stride<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, int stride, char validate);\
template __global__ void scatter_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, char validate); \
template __global__ void gather_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n); \
template __global__ void scatter_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n);
//INSTANTIATE2(1);
//INSTANTIATE2(2);
//INSTANTIATE2(4);
//INSTANTIATE2(5);
INSTANTIATE2(8);
INSTANTIATE2(16);
INSTANTIATE2(32);
INSTANTIATE2(64);
INSTANTIATE2(73);
INSTANTIATE2(128);
INSTANTIATE2(256);
INSTANTIATE2(512);
INSTANTIATE2(1024);
INSTANTIATE2(2048);
INSTANTIATE2(4096);

// Dispatch a block-kernel template over every supported pattern length.
// Replaces several near-identical copy-pasted if/else chains; 73 is included
// everywhere (all kernels are instantiated for it), which also fixes the
// scatter/random/new paths that previously rejected lengths the templates
// supported (73, and 32/128 for gather_new).
#define PAT_LEN_DISPATCH(KERN, LEN, ...)                                  \
    do {                                                                  \
        switch (LEN) {                                                    \
        case 8:    KERN<8><<<grid_dim, block_dim>>>(__VA_ARGS__);    break; \
        case 16:   KERN<16><<<grid_dim, block_dim>>>(__VA_ARGS__);   break; \
        case 32:   KERN<32><<<grid_dim, block_dim>>>(__VA_ARGS__);   break; \
        case 64:   KERN<64><<<grid_dim, block_dim>>>(__VA_ARGS__);   break; \
        case 73:   KERN<73><<<grid_dim, block_dim>>>(__VA_ARGS__);   break; \
        case 128:  KERN<128><<<grid_dim, block_dim>>>(__VA_ARGS__);  break; \
        case 256:  KERN<256><<<grid_dim, block_dim>>>(__VA_ARGS__);  break; \
        case 512:  KERN<512><<<grid_dim, block_dim>>>(__VA_ARGS__);  break; \
        case 1024: KERN<1024><<<grid_dim, block_dim>>>(__VA_ARGS__); break; \
        case 2048: KERN<2048><<<grid_dim, block_dim>>>(__VA_ARGS__); break; \
        case 4096: KERN<4096><<<grid_dim, block_dim>>>(__VA_ARGS__); break; \
        default:                                                          \
            printf("ERROR NOT SUPPORTED: %zu\n", (size_t)(LEN));          \
            break;                                                        \
        }                                                                 \
    } while (0)

// Copy pattern (and morton order) to the device, launch the requested block
// kernel, and return the elapsed kernel time in ms (0 on bad launch config).
// GATHER selects morton / strided / plain variants; under VALIDATE the final
// block/thread ids and gathered value are copied back through the out-params.
extern "C" float cuda_block_wrapper(uint dim, uint* grid, uint* block,
        enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat,
        size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt,
        size_t morton, uint32_t *order, uint32_t *order_dev, int stride,
        int *final_block_idx, int *final_thread_idx, double *final_gather_data,
        char validate)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t) * pat_len, cudaMemcpyHostToDevice);
    cudaMemcpy(order_dev, order, sizeof(uint32_t) * n, cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // KERNEL
    if (kernel == GATHER) {
        if (morton) {
            PAT_LEN_DISPATCH(gather_block_morton, pat_len,
                             source, pat_dev, pat_len, delta, wpt, order_dev, validate);
        } else if (stride >= 0) {
            PAT_LEN_DISPATCH(gather_block_stride, pat_len,
                             source, pat_dev, pat_len, delta, wpt, stride, validate);
        } else {
            PAT_LEN_DISPATCH(gather_block, pat_len,
                             source, pat_dev, pat_len, delta, wpt, validate);
        }
        cudaMemcpyFromSymbol(final_gather_data, final_gather_data_dev,
                             sizeof(double), 0, cudaMemcpyDeviceToHost);
    } else if (kernel == SCATTER) {
        PAT_LEN_DISPATCH(scatter_block, pat_len,
                         source, pat_dev, pat_len, delta, wpt, validate);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaMemcpyFromSymbol(final_block_idx, final_block_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Bug fix: events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time_ms;
}

// As cuda_block_wrapper, but launches the curand-randomized window variants.
// `seed` feeds curand_init; `n` bounds the random window id.
extern "C" float cuda_block_random_wrapper(uint dim, uint* grid, uint* block,
        enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat,
        size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt,
        size_t seed)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t) * pat_len, cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // KERNEL
    if (kernel == GATHER) {
        PAT_LEN_DISPATCH(gather_block_random, pat_len,
                         source, pat_dev, pat_len, delta, wpt, seed, n);
    } else if (kernel == SCATTER) {
        PAT_LEN_DISPATCH(scatter_block_random, pat_len,
                         source, pat_dev, pat_len, delta, wpt, seed, n);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Bug fix: events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time_ms;
}

// Launch gather_new (multiple gathers per thread) and time it. The `kernel`
// argument is accepted for interface parity but not inspected here.
extern "C" float cuda_new_wrapper(uint dim, uint* grid, uint* block,
        enum sg_kernel kernel, double *source, sgIdx_t* pat_dev, sgIdx_t* pat,
        size_t pat_len, size_t delta, size_t n, size_t wrap, int wpt)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t) * pat_len, cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // KERNEL (dummy = 0: the anti-DCE store in gather_new stays disabled)
    PAT_LEN_DISPATCH(gather_new, pat_len,
                     source, pat_dev, (long)delta, 0, wpt);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Bug fix: events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time_ms;
}

// (removed: a large stale commented-out copy of an earlier cuda_sg_wrapper
// implementation that duplicated the dispatch logic above)

// Fused gather+scatter block kernel: each group of V threads gathers from its
// window of `source` via pat_gath and scatters into its window of `target` via
// pat_scat. Windows advance by delta_gather / delta_scatter per group.
template<int V>
__global__ void sg_block(double *source, double* target, sgIdx_t* pat_gath,
                         sgIdx_t* pat_scat, spSize_t pat_len,
                         size_t delta_gather, size_t delta_scatter,
                         int wpt, char validate)
{
    __shared__ int idx_gath[V];
    __shared__ int idx_scat[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    if (tid < V) {
        idx_gath[tid] = pat_gath[tid];
        idx_scat[tid] = pat_scat[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffers.
    __syncthreads();

    int ngatherperblock = blockDim.x / V;
    int nscatterperblock = ngatherperblock;

    int gatherid = tid / V;
    int scatterid = gatherid;

    double *source_loc = source + (bid * ngatherperblock + gatherid) * delta_gather;
    double *target_loc = target + (bid * nscatterperblock + scatterid) * delta_scatter;

#ifdef VALIDATE
    if (validate) {
        final_gather_data_dev = source_loc[idx_gath[tid % V]];
        return;
    }
#endif

    target_loc[idx_scat[tid % V]] = source_loc[idx_gath[tid % V]];
}

#define INSTANTIATE3(V)\
template __global__ void sg_block<V>(double* source, double* target, sgIdx_t* pat_gath, sgIdx_t* pat_scat, spSize_t pat_len, size_t delta_gather, size_t delta_scatter, int wpt, char validate);
//INSTANTIATE3(1);
//INSTANTIATE3(2);
//INSTANTIATE3(4);
//INSTANTIATE3(5);
INSTANTIATE3(8);
INSTANTIATE3(16);
INSTANTIATE3(32);
INSTANTIATE3(64);
INSTANTIATE3(73);
INSTANTIATE3(128);
INSTANTIATE3(256);
INSTANTIATE3(512);
INSTANTIATE3(1024);
INSTANTIATE3(2048);
INSTANTIATE3(4096);

// Launch sg_block with patterns taken from `rc` and return elapsed ms.
// Dispatches on rc->pattern_gather_len; reports launch errors via
// cudaGetLastError and copies VALIDATE results back through the out-params.
extern "C" float cuda_block_sg_wrapper(uint dim, uint* grid, uint* block,
        double *source, double *target, struct run_config* rc,
        sgIdx_t* pat_gath_dev, sgIdx_t* pat_scat_dev, int wpt,
        int *final_block_idx, int *final_thread_idx,
        double *final_gather_data, char validate)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaMemcpy(pat_gath_dev, rc->pattern_gather,
               sizeof(sgIdx_t) * rc->pattern_gather_len, cudaMemcpyHostToDevice);
    cudaMemcpy(pat_scat_dev, rc->pattern_scatter,
               sizeof(sgIdx_t) * rc->pattern_scatter_len, cudaMemcpyHostToDevice);

    size_t delta_gather  = rc->delta_gather;
    size_t delta_scatter = rc->delta_scatter;
    spSize_t pat_len     = rc->pattern_gather_len;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // KERNEL
    PAT_LEN_DISPATCH(sg_block, pat_len,
                     source, target, pat_gath_dev, pat_scat_dev, pat_len,
                     delta_gather, delta_scatter, wpt, validate);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));

    cudaMemcpyFromSymbol(final_block_idx, final_block_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Bug fix: events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time_ms;
}

// Double-indexed block scatter: writes target[tid%V] into
// source_loc[outer_pat[inner_pat[tid%V]]] for each group of V threads.
template<int V>
__global__ void multiscatter_block(double *source, double* target,
                                   sgIdx_t* outer_pat, sgIdx_t* inner_pat,
                                   spSize_t pat_len, size_t delta,
                                   int wpt, char validate)
{
    __shared__ int idx[V];
    __shared__ int idx_scat[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;

#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev  = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif

    if (tid < V) {
        idx[tid]      = outer_pat[tid];
        idx_scat[tid] = inner_pat[tid];
    }
    // Bug fix: barrier so every warp sees the fully-populated index buffers.
    __syncthreads();

    int ngatherperblock = blockDim.x / V;
    int gatherid = tid / V;

    double *source_loc = source + (bid * ngatherperblock + gatherid) * delta;

#ifdef VALIDATE
    if (validate) {
        final_gather_data_dev = source_loc[tid % V];
        return;
    }
#endif

    source_loc[idx[idx_scat[tid % V]]] = target[tid % V];
}

#define INSTANTIATE4(V)\
template __global__ void multiscatter_block<V>(double* source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate);
//INSTANTIATE4(1);
//INSTANTIATE4(2);
//INSTANTIATE4(4);
//INSTANTIATE4(5);
INSTANTIATE4(8);
INSTANTIATE4(16);
INSTANTIATE4(32);
INSTANTIATE4(64);
INSTANTIATE4(73);
INSTANTIATE4(128);
INSTANTIATE4(256);
INSTANTIATE4(512);
INSTANTIATE4(1024);
INSTANTIATE4(2048);
INSTANTIATE4(4096);

// Launch multiscatter_block with patterns from `rc` and return elapsed ms.
// Dispatches on rc->pattern_scatter_len.
extern "C" float cuda_block_multiscatter_wrapper(uint dim, uint* grid, uint* block,
        double *source, double *target, struct run_config* rc,
        sgIdx_t* outer_pat, sgIdx_t* inner_pat, int wpt,
        int *final_block_idx, int *final_thread_idx,
        double *final_gather_data, char validate)
{
    dim3 grid_dim, block_dim;
    cudaEvent_t start, stop;

    if (translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;

    cudaMemcpy(outer_pat, rc->pattern,
               sizeof(sgIdx_t) * rc->pattern_len, cudaMemcpyHostToDevice);
    cudaMemcpy(inner_pat, rc->pattern_scatter,
               sizeof(sgIdx_t) * rc->pattern_scatter_len, cudaMemcpyHostToDevice);

    size_t delta     = rc->delta;
    spSize_t pat_len = rc->pattern_scatter_len;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaDeviceSynchronize();
    cudaEventRecord(start);

    // KERNEL
    PAT_LEN_DISPATCH(multiscatter_block, pat_len,
                     source, target, outer_pat, inner_pat, pat_len,
                     delta, wpt, validate);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));

    cudaMemcpyFromSymbol(final_block_idx, final_block_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev,
                         sizeof(int), 0, cudaMemcpyDeviceToHost);

    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Bug fix: events were previously leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time_ms;
}

#undef PAT_LEN_DISPATCH

template<int V>
__global__ void multigather_block(double *source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate)
{
    __shared__ int idx[V];
    __shared__ int idx_gath[V];
    int tid = threadIdx.x;
    int bid = blockIdx.x;
#ifdef VALIDATE
    if (validate) {
        final_block_idx_dev = blockIdx.x;
        final_thread_idx_dev = threadIdx.x;
    }
#endif
    if (tid < V) { idx[tid]
= outer_pat[tid]; idx_gath[tid] = inner_pat[tid]; } int ngatherperblock = blockDim.x / V; int gatherid = tid / V; double *source_loc = source + (bid*ngatherperblock+gatherid)*delta; #ifdef VALIDATE if (validate) { final_gather_data_dev = source_loc[idx[idx_gath[tid%V]]]; return; } #endif target[tid%V] = source_loc[idx[idx_gath[tid%V]]]; } #define INSTANTIATE5(V)\ template __global__ void multigather_block<V>(double* source, double* target, sgIdx_t* outer_pat, sgIdx_t* inner_pat, spSize_t pat_len, size_t delta, int wpt, char validate); //INSTANTIATE5(1); //INSTANTIATE5(2); //INSTANTIATE5(4); //INSTANTIATE5(5); INSTANTIATE5(8); INSTANTIATE5(16); INSTANTIATE5(32); INSTANTIATE5(64); INSTANTIATE5(73); INSTANTIATE5(128); INSTANTIATE5(256); INSTANTIATE5(512); INSTANTIATE5(1024); INSTANTIATE5(2048); INSTANTIATE5(4096); extern "C" float cuda_block_multigather_wrapper(uint dim, uint* grid, uint* block, double *source, double *target, struct run_config* rc, sgIdx_t* outer_pat, sgIdx_t* inner_pat, int wpt, int *final_block_idx, int *final_thread_idx, double *final_gather_data, char validate) { dim3 grid_dim, block_dim; cudaEvent_t start, stop; if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0; cudaMemcpy(outer_pat, rc->pattern, sizeof(sgIdx_t)*rc->pattern_len, cudaMemcpyHostToDevice); cudaMemcpy(inner_pat, rc->pattern_gather, sizeof(sgIdx_t)*rc->pattern_gather_len, cudaMemcpyHostToDevice); size_t delta = rc->delta; spSize_t pat_len = rc->pattern_gather_len; cudaEventCreate(&start); cudaEventCreate(&stop); cudaDeviceSynchronize(); cudaEventRecord(start); // KERNEL if (pat_len == 8) { multigather_block<8><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 16) { multigather_block<16><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 32) { multigather_block<32><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, 
delta, wpt, validate); }else if (pat_len == 64) { multigather_block<64><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 73) { multigather_block<73><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 128) { multigather_block<128><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 256) { multigather_block<256><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 512) { multigather_block<512><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 1024) { multigather_block<1024><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 2048) { multigather_block<2048><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else if (pat_len == 4096) { multigather_block<4096><<<grid_dim, block_dim>>>(source, target, outer_pat, inner_pat, pat_len, delta, wpt, validate); }else { printf("ERROR NOT SUPPORTED: %zu\n", pat_len); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpyFromSymbol(final_block_idx, final_block_idx_dev, sizeof(int), 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(final_thread_idx, final_thread_idx_dev, sizeof(int), 0, cudaMemcpyDeviceToHost); float time_ms = 0; cudaEventElapsedTime(&time_ms, start, stop); return time_ms; }
610e76cedeb5ad56b05548e18b3aa4394c851f4b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <include/utils.cuh> #include <layers/element_wise_function.hpp> #include <layers/sigmoid_layer.hpp> #include <linalg/binary_op.cuh> #include <linalg/unary_op.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { template <typename T> __device__ T exponential(T in) { return exp(in); } template __device__ float exponential<float>(float); template <> __device__ __half exponential(__half in) { return hexp(in); } template <> __device__ __half2 exponential<__half2>(__half2 in) { return h2exp(in); } template <typename T> SigmoidLayer<T>::SigmoidLayer(const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { assert(in_tensor.get_num_elements() == out_tensor.get_num_elements()); assert(in_tensor.get_num_elements() % 2 == 0); in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); } template <typename T> void SigmoidLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); auto fop = [] __device__(T in) { return T(1) / (T(1) + exponential(-in)); }; MLCommon::LinAlg::unaryOp(out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(), len, fop, get_gpu().get_stream()); #ifndef NDEBUG hipDeviceSynchronize(); 
CK_CUDA_THROW_(hipGetLastError()); #endif } template <> void SigmoidLayer<__half>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); const __half2 one2 = __float2half2_rn(1.0f); auto fop = [one2] __device__(__half2 in) { return one2 / (one2 + exponential(-in)); }; MLCommon::LinAlg::unaryOp(reinterpret_cast<__half2*>(out_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), len / 2, fop, get_gpu().get_stream()); #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template <typename T> void SigmoidLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); auto bop = [] __device__(T d_out, T d_in) { T y = T(1) / (T(1) + exponential(-d_in)); return d_out * y * (T(1) - y); }; MLCommon::LinAlg::binaryOp(in_tensors_[0].get_ptr(), out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(), len, bop, get_gpu().get_stream()); #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template <> void SigmoidLayer<__half>::bprop() { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); const __half2 one2 = __float2half2_rn(1.0f); auto bop = [one2] __device__(__half2 d_out, __half2 d_in) { __half2 y = one2 / (one2 + exponential(-d_in)); return d_out * y * (one2 - y); }; MLCommon::LinAlg::binaryOp(reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(out_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), len / 2, bop, get_gpu().get_stream()); #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template class SigmoidLayer<float>; template class SigmoidLayer<__half>; } // namespace HugeCTR
610e76cedeb5ad56b05548e18b3aa4394c851f4b.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <functional> #include <include/utils.cuh> #include <layers/element_wise_function.hpp> #include <layers/sigmoid_layer.hpp> #include <linalg/binary_op.cuh> #include <linalg/unary_op.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { template <typename T> __device__ T exponential(T in) { return exp(in); } template __device__ float exponential<float>(float); template <> __device__ __half exponential(__half in) { return hexp(in); } template <> __device__ __half2 exponential<__half2>(__half2 in) { return h2exp(in); } template <typename T> SigmoidLayer<T>::SigmoidLayer(const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource) { assert(in_tensor.get_num_elements() == out_tensor.get_num_elements()); assert(in_tensor.get_num_elements() % 2 == 0); in_tensors_.push_back(in_tensor); out_tensors_.push_back(out_tensor); } template <typename T> void SigmoidLayer<T>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); auto fop = [] __device__(T in) { return T(1) / (T(1) + exponential(-in)); }; MLCommon::LinAlg::unaryOp(out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(), len, fop, get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <> void 
SigmoidLayer<__half>::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); const __half2 one2 = __float2half2_rn(1.0f); auto fop = [one2] __device__(__half2 in) { return one2 / (one2 + exponential(-in)); }; MLCommon::LinAlg::unaryOp(reinterpret_cast<__half2*>(out_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), len / 2, fop, get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <typename T> void SigmoidLayer<T>::bprop() { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); auto bop = [] __device__(T d_out, T d_in) { T y = T(1) / (T(1) + exponential(-d_in)); return d_out * y * (T(1) - y); }; MLCommon::LinAlg::binaryOp(in_tensors_[0].get_ptr(), out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(), len, bop, get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <> void SigmoidLayer<__half>::bprop() { CudaDeviceContext context(get_device_id()); int len = in_tensors_[0].get_num_elements(); const __half2 one2 = __float2half2_rn(1.0f); auto bop = [one2] __device__(__half2 d_out, __half2 d_in) { __half2 y = one2 / (one2 + exponential(-d_in)); return d_out * y * (one2 - y); }; MLCommon::LinAlg::binaryOp(reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(out_tensors_[0].get_ptr()), reinterpret_cast<__half2*>(in_tensors_[0].get_ptr()), len / 2, bop, get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template class SigmoidLayer<float>; template class SigmoidLayer<__half>; } // namespace HugeCTR
ec6b8f24108277fabf13f4bd9cc81b7fa520439b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "c++_feature_test.cuh" #include <stdlib.h> #include <stdio.h> #include <bitset> #include <stdio.h> #define THREADS_PER_BLOCK 256 #if __CUDA_ARCH__ >= 200 #define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK) #define MY_KERNEL_MIN_BLOCKS 3 #else #define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK #define MY_KERNEL_MIN_BLOCKS 2 #endif void BinaryBitset(int n) { cout<<bitset<sizeof(int) * 8>(n)<<endl; } __global__ void vote_all(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __all_sync(0xffffffff, temp > 100); } __global__ void vote_any(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __any_sync(0xffffffff, temp > 100); } __global__ void vote_ballot(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __ballot_sync(0xffffffff, temp > 100); } __global__ void activemask(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } b[tid] = __activemask(); } int test_feature() { int *h_a, *h_b, *d_a, *d_b; int n = 256, m = 10; int nsize = n * sizeof(int); h_a = (int *)malloc(nsize); h_b = (int *)malloc(nsize); int vote = 0; for (int i = 0; i < n; ++i) { h_a[i] = i; //cout << h_a[i] << endl; } memset(h_b, 0, nsize); hipMalloc(&d_a, nsize); hipMalloc(&d_b, nsize); hipMemcpy(d_a, h_a, nsize, hipMemcpyHostToDevice); hipMemset(d_b, 0, nsize); vote_all << <1, 256 >> > (d_a, d_b, n); hipMemcpy(h_b, d_b, nsize, hipMemcpyDeviceToHost); printf("vote_all():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf("%d", h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; vote = 0; vote_any << <1, 256 >> > (d_a, d_b, n); hipMemcpy(h_b, d_b, nsize, hipMemcpyDeviceToHost); printf("vote_any():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } printf("%d", h_b[i]); if 
(h_b[i] == 0) vote += 1; } printf("\n"); cout << "vote-----" << vote << endl; vote_ballot << <1, 256 >> > (d_a, d_b, n); hipMemcpy(h_b, d_b, nsize, hipMemcpyDeviceToHost); vote = 0; printf("vote_ballot():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf(",%d", (uint)h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; vote = 0; activemask << <1, 256 >> > (d_a, d_b, n); hipMemcpy(h_b, d_b, nsize, hipMemcpyDeviceToHost); vote = 0; printf("activemask():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf(",%d", (uint)h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; BinaryBitset(-32); BinaryBitset(-1); return 0; }
ec6b8f24108277fabf13f4bd9cc81b7fa520439b.cu
#include "c++_feature_test.cuh" #include <stdlib.h> #include <stdio.h> #include <bitset> #include <stdio.h> #define THREADS_PER_BLOCK 256 #if __CUDA_ARCH__ >= 200 #define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK) #define MY_KERNEL_MIN_BLOCKS 3 #else #define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK #define MY_KERNEL_MIN_BLOCKS 2 #endif void BinaryBitset(int n) { cout<<bitset<sizeof(int) * 8>(n)<<endl; } __global__ void vote_all(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __all_sync(0xffffffff, temp > 100); } __global__ void vote_any(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __any_sync(0xffffffff, temp > 100); } __global__ void vote_ballot(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } int temp = a[tid]; b[tid] = __ballot_sync(0xffffffff, temp > 100); } __global__ void activemask(int *a, int *b, int n) { int tid = threadIdx.x; if (tid > n) { return; } b[tid] = __activemask(); } int test_feature() { int *h_a, *h_b, *d_a, *d_b; int n = 256, m = 10; int nsize = n * sizeof(int); h_a = (int *)malloc(nsize); h_b = (int *)malloc(nsize); int vote = 0; for (int i = 0; i < n; ++i) { h_a[i] = i; //cout << h_a[i] << endl; } memset(h_b, 0, nsize); cudaMalloc(&d_a, nsize); cudaMalloc(&d_b, nsize); cudaMemcpy(d_a, h_a, nsize, cudaMemcpyHostToDevice); cudaMemset(d_b, 0, nsize); vote_all << <1, 256 >> > (d_a, d_b, n); cudaMemcpy(h_b, d_b, nsize, cudaMemcpyDeviceToHost); printf("vote_all():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf("%d", h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; vote = 0; vote_any << <1, 256 >> > (d_a, d_b, n); cudaMemcpy(h_b, d_b, nsize, cudaMemcpyDeviceToHost); printf("vote_any():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } printf("%d", h_b[i]); if (h_b[i] == 0) vote += 1; } printf("\n"); cout << "vote-----" << vote << endl; 
vote_ballot << <1, 256 >> > (d_a, d_b, n); cudaMemcpy(h_b, d_b, nsize, cudaMemcpyDeviceToHost); vote = 0; printf("vote_ballot():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf(",%d", (uint)h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; vote = 0; activemask << <1, 256 >> > (d_a, d_b, n); cudaMemcpy(h_b, d_b, nsize, cudaMemcpyDeviceToHost); vote = 0; printf("activemask():"); for (int i = 0; i < n; ++i) { if (!(i % m)) { printf("\n"); } if (h_b[i] == 0) vote += 1; printf(",%d", (uint)h_b[i]); } printf("\n"); cout << "vote-----" << vote << endl; BinaryBitset(-32); BinaryBitset(-1); return 0; }
eb52241f7b435947b733742871446aa499797773.hip
// !!! This is a file automatically generated by hipify!!! #include <blas_quda.h> #include <tune_quda.h> #include <float_vector.h> #include <color_spinor_field_order.h> #include <uint_to_char.h> //#define QUAD_SUM #ifdef QUAD_SUM #include <dbldbl.h> #endif #include <cub_helper.cuh> template<typename> struct ScalarType { }; template<> struct ScalarType<double> { typedef double type; }; template<> struct ScalarType<double2> { typedef double type; }; template<> struct ScalarType<double3> { typedef double type; }; template<typename> struct Vec2Type { }; template<> struct Vec2Type<double> { typedef double2 type; }; #ifdef QUAD_SUM #define QudaSumFloat doubledouble #define QudaSumFloat2 doubledouble2 #define QudaSumFloat3 doubledouble3 template<> struct ScalarType<doubledouble> { typedef doubledouble type; }; template<> struct ScalarType<doubledouble2> { typedef doubledouble type; }; template<> struct ScalarType<doubledouble3> { typedef doubledouble type; }; template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; }; #else #define QudaSumFloat double #define QudaSumFloat2 double2 #define QudaSumFloat3 double3 #endif // work around for Fermi #if (__COMPUTE_CAPABILITY__ < 300) #undef MAX_MULTI_BLAS_N #define MAX_MULTI_BLAS_N 2 #endif static void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b) { if (a.Length() != b.Length()) errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); if (a.Stride() != b.Stride()) errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); } static struct { const char *vol_str; const char *aux_str; char aux_tmp[quda::TuneKey::aux_n]; } blasStrings; namespace quda { // hooks into tune.cpp variables for policy tuning typedef std::map<TuneKey, TuneParam> map; const map& getTuneCache(); void disableProfileCount(); void enableProfileCount(); void setPolicyTuning(bool); namespace blas { hipStream_t* getStream(); hipEvent_t* getReduceEvent(); template <int writeX, int writeY, int writeZ, int writeW> 
struct write { static constexpr int X = writeX; static constexpr int Y = writeY; static constexpr int Z = writeZ; static constexpr int W = writeW; }; namespace reduce { namespace multi { #define BLAS_SPINOR // do not include ghost functions in Spinor class to reduce parameter space overhead #include <texture.h> } #include <multi_reduce_core.cuh> #include <multi_reduce_core.h> } // namespace reduce /** Base class from which all reduction functors should derive. */ template <int NXZ, typename ReduceType, typename Float2, typename FloatN> struct MultiReduceFunctor { //! pre-computation routine called before the "M-loop" virtual __device__ __host__ void pre() { ; } //! where the reduction is usually computed and any auxiliary operations virtual __device__ __host__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j) = 0; //! post-computation routine called after the "M-loop" virtual __device__ __host__ void post(ReduceType &sum) { ; } }; /** Return the real dot product of x and y Broken at the moment---need to update reDotProduct with permuting, etc of cDotProduct below. 
*/ template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) { sum += (ReduceType)a.x*(ReduceType)b.x; sum += (ReduceType)a.y*(ReduceType)b.y; } template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) { sum += (ReduceType)a.x*(ReduceType)b.x; sum += (ReduceType)a.y*(ReduceType)b.y; } template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) { sum += (ReduceType)a.x*(ReduceType)b.x; sum += (ReduceType)a.y*(ReduceType)b.y; sum += (ReduceType)a.z*(ReduceType)b.z; sum += (ReduceType)a.w*(ReduceType)b.w; } template <int NXZ, typename ReduceType, typename Float2, typename FloatN> struct Dot : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> { typedef typename scalar<Float2>::type real; const int NYW; Dot(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; } __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j) { dot_<ReduceType>(sum,x,y); } static int streams() { return 2; } //! total number of input and output streams static int flops() { return 2; } //! 
flops per element }; void reDotProduct(double* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){ #ifndef SSTEP errorQuda("S-step code not built\n"); #else switch(x.size()){ case 1: reduce::multiReduceCuda<1,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 2: reduce::multiReduceCuda<2,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 3: reduce::multiReduceCuda<3,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 4: reduce::multiReduceCuda<4,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 5: reduce::multiReduceCuda<5,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 6: reduce::multiReduceCuda<6,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 7: reduce::multiReduceCuda<7,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 8: reduce::multiReduceCuda<8,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; /*case 9: reduce::multiReduceCuda<9,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 10: reduce::multiReduceCuda<10,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 11: reduce::multiReduceCuda<11,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 12: reduce::multiReduceCuda<12,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, 
y); break; case 13: reduce::multiReduceCuda<13,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 14: reduce::multiReduceCuda<14,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 15: reduce::multiReduceCuda<15,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break; case 16: reduce::multiReduceCuda<16,double,QudaSumFloat,Dot,0,0,0,0,false> (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y); break;*/ default: errorQuda("Unsupported vector size"); break; } #endif // SSTEP // do a single multi-node reduction only once we have computed all local dot products const int Nreduce = x.size()*y.size(); reduceDoubleArray((double*)result, Nreduce); } /** Returns complex-valued dot product of x and y */ template<typename ReduceType> __device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) { typedef typename ScalarType<ReduceType>::type scalar; sum.x += (scalar)a.x*(scalar)b.x; sum.x += (scalar)a.y*(scalar)b.y; sum.y += (scalar)a.x*(scalar)b.y; sum.y -= (scalar)a.y*(scalar)b.x; } template<typename ReduceType> __device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) { typedef typename ScalarType<ReduceType>::type scalar; sum.x += (scalar)a.x*(scalar)b.x; sum.x += (scalar)a.y*(scalar)b.y; sum.y += (scalar)a.x*(scalar)b.y; sum.y -= (scalar)a.y*(scalar)b.x; } template<typename ReduceType> __device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) { typedef typename ScalarType<ReduceType>::type scalar; sum.x += (scalar)a.x*(scalar)b.x; sum.x += (scalar)a.y*(scalar)b.y; sum.x += (scalar)a.z*(scalar)b.z; sum.x += (scalar)a.w*(scalar)b.w; sum.y += (scalar)a.x*(scalar)b.y; sum.y -= (scalar)a.y*(scalar)b.x; sum.y += (scalar)a.z*(scalar)b.w; sum.y -= (scalar)a.w*(scalar)b.z; } template <int NXZ, 
typename ReduceType, typename Float2, typename FloatN> struct Cdot : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> { typedef typename scalar<Float2>::type real; const int NYW; Cdot(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; } __device__ __host__ inline void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j) { cdot_<ReduceType>(sum,x,y); } static int streams() { return 2; } //! total number of input and output streams static int flops() { return 4; } //! flops per element }; template <int NXZ, typename ReduceType, typename Float2, typename FloatN> struct CdotCopy : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> { typedef typename scalar<Float2>::type real; const int NYW; CdotCopy(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; } __device__ __host__ inline void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j) { cdot_<ReduceType>(sum,x,y); if (i==j) w = y;} static int streams() { return 2; } //! total number of input and output streams static int flops() { return 4; } //! flops per element }; // This function does the outer product of dot products... in column major. // There's a function below called 'cDotProduct' that flips it to row major. 
template <template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerDiagonal, typename writeDiagonal, template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerOffDiagonal, typename writeOffDiagonal> void multiReduce_recurse(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y, std::vector<ColorSpinorField*>&z, std::vector<ColorSpinorField*>&w, int i_idx, int j_idx, bool hermitian, unsigned int tile_size) { if (y.size() > tile_size) // if greater than max single-kernel size, split and recurse { // Do the recurse first. Complex* result0 = &result[0]; Complex* result1 = &result[x.size()*(y.size()/2)]; std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2); std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end()); std::vector<ColorSpinorField*> w0(w.begin(), w.begin() + w.size()/2); std::vector<ColorSpinorField*> w1(w.begin() + w.size()/2, w.end()); multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result0, x, y0, z, w0, i_idx, 2*j_idx+0, hermitian, tile_size); multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result1, x, y1, z, w1, i_idx, 2*j_idx+1, hermitian, tile_size); } else { double2* cdot = new double2[x.size()*y.size()]; // if at bottom of recursion, return if on lower left if (x.size() <= tile_size && hermitian) { if (j_idx < i_idx) { return; } } reduce::coeff_array<Complex> a, b, c; if (x.size() <= tile_size) { switch(x.size()){ // COMMENT HERE FOR COMPILE TIME case 1: reduce::multiReduceCuda<1,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 2 case 2: reduce::multiReduceCuda<2,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 3 case 3: 
          // cases 3..16 follow the same pattern, each guarded so that only
          // instantiations up to MAX_MULTI_BLAS_N are compiled
          reduce::multiReduceCuda<3,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 4
        case 4:
          reduce::multiReduceCuda<4,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 5
        case 5:
          reduce::multiReduceCuda<5,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 6
        case 6:
          reduce::multiReduceCuda<6,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 7
        case 7:
          reduce::multiReduceCuda<7,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 8
        case 8:
          reduce::multiReduceCuda<8,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 9
        case 9:
          reduce::multiReduceCuda<9,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 10
        case 10:
          reduce::multiReduceCuda<10,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 11
        case 11:
          reduce::multiReduceCuda<11,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 12
        case 12:
          reduce::multiReduceCuda<12,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c,
             x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 13
        case 13:
          reduce::multiReduceCuda<13,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 14
        case 14:
          reduce::multiReduceCuda<14,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 15
        case 15:
          reduce::multiReduceCuda<15,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#if MAX_MULTI_BLAS_N >= 16
        case 16:
          reduce::multiReduceCuda<16,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
            (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
          break;
#endif //16
#endif //15
#endif //14
#endif //13
#endif //12
#endif //11
#endif //10
#endif // 9
#endif // 8
#endif // 7
#endif // 6
#endif // 5
#endif // 4
#endif // 3
#endif // 2
        }
      }
      else
      {
        // split the problem and recurse. Splitting in x requires
        // memory reshuffling (unless y = 1).
        // Use a few temporary variables.

        Complex* tmpmajor = new Complex[x.size()*y.size()];
        Complex* result0 = &tmpmajor[0];
        Complex* result1 = &tmpmajor[(x.size()/2)*y.size()];
        std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2);
        std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end());
        std::vector<ColorSpinorField*> z0(z.begin(), z.begin() + z.size()/2);
        std::vector<ColorSpinorField*> z1(z.begin() + z.size()/2, z.end());

        multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result0, x0, y, z0, w, 2*i_idx+0, j_idx, hermitian, tile_size);
        multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result1, x1, y, z1, w, 2*i_idx+1, j_idx, hermitian, tile_size);

        const unsigned int xlen0 = x.size()/2;
        const unsigned int xlen1 = x.size() - xlen0;
        const unsigned int ylen = y.size();

        // Copy back into result.
        // Interleave the two half-width column-major sub-results back into a
        // single column-major matrix.
        int count = 0, count0 = 0, count1 = 0;
        for (unsigned int i = 0; i < ylen; i++)
        {
          for (unsigned int j = 0; j < xlen0; j++)
            result[count++] = result0[count0++];
          for (unsigned int j = 0; j < xlen1; j++)
            result[count++] = result1[count1++];
        }

        delete[] tmpmajor;
      }

      // we are at the leaf of the binary tree (e.g., we ran the kernel): perform the row-to-column-major transpose here.
      if (x.size() <= tile_size)
      {
        const unsigned int xlen = x.size();
        const unsigned int ylen = y.size();
        for (unsigned int j = 0; j < xlen; j++)
          for (unsigned int i = 0; i < ylen; i++)
            result[i*xlen+j] = Complex(cdot[j*ylen + i].x, cdot[j*ylen+i].y);
      }
      delete[] cdot;
    }
  }

  // Policy-tuning wrapper: tunes only the tile size (stored in aux.x) used by
  // multiReduce_recurse; block/grid sizes are tuned by the constituent kernels
  // themselves.
  template <template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerDiagonal, typename writeDiagonal,
            template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerOffDiagonal, typename writeOffDiagonal>
  class TileSizeTune : public Tunable {
    typedef std::vector<ColorSpinorField*> vec;
    Complex *result;
    vec &x, &y, &z, &w;
    bool hermitian;
    bool Anorm;

    unsigned int sharedBytesPerThread() const { return 0; }
    unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }

    unsigned int max_tile_size;

  public:
    TileSizeTune(Complex *result, vec &x, vec &y, vec &z, vec &w, bool hermitian, bool Anorm = false)
      : result(result), x(x), y(y), z(z), w(w), hermitian(hermitian), Anorm(Anorm), max_tile_size(1)
    {
      // build the tuning key from the field aux strings plus problem shape
      // NOTE(review): size[8] leaves 7 digits + NUL for each dimension — confirm
      // that is sufficient for the largest vector sets used.
      strcpy(aux, "policy,");
      strcat(aux, x[0]->AuxString());
      strcat(aux, ",");
      strcat(aux, y[0]->AuxString());
      if (hermitian) strcat(aux, ",hermitian");
      if (Anorm) strcat(aux, ",Anorm");
      strcat(aux,",n=");
      char size[8];
      u64toa(size, x.size());
      strcat(aux,size);
      strcat(aux,",m=");
      u64toa(size, y.size());
      strcat(aux,size);

      // before we do policy tuning we must ensure the kernel
      // constituents have been tuned since we can't do nested tuning
      // FIXME this will break if the kernels are destructive - which they aren't here
      if (getTuning() && getTuneCache().find(tuneKey()) == getTuneCache().end()) {
	disableProfileCount(); // purely for profiling reasons, don't want to profile tunings.

	if ( x.size()==1 || y.size()==1 ) { // 1-d reduction

	  max_tile_size = ::min(MAX_MULTI_BLAS_N, (int)::max(x.size(), y.size()));

	  // Make sure constituents are tuned.
	  // pre-tune every candidate tile size so policy tuning never nests a
	  // kernel tune inside itself
	  for ( unsigned int tile_size=1; tile_size <= max_tile_size; tile_size++) {
	    multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>
	      (result, x, y, z, w, 0, 0, hermitian, tile_size);
	  }

	} else { // 2-d reduction

	  // max_tile_size should be set to the largest power of 2 less than
	  // MAX_MULTI_BLAS_N, since we have a requirement that the
	  // tile size is a power of 2.
	  unsigned int max_count = 0;
	  unsigned int tile_size_tmp = MAX_MULTI_BLAS_N;
	  while (tile_size_tmp != 1) { tile_size_tmp = tile_size_tmp >> 1; max_count++; }
	  tile_size_tmp = 1;
	  for (unsigned int i = 0; i < max_count; i++) { tile_size_tmp = tile_size_tmp << 1; }
	  max_tile_size = tile_size_tmp;

	  // Make sure constituents are tuned.
	  for ( unsigned int tile_size=1;
		tile_size <= max_tile_size && tile_size <= x.size() &&
		  (tile_size <= y.size() || y.size()==1) ; tile_size*=2) {
	    multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>
	      (result, x, y, z, w, 0, 0, hermitian, tile_size);
	  }

	}

	enableProfileCount();
	setPolicyTuning(true);
      }
    }

    virtual ~TileSizeTune() { setPolicyTuning(false); }

    void apply(const hipStream_t &stream) {
      TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());

      // tp.aux.x is where the tile size is stored. "tp" is the tuning struct.
      // it contains blocksize, grid size, etc. Since we're only tuning
      // a policy, we don't care about those sizes. That's why we only
      // tune "aux.x", which is the tile size.
      multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>
	(result, x, y, z, w, 0, 0, hermitian, tp.aux.x);
    }

    // aux.x is the tile size
    bool advanceAux(TuneParam &param) const
    {
      if ( x.size()==1 || y.size()==1 ) { // 1-d reduction
	param.aux.x++;
	if ( (unsigned int)param.aux.x <= max_tile_size ) {
	  return true;
	} else {
	  param.aux.x = 1;
	  return false;
	}
      } else { // 2-d reduction
	param.aux.x *= 2; // only tune powers of two (FIXME)
	if ( (unsigned int)param.aux.x <= max_tile_size && param.aux.x <= (int)x.size() &&
	     param.aux.x <= (int)y.size() ) {
	  return true;
	} else {
	  param.aux.x = 1; // reset to the beginning (which we'd need for multi-dimensional tuning)
	  return false;
	}
      }
    }

    bool advanceTuneParam(TuneParam &param) const { return advanceAux(param); }

    void initTuneParam(TuneParam &param) const  {
      Tunable::initTuneParam(param);
      param.aux.x = 1; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0;
    }

    void defaultTuneParam(TuneParam &param) const  {
      Tunable::defaultTuneParam(param); // default is max tile size
      // max_tile_size is MAX_MULTI_BLAS_N rounded down to the nearest power of 2.
      param.aux.x = max_tile_size; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0;
    }

    TuneKey tuneKey() const {
      return TuneKey(x[0]->VolString(), typeid(*this).name(), aux);
    }

    long long flops() const { return 0; } // FIXME
    long long bytes() const { return 0; } // FIXME

    void preTune() { } // FIXME - use write to determine what needs to be saved
    void postTune() { } // FIXME - use write to determine what needs to be saved
  };

  // Public entry point: row-major matrix of complex dot products result[i][j] = (x_i, y_j).
  void cDotProduct(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
    if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
    Complex* result_tmp = new Complex[x.size()*y.size()];
    for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

    // cDotProduct_recurse returns a column-major matrix.
    // To be consistent with the multi-blas functions, we should
    // switch this to row-major.
    TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, false);
    tile.apply(0);

    // do a single multi-node reduction only once we have computed all local dot products
    const int Nreduce = 2*x.size()*y.size();
    reduceDoubleArray((double*)result_tmp, Nreduce);

    // Switch from col-major to row-major
    const unsigned int xlen = x.size();
    const unsigned int ylen = y.size();
    for (unsigned int j = 0; j < xlen; j++)
      for (unsigned int i = 0; i < ylen; i++)
	result[j*ylen+i] = result_tmp[i*xlen + j];

    delete[] result_tmp;
  }

  // Hermitian block dot product: computes the upper triangle and mirrors the
  // conjugate into the lower triangle during the transpose back to row-major.
  void hDotProduct(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
    if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
    if (x.size() != y.size()) errorQuda("Cannot call Hermitian block dot product on non-square inputs");

    Complex* result_tmp = new Complex[x.size()*y.size()];
    for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

    TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true, false); // last false is b/c L2 norm
    tile.apply(0);

    // do a single multi-node reduction only once we have computed all local dot products
    const int Nreduce = 2*x.size()*y.size();
    reduceDoubleArray((double*)result_tmp, Nreduce);

    // FIXME - could optimize this for Hermiticity as well
    // Switch from col-major to row-major
    const unsigned int xlen = x.size();
    const unsigned int ylen = y.size();
    for (unsigned int j = 0; j < xlen; j++)
      for (unsigned int i = j; i < ylen; i++) {
	result[j*ylen+i] = result_tmp[i*xlen + j];
	result[i*ylen+j] = conj(result_tmp[i*xlen + j]);
      }

    delete[] result_tmp;
  }

  // for (p, Ap) norms in CG which are Hermitian.
  void hDotProduct_Anorm(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
    if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
    if (x.size() != y.size()) errorQuda("Cannot call Hermitian block A-norm dot product on non-square inputs");

    Complex* result_tmp = new Complex[x.size()*y.size()];
    for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

    TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true, true); // last true is b/c A norm
    tile.apply(0);

    // do a single multi-node reduction only once we have computed all local dot products
    const int Nreduce = 2*x.size()*y.size();
    reduceDoubleArray((double*)result_tmp, Nreduce);

    // FIXME - could optimize this for Hermiticity as well
    // Switch from col-major to row-major
    const unsigned int xlen = x.size();
    const unsigned int ylen = y.size();
    for (unsigned int j = 0; j < xlen; j++)
      for (unsigned int i = j; i < ylen; i++) {
	result[j*ylen+i] = result_tmp[i*xlen + j];
	result[i*ylen+j] = conj(result_tmp[i*xlen + j]);
      }

    delete[] result_tmp;
  }

  // takes the outer product of inner products between and y and copies y into z
  void cDotProductCopy(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y,
		       std::vector<ColorSpinorField*>&z){

    // NOTE(review): this path is compiled out; the function unconditionally
    // raises "cDotProductCopy not enabled" until the #if 0 is removed.
#if 0
    if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
    if (y.size() != z.size()) errorQuda("Cannot copy input y of size %lu into z of size %lu\n", y.size(), z.size());

    Complex* result_tmp = new Complex[x.size()*y.size()];
    for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

    // When recursing, only the diagonal tiles will do the copy, the rest just do the outer product
    TileSizeTune<CdotCopy,write<0,0,0,1>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true);
    tile.apply(0);

    // do a single multi-node reduction only once we have computed all local dot products
    const int Nreduce = 2*x.size()*y.size();
    reduceDoubleArray((double*)result_tmp, Nreduce);

    // Switch from col-major to row-major.
    const unsigned int xlen = x.size();
    const unsigned int ylen = y.size();
    for (unsigned int j = 0; j < xlen; j++)
      for (unsigned int i = 0; i < ylen; i++)
	result[j*ylen+i] = result_tmp[i*xlen + j];

    delete[] result_tmp;
#else
    errorQuda("cDotProductCopy not enabled");
#endif
  }

   } // namespace blas

} // namespace quda
// eb52241f7b435947b733742871446aa499797773.cu — stray file-name marker (start of the appended CUDA variant of this file), commented out so it is not parsed as code
// ===== Beginning of the second (CUDA) copy of the multi-reduce implementation:
// includes, precision-trait helpers, and the quda::blas namespace preamble =====
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <color_spinor_field_order.h>
#include <uint_to_char.h>

//#define QUAD_SUM
#ifdef QUAD_SUM
#include <dbldbl.h>
#endif

#include <cub_helper.cuh>

// Maps a reduction accumulator type to its underlying scalar type.
template<typename> struct ScalarType { };
template<> struct ScalarType<double> { typedef double type; };
template<> struct ScalarType<double2> { typedef double type; };
template<> struct ScalarType<double3> { typedef double type; };

// Maps a scalar type to its 2-component vector type.
template<typename> struct Vec2Type { };
template<> struct Vec2Type<double> { typedef double2 type; };

#ifdef QUAD_SUM
#define QudaSumFloat doubledouble
#define QudaSumFloat2 doubledouble2
#define QudaSumFloat3 doubledouble3
template<> struct ScalarType<doubledouble> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble2> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble3> { typedef doubledouble type; };
template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; };
#else
#define QudaSumFloat double
#define QudaSumFloat2 double2
#define QudaSumFloat3 double3
#endif

// work around for Fermi
#if (__COMPUTE_CAPABILITY__ < 300)
#undef MAX_MULTI_BLAS_N
#define MAX_MULTI_BLAS_N 2
#endif

// Sanity check that two fields have compatible geometry before reducing over them.
static void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b)
{
  if (a.Length() != b.Length())
    errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());

  if (a.Stride() != b.Stride())
    errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}

static struct {
  const char *vol_str;
  const char *aux_str;
  char aux_tmp[quda::TuneKey::aux_n];
} blasStrings;

namespace quda {

  // hooks into tune.cpp variables for policy tuning
  typedef std::map<TuneKey, TuneParam> map;
  const map& getTuneCache();

  void disableProfileCount();
  void enableProfileCount();
  void setPolicyTuning(bool);

  namespace blas {

    cudaStream_t* getStream();
    cudaEvent_t* getReduceEvent();

    // Compile-time flags selecting which of the x/y/z/w streams a kernel writes.
    template <int writeX, int writeY, int writeZ, int writeW>
    struct write {
      static constexpr int X = writeX;
      static
constexpr int Y = writeY; static constexpr int Z = writeZ; static constexpr int W = writeW; }; namespace reduce { namespace multi { #define BLAS_SPINOR // do not include ghost functions in Spinor class to reduce parameter space overhead #include <texture.h> } #include <multi_reduce_core.cuh> #include <multi_reduce_core.h> } // namespace reduce /** Base class from which all reduction functors should derive. */ template <int NXZ, typename ReduceType, typename Float2, typename FloatN> struct MultiReduceFunctor { //! pre-computation routine called before the "M-loop" virtual __device__ __host__ void pre() { ; } //! where the reduction is usually computed and any auxiliary operations virtual __device__ __host__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j) = 0; //! post-computation routine called after the "M-loop" virtual __device__ __host__ void post(ReduceType &sum) { ; } }; /** Return the real dot product of x and y Broken at the moment---need to update reDotProduct with permuting, etc of cDotProduct below. 
    */
    // Accumulates the real dot product a.b into sum (double-precision fields).
    template<typename ReduceType>
    __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) {
      sum += (ReduceType)a.x*(ReduceType)b.x;
      sum += (ReduceType)a.y*(ReduceType)b.y;
    }

    // Single-precision 2-component variant.
    template<typename ReduceType>
    __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) {
      sum += (ReduceType)a.x*(ReduceType)b.x;
      sum += (ReduceType)a.y*(ReduceType)b.y;
    }

    // Single-precision 4-component variant.
    template<typename ReduceType>
    __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) {
      sum += (ReduceType)a.x*(ReduceType)b.x;
      sum += (ReduceType)a.y*(ReduceType)b.y;
      sum += (ReduceType)a.z*(ReduceType)b.z;
      sum += (ReduceType)a.w*(ReduceType)b.w;
    }

    // Reduction functor for the real dot product (x_i, y_j).
    template <int NXZ, typename ReduceType, typename Float2, typename FloatN>
    struct Dot : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> {
      typedef typename scalar<Float2>::type real;
      const int NYW;
      Dot(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; }
      __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j)
      { dot_<ReduceType>(sum,x,y); }
      static int streams() { return 2; } //! total number of input and output streams
      static int flops() { return 2; } //! flops per element
    };

    // Real-valued block dot products; only built when SSTEP is enabled.
    void reDotProduct(double* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
#ifndef SSTEP
      errorQuda("S-step code not built\n");
#else
      switch(x.size()){
      case 1:
	reduce::multiReduceCuda<1,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 2:
	reduce::multiReduceCuda<2,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 3:
	reduce::multiReduceCuda<3,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 4:
	reduce::multiReduceCuda<4,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 5:
	reduce::multiReduceCuda<5,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 6:
	reduce::multiReduceCuda<6,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 7:
	reduce::multiReduceCuda<7,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 8:
	reduce::multiReduceCuda<8,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      /*case 9:
	reduce::multiReduceCuda<9,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 10:
	reduce::multiReduceCuda<10,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 11:
	reduce::multiReduceCuda<11,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 12:
	reduce::multiReduceCuda<12,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x,
	   y);
	break;
      case 13:
	reduce::multiReduceCuda<13,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 14:
	reduce::multiReduceCuda<14,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 15:
	reduce::multiReduceCuda<15,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;
      case 16:
	reduce::multiReduceCuda<16,double,QudaSumFloat,Dot,0,0,0,0,false>
	  (result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, y);
	break;*/
      default:
	errorQuda("Unsupported vector size");
	break;
      }
#endif // SSTEP

      // do a single multi-node reduction only once we have computed all local dot products
      const int Nreduce = x.size()*y.size();
      reduceDoubleArray((double*)result, Nreduce);
    }

    /**
       Returns complex-valued dot product of x and y
    */
    // Accumulates conj(a)*b into sum (real in sum.x, imaginary in sum.y).
    template<typename ReduceType>
    __device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) {
      typedef typename ScalarType<ReduceType>::type scalar;
      sum.x += (scalar)a.x*(scalar)b.x;
      sum.x += (scalar)a.y*(scalar)b.y;
      sum.y += (scalar)a.x*(scalar)b.y;
      sum.y -= (scalar)a.y*(scalar)b.x;
    }

    template<typename ReduceType>
    __device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) {
      typedef typename ScalarType<ReduceType>::type scalar;
      sum.x += (scalar)a.x*(scalar)b.x;
      sum.x += (scalar)a.y*(scalar)b.y;
      sum.y += (scalar)a.x*(scalar)b.y;
      sum.y -= (scalar)a.y*(scalar)b.x;
    }

    // float4 variant: two packed complex numbers, (x,y) and (z,w).
    template<typename ReduceType>
    __device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) {
      typedef typename ScalarType<ReduceType>::type scalar;
      sum.x += (scalar)a.x*(scalar)b.x;
      sum.x += (scalar)a.y*(scalar)b.y;
      sum.x += (scalar)a.z*(scalar)b.z;
      sum.x += (scalar)a.w*(scalar)b.w;
      sum.y += (scalar)a.x*(scalar)b.y;
      sum.y -= (scalar)a.y*(scalar)b.x;
      sum.y += (scalar)a.z*(scalar)b.w;
      sum.y -= (scalar)a.w*(scalar)b.z;
    }

    template <int NXZ,
	      typename ReduceType, typename Float2, typename FloatN>
    struct Cdot : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> {
      typedef typename scalar<Float2>::type real;
      const int NYW;
      // coeff_array parameters are unused; kept for a uniform functor signature.
      Cdot(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; }
      __device__ __host__ inline void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j)
      { cdot_<ReduceType>(sum,x,y); }
      static int streams() { return 2; } //! total number of input and output streams
      static int flops() { return 4; } //! flops per element
    };

    // Same as Cdot but also copies y into w on diagonal entries (i==j).
    template <int NXZ, typename ReduceType, typename Float2, typename FloatN>
    struct CdotCopy : public MultiReduceFunctor<NXZ, ReduceType, Float2, FloatN> {
      typedef typename scalar<Float2>::type real;
      const int NYW;
      CdotCopy(const reduce::coeff_array<Complex> &a, const reduce::coeff_array<Complex> &b, const reduce::coeff_array<Complex> &c, int NYW) : NYW(NYW) { ; }
      __device__ __host__ inline void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, const int i, const int j)
      { cdot_<ReduceType>(sum,x,y); if (i==j) w = y;}
      static int streams() { return 2; } //! total number of input and output streams
      static int flops() { return 4; } //! flops per element
    };

    // This function does the outer product of dot products... in column major.
    // There's a function below called 'cDotProduct' that flips it to row major.
    template <template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerDiagonal, typename writeDiagonal,
	      template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerOffDiagonal, typename writeOffDiagonal>
    void multiReduce_recurse(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y,
			     std::vector<ColorSpinorField*>&z, std::vector<ColorSpinorField*>&w,
			     int i_idx, int j_idx, bool hermitian, unsigned int tile_size) {
      if (y.size() > tile_size) // if greater than max single-kernel size, split and recurse
      {
	// Do the recurse first.
	Complex* result0 = &result[0];
	Complex* result1 = &result[x.size()*(y.size()/2)];
	std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2);
	std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end());
	std::vector<ColorSpinorField*> w0(w.begin(), w.begin() + w.size()/2);
	std::vector<ColorSpinorField*> w1(w.begin() + w.size()/2, w.end());
	multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result0, x, y0, z, w0, i_idx, 2*j_idx+0, hermitian, tile_size);
	multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result1, x, y1, z, w1, i_idx, 2*j_idx+1, hermitian, tile_size);
      }
      else
      {
	// NOTE(review): cdot is allocated before the hermitian early return
	// below, so every skipped lower-left tile leaks this buffer; the
	// allocation should be moved after the check.
	double2* cdot = new double2[x.size()*y.size()];

	// if at bottom of recursion, return if on lower left
	if (x.size() <= tile_size && hermitian) {
	  if (j_idx < i_idx) { return; }
	}

	reduce::coeff_array<Complex> a, b, c;

	if (x.size() <= tile_size) {
	  switch(x.size()){ // COMMENT HERE FOR COMPILE TIME
	  case 1:
	    reduce::multiReduceCuda<1,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
	      (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
	    break;
#if MAX_MULTI_BLAS_N >= 2
	  case 2:
	    reduce::multiReduceCuda<2,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false>
	      (cdot, a, b, c, x, y, z, w, i_idx, j_idx );
	    break;
#if MAX_MULTI_BLAS_N >= 3
	  case 3:
reduce::multiReduceCuda<3,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 4 case 4: reduce::multiReduceCuda<4,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 5 case 5: reduce::multiReduceCuda<5,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 6 case 6: reduce::multiReduceCuda<6,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 7 case 7: reduce::multiReduceCuda<7,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 8 case 8: reduce::multiReduceCuda<8,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 9 case 9: reduce::multiReduceCuda<9,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 10 case 10: reduce::multiReduceCuda<10,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 11 case 11: reduce::multiReduceCuda<11,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 12 case 12: reduce::multiReduceCuda<12,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, 
x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 13 case 13: reduce::multiReduceCuda<13,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 14 case 14: reduce::multiReduceCuda<14,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 15 case 15: reduce::multiReduceCuda<15,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #if MAX_MULTI_BLAS_N >= 16 case 16: reduce::multiReduceCuda<16,double2,QudaSumFloat2,ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal,false> (cdot, a, b, c, x, y, z, w, i_idx, j_idx ); break; #endif //16 #endif //15 #endif //14 #endif //13 #endif //12 #endif //11 #endif //10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 } } else { // split the problem and recurse. Splitting in x requires // memory reshuffling (unless y = 1). // Use a few temporary variables. 
Complex* tmpmajor = new Complex[x.size()*y.size()]; Complex* result0 = &tmpmajor[0]; Complex* result1 = &tmpmajor[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); std::vector<ColorSpinorField*> z0(z.begin(), z.begin() + z.size()/2); std::vector<ColorSpinorField*> z1(z.begin() + z.size()/2, z.end()); multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result0, x0, y, z0, w, 2*i_idx+0, j_idx, hermitian, tile_size); multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal>(result1, x1, y, z1, w, 2*i_idx+1, j_idx, hermitian, tile_size); const unsigned int xlen0 = x.size()/2; const unsigned int xlen1 = x.size() - xlen0; const unsigned int ylen = y.size(); // Copy back into result. int count = 0, count0 = 0, count1 = 0; for (unsigned int i = 0; i < ylen; i++) { for (unsigned int j = 0; j < xlen0; j++) result[count++] = result0[count0++]; for (unsigned int j = 0; j < xlen1; j++) result[count++] = result1[count1++]; } delete[] tmpmajor; } // we are at the leaf of the binary tree (e.g., we ran the kernel): perform the row-to-column-major transpose here. 
if (x.size() <= tile_size) { const unsigned int xlen = x.size(); const unsigned int ylen = y.size(); for (unsigned int j = 0; j < xlen; j++) for (unsigned int i = 0; i < ylen; i++) result[i*xlen+j] = Complex(cdot[j*ylen + i].x, cdot[j*ylen+i].y); } delete[] cdot; } } template <template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerDiagonal, typename writeDiagonal, template <int MXZ, typename ReducerType, typename Float, typename FloatN> class ReducerOffDiagonal, typename writeOffDiagonal> class TileSizeTune : public Tunable { typedef std::vector<ColorSpinorField*> vec; Complex *result; vec &x, &y, &z, &w; bool hermitian; bool Anorm; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } unsigned int max_tile_size; public: TileSizeTune(Complex *result, vec &x, vec &y, vec &z, vec &w, bool hermitian, bool Anorm = false) : result(result), x(x), y(y), z(z), w(w), hermitian(hermitian), Anorm(Anorm), max_tile_size(1) { strcpy(aux, "policy,"); strcat(aux, x[0]->AuxString()); strcat(aux, ","); strcat(aux, y[0]->AuxString()); if (hermitian) strcat(aux, ",hermitian"); if (Anorm) strcat(aux, ",Anorm"); strcat(aux,",n="); char size[8]; u64toa(size, x.size()); strcat(aux,size); strcat(aux,",m="); u64toa(size, y.size()); strcat(aux,size); // before we do policy tuning we must ensure the kernel // constituents have been tuned since we can't do nested tuning // FIXME this will break if the kernels are destructive - which they aren't here if (getTuning() && getTuneCache().find(tuneKey()) == getTuneCache().end()) { disableProfileCount(); // purely for profiling reasons, don't want to profile tunings. if ( x.size()==1 || y.size()==1 ) { // 1-d reduction max_tile_size = std::min(MAX_MULTI_BLAS_N, (int)std::max(x.size(), y.size())); // Make sure constituents are tuned. 
for ( unsigned int tile_size=1; tile_size <= max_tile_size; tile_size++) { multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal> (result, x, y, z, w, 0, 0, hermitian, tile_size); } } else { // 2-d reduction // max_tile_size should be set to the largest power of 2 less than // MAX_MULTI_BLAS_N, since we have a requirement that the // tile size is a power of 2. unsigned int max_count = 0; unsigned int tile_size_tmp = MAX_MULTI_BLAS_N; while (tile_size_tmp != 1) { tile_size_tmp = tile_size_tmp >> 1; max_count++; } tile_size_tmp = 1; for (unsigned int i = 0; i < max_count; i++) { tile_size_tmp = tile_size_tmp << 1; } max_tile_size = tile_size_tmp; // Make sure constituents are tuned. for ( unsigned int tile_size=1; tile_size <= max_tile_size && tile_size <= x.size() && (tile_size <= y.size() || y.size()==1) ; tile_size*=2) { multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal> (result, x, y, z, w, 0, 0, hermitian, tile_size); } } enableProfileCount(); setPolicyTuning(true); } } virtual ~TileSizeTune() { setPolicyTuning(false); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); // tp.aux.x is where the tile size is stored. "tp" is the tuning struct. // it contains blocksize, grid size, etc. Since we're only tuning // a policy, we don't care about those sizes. That's why we only // tune "aux.x", which is the tile size. 
multiReduce_recurse<ReducerDiagonal,writeDiagonal,ReducerOffDiagonal,writeOffDiagonal> (result, x, y, z, w, 0, 0, hermitian, tp.aux.x); } // aux.x is the tile size bool advanceAux(TuneParam &param) const { if ( x.size()==1 || y.size()==1 ) { // 1-d reduction param.aux.x++; if ( (unsigned int)param.aux.x <= max_tile_size ) { return true; } else { param.aux.x = 1; return false; } } else { // 2-d reduction param.aux.x *= 2; // only tune powers of two (FIXME) if ( (unsigned int)param.aux.x <= max_tile_size && param.aux.x <= (int)x.size() && param.aux.x <= (int)y.size() ) { return true; } else { param.aux.x = 1; // reset to the beginning (which we'd need for multi-dimensional tuning) return false; } } } bool advanceTuneParam(TuneParam &param) const { return advanceAux(param); } void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.aux.x = 1; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); // default is max tile size // max_tile_size is MAX_MULTI_BLAS_N rounded down to the nearest power of 2. param.aux.x = max_tile_size; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } TuneKey tuneKey() const { return TuneKey(x[0]->VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } // FIXME long long bytes() const { return 0; } // FIXME void preTune() { } // FIXME - use write to determine what needs to be saved void postTune() { } // FIXME - use write to determine what needs to be saved }; void cDotProduct(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){ if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0"); Complex* result_tmp = new Complex[x.size()*y.size()]; for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0; // cDotProduct_recurse returns a column-major matrix. // To be consistent with the multi-blas functions, we should // switch this to row-major. 
TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, false);
tile.apply(0);

// do a single multi-node reduction only once we have computed all local dot products
const int Nreduce = 2*x.size()*y.size();  // two doubles (re, im) per complex entry
reduceDoubleArray((double*)result_tmp, Nreduce);

// Switch from col-major to row-major
const unsigned int xlen = x.size();
const unsigned int ylen = y.size();
for (unsigned int j = 0; j < xlen; j++)
  for (unsigned int i = 0; i < ylen; i++)
    result[j*ylen+i] = result_tmp[i*xlen + j];

delete[] result_tmp;
}

// Hermitian block dot product: result[i][j] = (x_i, y_j) for equally sized
// input sets.  The GPU work is restricted to one triangle (hermitian = true in
// the tuner) and the other triangle is filled by conjugate symmetry on the host.
void hDotProduct(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
  if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
  if (x.size() != y.size()) errorQuda("Cannot call Hermitian block dot product on non-square inputs");

  Complex* result_tmp = new Complex[x.size()*y.size()];
  for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

  TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true, false); // last false is b/c L2 norm
  tile.apply(0);

  // do a single multi-node reduction only once we have computed all local dot products
  const int Nreduce = 2*x.size()*y.size();
  reduceDoubleArray((double*)result_tmp, Nreduce);

  // FIXME - could optimize this for Hermiticity as well
  // Switch from col-major to row-major, writing both triangles via conjugation
  const unsigned int xlen = x.size();
  const unsigned int ylen = y.size();
  for (unsigned int j = 0; j < xlen; j++)
    for (unsigned int i = j; i < ylen; i++) {
      result[j*ylen+i] = result_tmp[i*xlen + j];
      result[i*ylen+j] = conj(result_tmp[i*xlen + j]);
    }

  delete[] result_tmp;
}

// for (p, Ap) norms in CG which are Hermitian.
// Hermitian block A-norm dot product: identical in structure to hDotProduct()
// above, except the tile tuner is constructed with Anorm = true (final
// constructor argument).
void hDotProduct_Anorm(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y){
  if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
  if (x.size() != y.size()) errorQuda("Cannot call Hermitian block A-norm dot product on non-square inputs");

  Complex* result_tmp = new Complex[x.size()*y.size()];
  for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

  TileSizeTune<Cdot,write<0,0,0,0>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true, true); // last true is b/c A norm
  tile.apply(0);

  // do a single multi-node reduction only once we have computed all local dot products
  const int Nreduce = 2*x.size()*y.size();
  reduceDoubleArray((double*)result_tmp, Nreduce);

  // FIXME - could optimize this for Hermiticity as well
  // Switch from col-major to row-major, filling both triangles by symmetry
  const unsigned int xlen = x.size();
  const unsigned int ylen = y.size();
  for (unsigned int j = 0; j < xlen; j++)
    for (unsigned int i = j; i < ylen; i++) {
      result[j*ylen+i] = result_tmp[i*xlen + j];
      result[i*ylen+j] = conj(result_tmp[i*xlen + j]);
    }

  delete[] result_tmp;
}

// takes the outer product of inner products between and y and copies y into z
// NOTE(review): this body is compiled out (#if 0) — calling it always errors.
void cDotProductCopy(Complex* result, std::vector<ColorSpinorField*>& x, std::vector<ColorSpinorField*>& y,
                     std::vector<ColorSpinorField*>&z){
#if 0
  if (x.size() == 0 || y.size() == 0) errorQuda("vector.size() == 0");
  if (y.size() != z.size()) errorQuda("Cannot copy input y of size %lu into z of size %lu\n", y.size(), z.size());

  Complex* result_tmp = new Complex[x.size()*y.size()];
  for (unsigned int i = 0; i < x.size()*y.size(); i++) result_tmp[i] = 0.0;

  // When recursing, only the diagonal tiles will do the copy, the rest just do the outer product
  TileSizeTune<CdotCopy,write<0,0,0,1>,Cdot,write<0,0,0,0> > tile(result_tmp, x, y, x, y, true);
  tile.apply(0);

  // do a single multi-node reduction only once we have computed all local dot products
  const int Nreduce = 2*x.size()*y.size();
  reduceDoubleArray((double*)result_tmp, Nreduce);

  // Switch from col-major to row-major.
  const unsigned int xlen = x.size();
  const unsigned int ylen = y.size();
  for (unsigned int j = 0; j < xlen; j++)
    for (unsigned int i = 0; i < ylen; i++)
      result[j*ylen+i] = result_tmp[i*xlen + j];

  delete[] result_tmp;
#else
  errorQuda("cDotProductCopy not enabled");
#endif
}

} // namespace blas

} // namespace quda
1489d2ece35c0eb53288a3f6834bf68c15a9470d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <ctime>
#include <fstream>

// Inspects the GPU's properties to pick a suitable tile width for the given
// board size.  Square boards require strictly more than one tile per side
// (so the grid has more than a single block); rectangular boards only need
// at least one tile per side.  Returns -1 when no power-of-two width from
// 32 down to 2 qualifies.
int obtenerTileWidth(int anchura, int altura) {
	// Larger of the two board dimensions, kept as a float so the divisions
	// below stay fractional (preserving the original threshold semantics).
	float medida = (anchura > altura) ? (float)anchura : (float)altura;

	hipDeviceProp_t propiedades;
	hipGetDeviceProperties(&propiedades, 0);
	// 32x32 tiles need 1024 threads per block, which not every device offers.
	int max_threads = propiedades.maxThreadsPerBlock;

	if (anchura == altura) {
		// Square board: demand more than one tile along the side.
		if (medida / 32 > 1 && max_threads == 1024) return 32;
		if (medida / 16 > 1) return 16;
		if (medida / 8 > 1) return 8;
		if (medida / 4 > 1) return 4;
		if (medida / 2 > 1) return 2;
	}
	else {
		// Rectangular board: a single tile along the shorter side is fine.
		if (medida / 32 >= 1 && max_threads == 1024) return 32;
		if (medida / 16 >= 1) return 16;
		if (medida / 8 >= 1) return 8;
		if (medida / 4 >= 1) return 4;
		if (medida / 2 >= 1) return 2;
	}
	return -1;
}
// Function to generate a random jewel, as used for the initial board fill.
/* Functions for generating random gems */

/* Seed initialiser: one RNG state per thread.
   NOTE(review): indexed by threadIdx.x only, so this assumes a single-block
   launch (main launches it as <<<1, size>>>); boards with more cells than
   maxThreadsPerBlock would need a multi-block launch here — verify. */
__global__ void setup_kernel(hiprandState_t * state, unsigned long seed) {
	int id = threadIdx.x;
	hiprand_init(seed, id, 0, &state[id]);
}

/* Draw a uniform float from the per-cell RNG state at index `ind`,
   advancing that state and writing it back to global memory. */
__device__ float generate(hiprandState_t* globalState, int ind) {
	hiprandState_t localState = globalState[ind];
	float RANDOM = hiprand_uniform(&localState);
	globalState[ind] = localState;
	return RANDOM;
}

/* Generate a jewel value on the device.  The range of jewel values widens
   with the difficulty (multipliers 4, 6 and 8 on the uniform draw);
   returns -1 for an unrecognised difficulty. */
__device__ int generarJewelCUDA(hiprandState_t* globalState, int ind, int dificultad) {
	switch (dificultad) {
	case 1: { return (int)1 + generate(globalState, ind) * 4; }
	case 2: { return (int)1 + generate(globalState, ind) * 6; }
	case 3: { return (int)1 + generate(globalState, ind) * 8; }
	}
	return -1;
}

/* Kernel that fills the whole board with random jewels: one thread per cell,
   bounds-checked against the board dimensions. */
__global__ void generacionInicialRandomJewels(float *tablero, int dificultad, int anchura, int altura, int TILE_WIDTH, hiprandState_t* globalState) {
	int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y;
	int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x;
	if (tFila < altura) {
		if (tColumna < anchura) {
			tablero[tFila*anchura + tColumna] = generarJewelCUDA(globalState, tFila * anchura + tColumna, dificultad);
		}
	}
}

/* Print the board to stdout, top row first (row altura-1 down to row 0),
   each value rendered as an integer. */
void printTablero(float* tablero, int anchura, int altura) {
	for (int i = altura - 1; i >= 0; i--) {
		printf("\n");
		for (int j = 0; j < anchura; j++) {
			printf("%d ", (int)tablero[j + i*anchura]);
		}
	}
	printf("\n");
}

/* Receives the coordinates of the jewels to delete and shifts down the rows
   that must fall above them; reads from a copy of the board to avoid race
   conditions between threads. */
__global__ void eliminarJewelsKernel(float* tablero_d, float* tablero_aux_d, float* jewels_eliminadas_d, int dificultad, int anchura, int altura, int final, int TILE_WIDTH, hiprandState_t* globalState) {
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int block_x = blockIdx.x;
	int block_y = blockIdx.y;
	// Real position inside the board
	tx += block_x *
TILE_WIDTH; ty += block_y * TILE_WIDTH; if (jewels_eliminadas_d[0] != jewels_eliminadas_d[2] && tx >= jewels_eliminadas_d[0] && tx <= jewels_eliminadas_d[final - 2] && ty >= jewels_eliminadas_d[1]) { if (ty + 1 < altura) { float value = tablero_aux_d[tx + (ty + 1)*anchura]; tablero_d[tx + (ty)*(anchura)] = value; } else { tablero_d[tx + ty*anchura] = generarJewelCUDA(globalState, tx + ty*anchura, dificultad); } } else { if (ty < altura && tx == jewels_eliminadas_d[0] && ty > jewels_eliminadas_d[1]) { float value = tablero_aux_d[tx + (ty)*anchura]; tablero_d[tx + (ty - final / 2)*(anchura)] = value; } if (ty >= altura - final / 2 && ty < altura && tx == jewels_eliminadas_d[0]) { tablero_d[tx + (ty)*anchura] = generarJewelCUDA(globalState, tx + ty*anchura, dificultad); } } } /*Funcion que prepara y llama el kernel con su mismo nombre, genera todos los datos necesarios*/ void eliminarJewels(float* tablero, float* jewels_eliminadas, int dificultad, int anchura, int altura, int TILE_WIDTH, hiprandState_t* globalState) { float *tablero_d; float *jewels_eliminadas_d; float *tablero_aux_d; int size = anchura * altura * sizeof(float); int max = 0; //Para saber que medida es la ms grande, ya que no se pueden eliminar ms jewels seguidas que esa medida if (altura >= anchura) max = altura; else max = anchura; //Tablero a GPU y la copia del tablero hipMalloc((void**)&tablero_d, size); hipMemcpy(tablero_d, tablero, size, hipMemcpyHostToDevice); hipMalloc((void**)&tablero_aux_d, size); hipMemcpy(tablero_aux_d, tablero, size, hipMemcpyHostToDevice); //Jewels a eliminar a GPU. 
2*max ya que cada posicion son dos coordenadas, x e y hipMalloc((void**)&jewels_eliminadas_d, 2 * max * sizeof(float)); hipMemcpy(jewels_eliminadas_d, jewels_eliminadas, 2 * max * sizeof(float), hipMemcpyHostToDevice); int final = 0; bool modif = false; //Calcula cual es el ultimo valor escrito de las jewels a eliminar, ya que puede haber posiciones no escritas for (int i = 0; i < max * 2; i++) { if (jewels_eliminadas[i] < 0) { final = i; modif = true; break; } } //En caso de que este completamente escrito if (!modif) final = max * 2; //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((double)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((double)altura) / TILE_WIDTH); //Configuracion de ejecucion dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); eliminarJewelsKernel << <dimGrid, dimBlock >> > (tablero_d, tablero_aux_d, jewels_eliminadas_d, dificultad, anchura, altura, final, TILE_WIDTH, globalState); //Se recupera el tablero actualizado hipMemcpy(tablero, tablero_d, size, hipMemcpyDeviceToHost); //Libera memoria hipFree(tablero_d); hipFree(jewels_eliminadas_d); hipFree(tablero_aux_d); } /*Escribe en un tablero auxiliar la cantidad de jewels que se eliminarian moviendo una jewel (x,y) hacia la derecha paralelizable ya que todos los hilos (cada hilo 1 jewel) tienen que expandirse hacia la derecha para ver hasta donde llegarian a eliminar*/ __global__ void analisisTableroAutomaticoKernel(float *tablero_d, float *aux_d, int dificultad, int anchura, int altura, int TILE_WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int block_x = blockIdx.x; int block_y = blockIdx.y; //Posicion real dentro del tablero tx += block_x * TILE_WIDTH; ty += block_y * TILE_WIDTH; int jewels_posibles_der = 0; //Si tiene por la derecha if ((tx + 2) < anchura) { if (((tx + 2) + ty*anchura <= altura*anchura) && tablero_d[tx + 2 + ty*anchura] == tablero_d[tx + ty*anchura]) { int i = 2; //Se expande while ((tx + i + 
ty*anchura <= altura*anchura) && tablero_d[tx + i + ty*anchura] == tablero_d[tx + ty*anchura]) { jewels_posibles_der++; i++; } aux_d[tx + ty*anchura] = jewels_posibles_der + 1; } else { aux_d[tx + ty*anchura] = 1; } } else { aux_d[tx + ty*anchura] = 1; } //printf("%i-%f ", tx + ty*anchura, aux_d[tx + ty*anchura]); } //Analiza el movimiento manual, usando las coordenadas de la nueva posicion de la jewel seleccionada void analisisTableroManual(int dificultad, float* tablero, int anchura, int altura, int x, int y, int TILE_WIDTH, hiprandState_t* globalState) { int max = 0; int size = anchura*altura; if (altura >= anchura) max = altura; else max = anchura; //Solo se eliminan MAX jewels como mucho, se guardan sus x e y float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float)); //Se inicializa a -1 ra saber hasta que punto se escribe for (int i = 0; i < max; i++) { jewels_eliminadas[i] = -1; } int jewels_posibles_izq = 0; int jewels_posibles_der = 0; //Si tiene por la izquierda if ((x - 1 + y*anchura >= 0) && tablero[x - 1 + y*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x - i + y*anchura >= 0) && (x - i >= 0) && tablero[x - i + y*anchura] == tablero[x + y*anchura]) { jewels_posibles_izq++; i++; } } //Si tiene por la derecha if ((x + 1 + y*anchura <= size) && tablero[x + 1 + y*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + i + y*anchura <= size) && (x + i < anchura) && tablero[x + i + y*anchura] == tablero[x + y*anchura]) { jewels_posibles_der++; i++; } } //Se pueden eliminar horizontalmente, las coloca en orden para facilitar su eliminacion if (1 + jewels_posibles_izq + jewels_posibles_der >= 3) { int salto = 0; for (int j = jewels_posibles_izq; j >= (1); j--) { jewels_eliminadas[salto] = x - j; jewels_eliminadas[salto + 1] = y; salto += 2; } jewels_eliminadas[jewels_posibles_izq * 2] = x; jewels_eliminadas[jewels_posibles_izq * 2 + 1] = y; salto = 2; for (int k = 1; k <= jewels_posibles_der; k++) { jewels_eliminadas[salto + 
jewels_posibles_izq * 2] = x + k; jewels_eliminadas[salto + jewels_posibles_izq * 2 + 1] = y; salto += 2; } } else { //Analizamos la vertical int jewels_posibles_arrib = 0; int jewels_posibles_abaj = 0; //Si tiene por abajo if ((x + (y - 1)*anchura >= 0) && tablero[x + (y - 1)*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + (y - i)*anchura >= 0) && tablero[x + (y - i)*anchura] == tablero[x + y*anchura]) { jewels_posibles_abaj++; i++; } } //Si tiene por arriba if ((x + 1 + y*anchura <= size) && tablero[x + (y + 1)*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + (y + i)*anchura <= size) && tablero[x + (y + i)*anchura] == tablero[x + y*anchura]) { jewels_posibles_arrib++; i++; } } //Se pueden eliminar if (1 + jewels_posibles_abaj + jewels_posibles_arrib >= 3) { int salto = 0; for (int j = jewels_posibles_abaj; j >= (1); j--) { jewels_eliminadas[salto] = x; jewels_eliminadas[salto + 1] = y - j; salto += 2; } jewels_eliminadas[jewels_posibles_abaj * 2] = x; jewels_eliminadas[jewels_posibles_abaj * 2 + 1] = y; salto = 2; for (int k = 1; k <= jewels_posibles_arrib; k++) { jewels_eliminadas[salto + jewels_posibles_abaj * 2] = x; jewels_eliminadas[salto + jewels_posibles_abaj * 2 + 1] = y + k; salto += 2; } } } //Las elimina eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura, TILE_WIDTH, globalState); free(jewels_eliminadas); } //Intercambia la jewel seleccionadas con la jewel en la direccin indicada void intercambiarPosiciones(float* tablero, int jewel1_x, int jewel1_y, int direccion, int anchura, int altura, int seleccion, int dificultad, int TILE_WIDTH, hiprandState_t* globalState) { int jewel2_x = jewel1_x; int jewel2_y = jewel1_y; switch (direccion) { case 1: //Arriba { jewel2_y += 1; break; } case 2: //Abajo { jewel2_y -= 1; break; } case 3: //Izquierda { jewel2_x -= 1; break; } case 4: //Derecha { jewel2_x += 1; break; } } int aux1; aux1 = tablero[jewel2_x + jewel2_y*anchura]; tablero[jewel2_x + jewel2_y*anchura] = 
tablero[jewel1_x + jewel1_y*anchura]; tablero[jewel1_x + jewel1_y*anchura] = aux1; //Analiza el movimiento para ver si se pueden eliminar jewels analisisTableroManual(dificultad, tablero, anchura, altura, jewel2_x, jewel2_y, TILE_WIDTH, globalState); } //Analiza la mejor opcion y la ejecuta en funcion de lo que devuelve el kernel void analisisTableroAutomatico(int dificultad, float* tablero, int anchura, int altura, int TILE_WIDTH, hiprandState_t* globalState) { float *tablero_d; float *aux_d; float *aux; //Tamao del tablero para asignar memoria int size = anchura * altura * sizeof(float); int tam = anchura * altura; int max = 0; if (altura >= anchura) max = altura; else max = anchura; //Solo se eliminan max jewels, 2 coordenadas por jewel = 2 * max posiciones float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float)); aux = (float*)malloc(size); for (int i = 0; i < max; i++) { jewels_eliminadas[i] = -1; } //Solo se cuenta la jewel que se escoge, sigue siendo menor que 3 for (int p = 0; p < tam; p++) { aux[p] = 1; } //Tablero a GPU hipMalloc((void**)&tablero_d, size); hipMemcpy(tablero_d, tablero, size, hipMemcpyHostToDevice); //Auxiliar de conteo a GPU hipMalloc((void**)&aux_d, size); hipMemcpy(aux_d, aux, size, hipMemcpyHostToDevice); //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((double)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((double)altura) / TILE_WIDTH); //Configuracion de ejecucion dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); //Inicio del kernel analisisTableroAutomaticoKernel << <dimGrid, dimBlock >> > (tablero_d, aux_d, dificultad, anchura, altura, TILE_WIDTH); //Transfiere el resultado de la GPU al host hipMemcpy(aux, aux_d, size, hipMemcpyDeviceToHost); int x_mejor = 0; int y_mejor = 0; int valor_mejor = 0; //Se busca el movimiento con el mayor numero de jewels eliminadas for (int y = 0; y < altura; y++) { for (int x = 0; x < anchura; x++) { if (aux[x + 
y*anchura] > valor_mejor) {
				valor_mejor = aux[x + y*anchura];
				x_mejor = x;
				y_mejor = y;
			}
		}
	}

	// If at least three jewels can be removed, execute the move (always a
	// swap to the right, direction 4) with everything that entails.
	if (valor_mejor >= 3) {
		intercambiarPosiciones(tablero, x_mejor, y_mejor, 4, anchura, altura, 1, dificultad, TILE_WIDTH, globalState);
	}

	free(aux);
	free(jewels_eliminadas);
	hipFree(tablero_d);
	hipFree(aux_d);
}

// Checks that a saved game exists and, if so, reads back the board width,
// height and difficulty from their metadata files (passed by reference).
// Returns false, after printing an error, when any of the four save files
// cannot be opened.
bool precargar(int& anchura, int& altura, int& dificultad, char* fichero) {
	std::ifstream fAnchura("anchura.txt");
	std::ifstream fAltura("altura.txt");
	std::ifstream fDificultad("dificultad.txt");
	std::ifstream fCarga(fichero);

	if (!fAnchura.is_open()) {
		std::cout << "ERROR: no existe un archivo guardado." << std::endl;
		return false;
	}
	if (!fAltura.is_open()) {
		std::cout << "ERROR: no existe un archivo guardado." << std::endl;
		return false;
	}
	if (!fDificultad.is_open()) {
		std::cout << "ERROR: no existe un archivo guardado." << std::endl;
		return false;
	}
	if (!fCarga.is_open()) {
		std::cout << "ERROR: no existe un archivo guardado."
<< std::endl; return false; }

	fAnchura >> anchura;
	fAltura >> altura;
	fDificultad >> dificultad;

	fAnchura.close();
	fAltura.close();
	fDificultad.close();
	fCarga.close();
	return true;
}

// Loads a board of anchura*altura single-digit jewels from the first line of
// the given save file into `tablero` (each character '0'..'9' becomes its
// numeric value as a float).
void cargar(int anchura, int altura, float* tablero, char* fichero) {
	char* array = (char*)malloc(anchura*altura + 1);
	std::ifstream fCarga(fichero);
	fCarga.getline(array, anchura*altura + 1);
	for (int i = 0; i < anchura*altura; i++) {
		int aux = (array[i] - 48);  // ASCII digit -> numeric value
		tablero[i] = (float)aux;
	}
	free(array);
	fCarga.close();
}

// Persists the board, its dimensions and the difficulty to disk.
// BUG FIX: the metadata files are now written with the same lower-case names
// ("anchura.txt", "altura.txt", "dificultad.txt") that precargar() reads, so
// a save -> load round-trip also works on case-sensitive filesystems (the
// original wrote "Anchura.txt" etc., which precargar could not find there).
void guardado(float* tablero, int anchura, int altura, int dificultad, char* fichero) {
	// Save system: one stream per piece of metadata plus the board itself.
	std::ofstream ficheroGuardado;
	std::ofstream ficheroAnchura;
	std::ofstream ficheroAltura;
	std::ofstream ficheroDificultad;

	/* Open them */
	ficheroGuardado.open(fichero);
	ficheroAnchura.open("anchura.txt");
	ficheroAltura.open("altura.txt");
	ficheroDificultad.open("dificultad.txt");

	/* Clear previous contents */
	ficheroGuardado.clear();
	ficheroAnchura.clear();
	ficheroAltura.clear();
	ficheroDificultad.clear();

	/* Store width, height and difficulty */
	ficheroAnchura << anchura;
	ficheroAltura << altura;
	ficheroDificultad << dificultad;

	/* Store the board itself, one value per cell with no separators */
	for (int index = 0; index < anchura*altura; index++) {
		ficheroGuardado << tablero[index];
	}

	ficheroGuardado.close();
	ficheroAnchura.close();
	ficheroAltura.close();
	ficheroDificultad.close();
}

/* Kernel that deletes one row: every cell at or above `fila` takes the value
   of the cell one row higher, and the displaced top row is refilled with
   fresh random jewels. */
__global__ void bombaFila(float* tablero, int anchura, int altura, int dificultad, int fila, int TILE_WIDTH, hiprandState_t* globalState) {
	int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y;
	int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x;
	float aux;
	if ((tFila + fila) < altura) {
		if (tColumna < anchura) {
			if ((tFila + fila + 1) == altura) {
				// Top row: generate a brand-new jewel.
				// NOTE(review): the RNG index uses a hard-coded stride of 3
				// (tFila * 3 + tColumna) rather than `anchura` — verify intent.
				tablero[(tFila + fila)*anchura + tColumna] = generarJewelCUDA(globalState, (tFila * 3 + tColumna), dificultad);
			}
			else {
				// Shift the jewel from the row above down into this cell.
				aux = tablero[(tFila + fila + 1)*anchura + tColumna];
				tablero[(tFila + fila)*anchura + tColumna] = aux;
			}
		}
	}
}
/* Funcion que elimina una
columna */ __global__ void bombaColumna(float* tablero, int anchura, int altura, int dificultad, int columna, int TILE_WIDTH, hiprandState_t* globalState) { int tFila = blockIdx.y*TILE_WIDTH +threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; if (tFila < altura) { if ((columna - tColumna) >= 0) { if ((columna - tColumna - 1) < 0) { tablero[(tFila*anchura) + (columna - tColumna)] = generarJewelCUDA(globalState, (tFila * 3 + tColumna), dificultad); } else { tablero[(tFila*anchura) + (columna - tColumna)] = tablero[(tFila*anchura) + (columna - tColumna - 1)]; } } } } __global__ void bombaRotarGPU(float* tablero, int anchura, int altura, int fila, int columna) { int tFila = threadIdx.y; int tColumna = threadIdx.x; if (tFila < 3) { if (tColumna < 3) { tablero[(fila + 1 - tColumna)*anchura + (columna - 1 + tFila)] = tablero[((fila + 1) - tFila)*anchura + ((columna + 1) - tColumna)]; } } } __global__ void bombaRotar(float* tablero_d, int anchura, int altura, int TILE_WIDTH) { int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; if (tFila < altura && tColumna < anchura) { if ((tFila - 1) < 0 || (tFila + 1) >= altura || (tColumna - 1) < 0 || (tColumna + 1) >= anchura) { /* Se entra cuando no se puede rotar */ } else { if (tFila % 3 == 1 && tColumna % 3 == 1) { dim3 dimBlock(3, 3); dim3 dimGrid(1, 1); bombaRotarGPU << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, tFila, tColumna); } } } } int main(int argc, char** argv) { //Matriz de tamao variable de floats, un array de Altura*Anchura int anchura; int altura; int dificultad; char modo; int size; char ficheroGuardado[9] = "save.txt"; int seleccion; float* tablero; float* tablero_d; hiprandState_t* devStates; bool jugando = true; /* Valores por argumento*/ if (argc == 1) { std::cout << "Anchura del tablero: "; std::cin >> anchura; std::cout << "Altura del tablero: "; std::cin >> altura; std::cout << "Elija dificultad: \n1.-Facil \n2.-Media \n3.-Dificil\n"; 
std::cin >> dificultad; std::cout << "Automatico? 1.-SI 2.-NO\n"; std::cin >> seleccion; } else { modo = argv[1][1]; dificultad = atoi(argv[2]); anchura = atoi(argv[3]); altura = atoi(argv[4]); switch (modo) { case 'a': {seleccion = 1; break; } case 'm': {seleccion = 2; break; } default: printf("Valor no valido.\n"); return -1; } } size = anchura*altura; //Tamao de los bloques a crear en CUDA int TILE_WIDTH = obtenerTileWidth(anchura, altura); if (TILE_WIDTH == -1) { printf("ERROR: TILE_WIDTH no valido"); return 0; } //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((float)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((float)altura) / TILE_WIDTH); /* Inicializacion random en CUDA */ hipMalloc(&devStates, size * sizeof(hiprandState_t)); /* Creacion de las Seeds */ setup_kernel << < 1, size >> > (devStates, unsigned(time(NULL))); /* Reservar memoria para tablero y tablero_d */ tablero = (float*)malloc(size * sizeof(float)); hipMalloc((void**)&tablero_d, size * sizeof(float)); /* Se inicializa la matriz */ dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); generacionInicialRandomJewels << <dimGrid, dimBlock >> >(tablero_d, dificultad, anchura, altura, TILE_WIDTH, devStates); hipMemcpy(tablero, tablero_d, size * sizeof(float), hipMemcpyDeviceToHost); //Bucle principal del juego while (jugando) { printf("%i", size); printTablero(tablero, anchura, altura); int jewel1_x = 0; int jewel1_y = 0; int accion = 0; std::cout << "Accin a realizar:\n"; std::cout << "(1) Intercambiar Jewels\n"; std::cout << "(2) Guardar partida\n"; std::cout << "(3) Cargar partida\n"; std::cout << "(9) Usar una Bomba\n"; std::cout << "(0) Exit\n"; std::cout << "Elija accion: "; std::cin >> accion; switch (accion) { /* EXIT */ case 0: { free(tablero); hipFree(tablero_d); hipFree(devStates); return 0; } /* Intercambio de jewel */ case 1: { printf("%i", seleccion); if (seleccion == 2) { std::cout << "Posicion de la primera 
jewel a intercambiar (empiezan en 0)\n"; std::cout << "Columna: "; std::cin >> jewel1_x; std::cout << "Fila: "; std::cin >> jewel1_y; if (!((jewel1_x < anchura) && (jewel1_x >= 0) && (jewel1_y < altura) && (jewel1_y >= 0))) { printf("Posicion erronea.\n"); continue; } int direccion = 0; std::cout << "Direccion a seguir para intercambio de posiciones: \n 1.-Arriba\n 2.-Abajo\n 3.-Izquierda\n 4.-Derecha\n"; std::cin >> direccion; if (direccion > 4 && direccion > 1) { printf("Direccion erronea.\n"); continue; } else { switch (direccion) { case 1: //Arriba { if (jewel1_y == altura) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 2: //Abajo { if (jewel1_y == 0) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 3: //Izquierda { if (jewel1_x == 0) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 4: //Derecha { if (jewel1_x == anchura - 1) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } } } /* Intercambiar posiciones */ intercambiarPosiciones(tablero, jewel1_x, jewel1_y, direccion, anchura, altura, seleccion, dificultad, TILE_WIDTH, devStates); } else if (seleccion == 1) { /* Analisis automatico */ analisisTableroAutomatico(dificultad, tablero, anchura, altura, TILE_WIDTH, devStates); } break; } /* Guardar Partida */ case 2: { guardado(tablero, anchura, altura, dificultad, ficheroGuardado); std::cout << "Guardado correcto.\n"; break; } /* Cargar Partida */ case 3: { /* Precarga de tablero */ int encontrado = precargar(anchura, altura, dificultad, ficheroGuardado); size = anchura*altura; if (encontrado) { free(tablero); hipFree(tablero_d); tablero = (float*)malloc(size * sizeof(float)); hipMalloc((void**)&tablero_d, size * sizeof(float)); /* Cargar tablero */ cargar(anchura, altura, tablero, ficheroGuardado); std::cout << "Automatico? 
1.-SI 2.-NO\n"; std::cin >> seleccion; std::cout << "Se ha cargado el Tablero: \n"; } else { std::cout << "No existe ninguna partida guardada.\n"; } break; } /* Bombas */ case 9: { int bomba = 0; int fila = 0; int columna = 0; std::cout << "Elija una bomba:"; hipMemcpy(tablero_d, tablero, size * sizeof(float), hipMemcpyHostToDevice); /* Bombas por tipo de dificultad */ switch (dificultad) { case 1: { std::cout << "(1) Bomba de fila "; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba != 1) { printf("Bomba erronea.\n"); continue; } std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "(1) Bomba de fila"; std::cout << "(2) Bomba de columna"; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba < 1 && bomba > 2) { printf("Bomba erronea.\n"); continue; } switch (bomba) { case 1: { std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "Columna: "; std::cin >> columna; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaColumna << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, dificultad, columna, TILE_WIDTH, devStates); break; } } break; } case 3: { std::cout << "(1) Bomba de fila"; std::cout << "(2) Bomba de columna"; std::cout << "(3) Bomba de rotacion 3x3"; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba < 1 && bomba > 3) { printf("Bomba erronea.\n"); continue; } switch (bomba) { case 1: { std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "Columna: "; 
std::cin >> columna; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaColumna << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, dificultad, columna, TILE_WIDTH, devStates); break; } case 3: { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt);; bombaRotar << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, TILE_WIDTH); break; } } break; } } hipMemcpy(tablero, tablero_d, size * sizeof(float), hipMemcpyDeviceToHost); break; } } } free(tablero); hipFree(tablero_d); hipFree(devStates); return 0; }
1489d2ece35c0eb53288a3f6834bf68c15a9470d.cu
#include <stdio.h> #include <cuda_runtime.h> #include <iostream> #include <cstdlib> #include <curand.h> #include <curand_kernel.h> #include <ctime> #include <fstream> //Analiza las propiedades de la tarjeta grafica para devolver el tamaño adecuado de tile, tambien trata el tamaño del tablero int obtenerTileWidth(int anchura, int altura) { float min_medida = 0; if (anchura > altura) min_medida = anchura; else min_medida = altura; cudaDeviceProp propiedades; cudaGetDeviceProperties(&propiedades, 0); int max_threads = propiedades.maxThreadsPerBlock; if (anchura == altura) { //Si la matriz es cuadrada, para no tener 1 solo bloque if (min_medida / 32 > 1 && max_threads == 1024) { //Solo si tiene 1024 hilos por bloque podra ser de 32x32 return 32; } else if (min_medida / 16 > 1) { return 16; } else if (min_medida / 8 > 1) { return 8; } else if (min_medida / 4 > 1) { return 4; } else if (min_medida / 2 > 1) { return 2; } } else { //si la matriz no es cuadrada if (min_medida / 32 >= 1 && max_threads == 1024) { return 32; } else if (min_medida / 16 >= 1) { return 16; } else if (min_medida / 8 >= 1) { return 8; } else if (min_medida / 4 >= 1) { return 4; } else if (min_medida / 2 >= 1) { return 2; } } return -1; } //funcion para generar una jewel aleatoria, como la generacion inicial. 
/* Funciones para generar gemas aleatorias */ /* Iniciador de seeds */ __global__ void setup_kernel(curandState * state, unsigned long seed) { int id = threadIdx.x; curand_init(seed, id, 0, &state[id]); } /* Crear jewel usando globalState */ __device__ float generate(curandState* globalState, int ind) { curandState localState = globalState[ind]; float RANDOM = curand_uniform(&localState); globalState[ind] = localState; return RANDOM; } /* Funcion para generarJewel en CUDA */ __device__ int generarJewelCUDA(curandState* globalState, int ind, int dificultad) { switch (dificultad) { case 1: { return (int)1 + generate(globalState, ind) * 4; } case 2: { return (int)1 + generate(globalState, ind) * 6; } case 3: { return (int)1 + generate(globalState, ind) * 8; } } return -1; } /* Funcion para inicializar la matriz de gemas */ __global__ void generacionInicialRandomJewels(float *tablero, int dificultad, int anchura, int altura, int TILE_WIDTH, curandState* globalState) { int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; if (tFila < altura) { if (tColumna < anchura) { tablero[tFila*anchura + tColumna] = generarJewelCUDA(globalState, tFila * anchura + tColumna, dificultad); } } } void printTablero(float* tablero, int anchura, int altura) { for (int i = altura - 1; i >= 0; i--) { printf("\n"); for (int j = 0; j < anchura; j++) { printf("%d ", (int)tablero[j + i*anchura]); } } printf("\n"); } /*Recibe las coordenadas de las jewels a eliminar y mueve las filas que tiene que bajar a partir de ellas, emplea una copia del tablero para evitar race conditions*/ __global__ void eliminarJewelsKernel(float* tablero_d, float* tablero_aux_d, float* jewels_eliminadas_d, int dificultad, int anchura, int altura, int final, int TILE_WIDTH, curandState* globalState) { int tx = threadIdx.x; int ty = threadIdx.y; int block_x = blockIdx.x; int block_y = blockIdx.y; //Posicion real dentro del tablero tx += block_x * TILE_WIDTH; ty += block_y * 
TILE_WIDTH; if (jewels_eliminadas_d[0] != jewels_eliminadas_d[2] && tx >= jewels_eliminadas_d[0] && tx <= jewels_eliminadas_d[final - 2] && ty >= jewels_eliminadas_d[1]) { if (ty + 1 < altura) { float value = tablero_aux_d[tx + (ty + 1)*anchura]; tablero_d[tx + (ty)*(anchura)] = value; } else { tablero_d[tx + ty*anchura] = generarJewelCUDA(globalState, tx + ty*anchura, dificultad); } } else { if (ty < altura && tx == jewels_eliminadas_d[0] && ty > jewels_eliminadas_d[1]) { float value = tablero_aux_d[tx + (ty)*anchura]; tablero_d[tx + (ty - final / 2)*(anchura)] = value; } if (ty >= altura - final / 2 && ty < altura && tx == jewels_eliminadas_d[0]) { tablero_d[tx + (ty)*anchura] = generarJewelCUDA(globalState, tx + ty*anchura, dificultad); } } } /*Funcion que prepara y llama el kernel con su mismo nombre, genera todos los datos necesarios*/ void eliminarJewels(float* tablero, float* jewels_eliminadas, int dificultad, int anchura, int altura, int TILE_WIDTH, curandState* globalState) { float *tablero_d; float *jewels_eliminadas_d; float *tablero_aux_d; int size = anchura * altura * sizeof(float); int max = 0; //Para saber que medida es la más grande, ya que no se pueden eliminar más jewels seguidas que esa medida if (altura >= anchura) max = altura; else max = anchura; //Tablero a GPU y la copia del tablero cudaMalloc((void**)&tablero_d, size); cudaMemcpy(tablero_d, tablero, size, cudaMemcpyHostToDevice); cudaMalloc((void**)&tablero_aux_d, size); cudaMemcpy(tablero_aux_d, tablero, size, cudaMemcpyHostToDevice); //Jewels a eliminar a GPU. 
2*max ya que cada posicion son dos coordenadas, x e y cudaMalloc((void**)&jewels_eliminadas_d, 2 * max * sizeof(float)); cudaMemcpy(jewels_eliminadas_d, jewels_eliminadas, 2 * max * sizeof(float), cudaMemcpyHostToDevice); int final = 0; bool modif = false; //Calcula cual es el ultimo valor escrito de las jewels a eliminar, ya que puede haber posiciones no escritas for (int i = 0; i < max * 2; i++) { if (jewels_eliminadas[i] < 0) { final = i; modif = true; break; } } //En caso de que este completamente escrito if (!modif) final = max * 2; //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((double)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((double)altura) / TILE_WIDTH); //Configuracion de ejecucion dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); eliminarJewelsKernel << <dimGrid, dimBlock >> > (tablero_d, tablero_aux_d, jewels_eliminadas_d, dificultad, anchura, altura, final, TILE_WIDTH, globalState); //Se recupera el tablero actualizado cudaMemcpy(tablero, tablero_d, size, cudaMemcpyDeviceToHost); //Libera memoria cudaFree(tablero_d); cudaFree(jewels_eliminadas_d); cudaFree(tablero_aux_d); } /*Escribe en un tablero auxiliar la cantidad de jewels que se eliminarian moviendo una jewel (x,y) hacia la derecha paralelizable ya que todos los hilos (cada hilo 1 jewel) tienen que expandirse hacia la derecha para ver hasta donde llegarian a eliminar*/ __global__ void analisisTableroAutomaticoKernel(float *tablero_d, float *aux_d, int dificultad, int anchura, int altura, int TILE_WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int block_x = blockIdx.x; int block_y = blockIdx.y; //Posicion real dentro del tablero tx += block_x * TILE_WIDTH; ty += block_y * TILE_WIDTH; int jewels_posibles_der = 0; //Si tiene por la derecha if ((tx + 2) < anchura) { if (((tx + 2) + ty*anchura <= altura*anchura) && tablero_d[tx + 2 + ty*anchura] == tablero_d[tx + ty*anchura]) { int i = 2; //Se expande while 
((tx + i + ty*anchura <= altura*anchura) && tablero_d[tx + i + ty*anchura] == tablero_d[tx + ty*anchura]) { jewels_posibles_der++; i++; } aux_d[tx + ty*anchura] = jewels_posibles_der + 1; } else { aux_d[tx + ty*anchura] = 1; } } else { aux_d[tx + ty*anchura] = 1; } //printf("%i-%f ", tx + ty*anchura, aux_d[tx + ty*anchura]); } //Analiza el movimiento manual, usando las coordenadas de la nueva posicion de la jewel seleccionada void analisisTableroManual(int dificultad, float* tablero, int anchura, int altura, int x, int y, int TILE_WIDTH, curandState* globalState) { int max = 0; int size = anchura*altura; if (altura >= anchura) max = altura; else max = anchura; //Solo se eliminan MAX jewels como mucho, se guardan sus x e y float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float)); //Se inicializa a -1 àra saber hasta que punto se escribe for (int i = 0; i < max; i++) { jewels_eliminadas[i] = -1; } int jewels_posibles_izq = 0; int jewels_posibles_der = 0; //Si tiene por la izquierda if ((x - 1 + y*anchura >= 0) && tablero[x - 1 + y*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x - i + y*anchura >= 0) && (x - i >= 0) && tablero[x - i + y*anchura] == tablero[x + y*anchura]) { jewels_posibles_izq++; i++; } } //Si tiene por la derecha if ((x + 1 + y*anchura <= size) && tablero[x + 1 + y*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + i + y*anchura <= size) && (x + i < anchura) && tablero[x + i + y*anchura] == tablero[x + y*anchura]) { jewels_posibles_der++; i++; } } //Se pueden eliminar horizontalmente, las coloca en orden para facilitar su eliminacion if (1 + jewels_posibles_izq + jewels_posibles_der >= 3) { int salto = 0; for (int j = jewels_posibles_izq; j >= (1); j--) { jewels_eliminadas[salto] = x - j; jewels_eliminadas[salto + 1] = y; salto += 2; } jewels_eliminadas[jewels_posibles_izq * 2] = x; jewels_eliminadas[jewels_posibles_izq * 2 + 1] = y; salto = 2; for (int k = 1; k <= jewels_posibles_der; k++) { jewels_eliminadas[salto + 
jewels_posibles_izq * 2] = x + k; jewels_eliminadas[salto + jewels_posibles_izq * 2 + 1] = y; salto += 2; } } else { //Analizamos la vertical int jewels_posibles_arrib = 0; int jewels_posibles_abaj = 0; //Si tiene por abajo if ((x + (y - 1)*anchura >= 0) && tablero[x + (y - 1)*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + (y - i)*anchura >= 0) && tablero[x + (y - i)*anchura] == tablero[x + y*anchura]) { jewels_posibles_abaj++; i++; } } //Si tiene por arriba if ((x + 1 + y*anchura <= size) && tablero[x + (y + 1)*anchura] == tablero[x + y*anchura]) { int i = 1; while ((x + (y + i)*anchura <= size) && tablero[x + (y + i)*anchura] == tablero[x + y*anchura]) { jewels_posibles_arrib++; i++; } } //Se pueden eliminar if (1 + jewels_posibles_abaj + jewels_posibles_arrib >= 3) { int salto = 0; for (int j = jewels_posibles_abaj; j >= (1); j--) { jewels_eliminadas[salto] = x; jewels_eliminadas[salto + 1] = y - j; salto += 2; } jewels_eliminadas[jewels_posibles_abaj * 2] = x; jewels_eliminadas[jewels_posibles_abaj * 2 + 1] = y; salto = 2; for (int k = 1; k <= jewels_posibles_arrib; k++) { jewels_eliminadas[salto + jewels_posibles_abaj * 2] = x; jewels_eliminadas[salto + jewels_posibles_abaj * 2 + 1] = y + k; salto += 2; } } } //Las elimina eliminarJewels(tablero, jewels_eliminadas, dificultad, anchura, altura, TILE_WIDTH, globalState); free(jewels_eliminadas); } //Intercambia la jewel seleccionadas con la jewel en la dirección indicada void intercambiarPosiciones(float* tablero, int jewel1_x, int jewel1_y, int direccion, int anchura, int altura, int seleccion, int dificultad, int TILE_WIDTH, curandState* globalState) { int jewel2_x = jewel1_x; int jewel2_y = jewel1_y; switch (direccion) { case 1: //Arriba { jewel2_y += 1; break; } case 2: //Abajo { jewel2_y -= 1; break; } case 3: //Izquierda { jewel2_x -= 1; break; } case 4: //Derecha { jewel2_x += 1; break; } } int aux1; aux1 = tablero[jewel2_x + jewel2_y*anchura]; tablero[jewel2_x + jewel2_y*anchura] = 
tablero[jewel1_x + jewel1_y*anchura]; tablero[jewel1_x + jewel1_y*anchura] = aux1; //Analiza el movimiento para ver si se pueden eliminar jewels analisisTableroManual(dificultad, tablero, anchura, altura, jewel2_x, jewel2_y, TILE_WIDTH, globalState); } //Analiza la mejor opcion y la ejecuta en funcion de lo que devuelve el kernel void analisisTableroAutomatico(int dificultad, float* tablero, int anchura, int altura, int TILE_WIDTH, curandState* globalState) { float *tablero_d; float *aux_d; float *aux; //Tamaño del tablero para asignar memoria int size = anchura * altura * sizeof(float); int tam = anchura * altura; int max = 0; if (altura >= anchura) max = altura; else max = anchura; //Solo se eliminan max jewels, 2 coordenadas por jewel = 2 * max posiciones float* jewels_eliminadas = (float*)malloc(2 * max * sizeof(float)); aux = (float*)malloc(size); for (int i = 0; i < max; i++) { jewels_eliminadas[i] = -1; } //Solo se cuenta la jewel que se escoge, sigue siendo menor que 3 for (int p = 0; p < tam; p++) { aux[p] = 1; } //Tablero a GPU cudaMalloc((void**)&tablero_d, size); cudaMemcpy(tablero_d, tablero, size, cudaMemcpyHostToDevice); //Auxiliar de conteo a GPU cudaMalloc((void**)&aux_d, size); cudaMemcpy(aux_d, aux, size, cudaMemcpyHostToDevice); //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((double)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((double)altura) / TILE_WIDTH); //Configuracion de ejecucion dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); //Inicio del kernel analisisTableroAutomaticoKernel << <dimGrid, dimBlock >> > (tablero_d, aux_d, dificultad, anchura, altura, TILE_WIDTH); //Transfiere el resultado de la GPU al host cudaMemcpy(aux, aux_d, size, cudaMemcpyDeviceToHost); int x_mejor = 0; int y_mejor = 0; int valor_mejor = 0; //Se busca el movimiento con el mayor numero de jewels eliminadas for (int y = 0; y < altura; y++) { for (int x = 0; x < anchura; x++) { if 
(aux[x + y*anchura] > valor_mejor) { valor_mejor = aux[x + y*anchura]; x_mejor = x; y_mejor = y; } } } //Si se pueden eliminar se ejecuta el movimiento, con lo que ello conlleva if (valor_mejor >= 3) { intercambiarPosiciones(tablero, x_mejor, y_mejor, 4, anchura, altura, 1, dificultad, TILE_WIDTH, globalState); } free(aux); free(jewels_eliminadas); cudaFree(tablero_d); cudaFree(aux_d); } bool precargar(int& anchura, int& altura, int& dificultad, char* fichero) { std::ifstream fAnchura("anchura.txt"); std::ifstream fAltura("altura.txt"); std::ifstream fDificultad("dificultad.txt"); std::ifstream fCarga(fichero); if (!fAnchura.is_open()) { std::cout << "ERROR: no existe un archivo guardado." << std::endl; return false; } if (!fAltura.is_open()) { std::cout << "ERROR: no existe un archivo guardado." << std::endl; return false; } if (!fDificultad.is_open()) { std::cout << "ERROR: no existe un archivo guardado." << std::endl; return false; } if (!fCarga.is_open()) { std::cout << "ERROR: no existe un archivo guardado." 
<< std::endl; return false; } fAnchura >> anchura; fAltura >> altura; fDificultad >> dificultad; fAnchura.close(); fAltura.close(); fDificultad.close(); fCarga.close(); return true; } void cargar(int anchura, int altura, float* tablero, char* fichero) { int aux; char* array = (char*)malloc(anchura*altura + 1); std::ifstream fCarga(fichero); fCarga.getline(array, anchura*altura + 1); for (int i = 0; i < anchura*altura; i++) { aux = (array[i] - 48); tablero[i] = (float)aux; } free(array); fCarga.close(); } void guardado(float* tablero, int anchura, int altura, int dificultad, char* fichero) { //Sistema de guardado std::ofstream ficheroGuardado; std::ofstream ficheroAnchura; std::ofstream ficheroAltura; std::ofstream ficheroDificultad; /* Abrirlos */ ficheroGuardado.open(fichero); ficheroAnchura.open("Anchura.txt"); ficheroAltura.open("Altura.txt"); ficheroDificultad.open("Dificultad.txt"); /* Limpiar el contenido */ ficheroGuardado.clear(); ficheroAnchura.clear(); ficheroAltura.clear(); ficheroDificultad.clear(); /* Almacenar anchura y altura*/ ficheroAnchura << anchura; ficheroAltura << altura; ficheroDificultad << dificultad; /* Almacenar Resto */ for (int index = 0; index < anchura*altura; index++) { ficheroGuardado << tablero[index]; } ficheroGuardado.close(); ficheroAnchura.close(); ficheroAltura.close(); ficheroDificultad.close(); } /* Funcion que elimina una fila */ __global__ void bombaFila(float* tablero, int anchura, int altura, int dificultad, int fila, int TILE_WIDTH, curandState* globalState) { int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; float aux; if ((tFila + fila) < altura) { if (tColumna < anchura) { if ((tFila + fila + 1) == altura) { tablero[(tFila + fila)*anchura + tColumna] = generarJewelCUDA(globalState, (tFila * 3 + tColumna), dificultad); } else { aux = tablero[(tFila + fila + 1)*anchura + tColumna]; tablero[(tFila + fila)*anchura + tColumna] = aux; } } } } /* Funcion que elimina una 
columna */ __global__ void bombaColumna(float* tablero, int anchura, int altura, int dificultad, int columna, int TILE_WIDTH, curandState* globalState) { int tFila = blockIdx.y*TILE_WIDTH +threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; if (tFila < altura) { if ((columna - tColumna) >= 0) { if ((columna - tColumna - 1) < 0) { tablero[(tFila*anchura) + (columna - tColumna)] = generarJewelCUDA(globalState, (tFila * 3 + tColumna), dificultad); } else { tablero[(tFila*anchura) + (columna - tColumna)] = tablero[(tFila*anchura) + (columna - tColumna - 1)]; } } } } __global__ void bombaRotarGPU(float* tablero, int anchura, int altura, int fila, int columna) { int tFila = threadIdx.y; int tColumna = threadIdx.x; if (tFila < 3) { if (tColumna < 3) { tablero[(fila + 1 - tColumna)*anchura + (columna - 1 + tFila)] = tablero[((fila + 1) - tFila)*anchura + ((columna + 1) - tColumna)]; } } } __global__ void bombaRotar(float* tablero_d, int anchura, int altura, int TILE_WIDTH) { int tFila = blockIdx.y*TILE_WIDTH + threadIdx.y; int tColumna = blockIdx.x*TILE_WIDTH + threadIdx.x; if (tFila < altura && tColumna < anchura) { if ((tFila - 1) < 0 || (tFila + 1) >= altura || (tColumna - 1) < 0 || (tColumna + 1) >= anchura) { /* Se entra cuando no se puede rotar */ } else { if (tFila % 3 == 1 && tColumna % 3 == 1) { dim3 dimBlock(3, 3); dim3 dimGrid(1, 1); bombaRotarGPU << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, tFila, tColumna); } } } } int main(int argc, char** argv) { //Matriz de tamaño variable de floats, un array de Altura*Anchura int anchura; int altura; int dificultad; char modo; int size; char ficheroGuardado[9] = "save.txt"; int seleccion; float* tablero; float* tablero_d; curandState* devStates; bool jugando = true; /* Valores por argumento*/ if (argc == 1) { std::cout << "Anchura del tablero: "; std::cin >> anchura; std::cout << "Altura del tablero: "; std::cin >> altura; std::cout << "Elija dificultad: \n1.-Facil \n2.-Media \n3.-Dificil\n"; 
std::cin >> dificultad; std::cout << "Automatico? 1.-SI 2.-NO\n"; std::cin >> seleccion; } else { modo = argv[1][1]; dificultad = atoi(argv[2]); anchura = atoi(argv[3]); altura = atoi(argv[4]); switch (modo) { case 'a': {seleccion = 1; break; } case 'm': {seleccion = 2; break; } default: printf("Valor no valido.\n"); return -1; } } size = anchura*altura; //Tamaño de los bloques a crear en CUDA int TILE_WIDTH = obtenerTileWidth(anchura, altura); if (TILE_WIDTH == -1) { printf("ERROR: TILE_WIDTH no valido"); return 0; } //Cantidad de bloques de ancho de medida TILE_WIDTH int anch = ceil(((float)anchura) / TILE_WIDTH); //Cantidad de bloques de alto con medida TILE_WIDTH int alt = ceil(((float)altura) / TILE_WIDTH); /* Inicializacion random en CUDA */ cudaMalloc(&devStates, size * sizeof(curandState)); /* Creacion de las Seeds */ setup_kernel << < 1, size >> > (devStates, unsigned(time(NULL))); /* Reservar memoria para tablero y tablero_d */ tablero = (float*)malloc(size * sizeof(float)); cudaMalloc((void**)&tablero_d, size * sizeof(float)); /* Se inicializa la matriz */ dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); generacionInicialRandomJewels << <dimGrid, dimBlock >> >(tablero_d, dificultad, anchura, altura, TILE_WIDTH, devStates); cudaMemcpy(tablero, tablero_d, size * sizeof(float), cudaMemcpyDeviceToHost); //Bucle principal del juego while (jugando) { printf("%i", size); printTablero(tablero, anchura, altura); int jewel1_x = 0; int jewel1_y = 0; int accion = 0; std::cout << "Acción a realizar:\n"; std::cout << "(1) Intercambiar Jewels\n"; std::cout << "(2) Guardar partida\n"; std::cout << "(3) Cargar partida\n"; std::cout << "(9) Usar una Bomba\n"; std::cout << "(0) Exit\n"; std::cout << "Elija accion: "; std::cin >> accion; switch (accion) { /* EXIT */ case 0: { free(tablero); cudaFree(tablero_d); cudaFree(devStates); return 0; } /* Intercambio de jewel */ case 1: { printf("%i", seleccion); if (seleccion == 2) { std::cout << "Posicion de la 
primera jewel a intercambiar (empiezan en 0)\n"; std::cout << "Columna: "; std::cin >> jewel1_x; std::cout << "Fila: "; std::cin >> jewel1_y; if (!((jewel1_x < anchura) && (jewel1_x >= 0) && (jewel1_y < altura) && (jewel1_y >= 0))) { printf("Posicion erronea.\n"); continue; } int direccion = 0; std::cout << "Direccion a seguir para intercambio de posiciones: \n 1.-Arriba\n 2.-Abajo\n 3.-Izquierda\n 4.-Derecha\n"; std::cin >> direccion; if (direccion > 4 && direccion > 1) { printf("Direccion erronea.\n"); continue; } else { switch (direccion) { case 1: //Arriba { if (jewel1_y == altura) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 2: //Abajo { if (jewel1_y == 0) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 3: //Izquierda { if (jewel1_x == 0) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } case 4: //Derecha { if (jewel1_x == anchura - 1) { printf("No se puede realizar el intercambio especificado.\n"); continue; } break; } } } /* Intercambiar posiciones */ intercambiarPosiciones(tablero, jewel1_x, jewel1_y, direccion, anchura, altura, seleccion, dificultad, TILE_WIDTH, devStates); } else if (seleccion == 1) { /* Analisis automatico */ analisisTableroAutomatico(dificultad, tablero, anchura, altura, TILE_WIDTH, devStates); } break; } /* Guardar Partida */ case 2: { guardado(tablero, anchura, altura, dificultad, ficheroGuardado); std::cout << "Guardado correcto.\n"; break; } /* Cargar Partida */ case 3: { /* Precarga de tablero */ int encontrado = precargar(anchura, altura, dificultad, ficheroGuardado); size = anchura*altura; if (encontrado) { free(tablero); cudaFree(tablero_d); tablero = (float*)malloc(size * sizeof(float)); cudaMalloc((void**)&tablero_d, size * sizeof(float)); /* Cargar tablero */ cargar(anchura, altura, tablero, ficheroGuardado); std::cout << "Automatico? 
1.-SI 2.-NO\n"; std::cin >> seleccion; std::cout << "Se ha cargado el Tablero: \n"; } else { std::cout << "No existe ninguna partida guardada.\n"; } break; } /* Bombas */ case 9: { int bomba = 0; int fila = 0; int columna = 0; std::cout << "Elija una bomba:"; cudaMemcpy(tablero_d, tablero, size * sizeof(float), cudaMemcpyHostToDevice); /* Bombas por tipo de dificultad */ switch (dificultad) { case 1: { std::cout << "(1) Bomba de fila "; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba != 1) { printf("Bomba erronea.\n"); continue; } std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "(1) Bomba de fila"; std::cout << "(2) Bomba de columna"; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba < 1 && bomba > 2) { printf("Bomba erronea.\n"); continue; } switch (bomba) { case 1: { std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "Columna: "; std::cin >> columna; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaColumna << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, dificultad, columna, TILE_WIDTH, devStates); break; } } break; } case 3: { std::cout << "(1) Bomba de fila"; std::cout << "(2) Bomba de columna"; std::cout << "(3) Bomba de rotacion 3x3"; std::cout << "\nEleccion: "; std::cin >> bomba; if (bomba < 1 && bomba > 3) { printf("Bomba erronea.\n"); continue; } switch (bomba) { case 1: { std::cout << "Fila: "; std::cin >> fila; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaFila << <dimGrid, dimBlock >> > (tablero_d, anchura, altura, dificultad, fila, TILE_WIDTH, devStates); break; } case 2: { std::cout << "Columna: "; 
std::cin >> columna; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt); bombaColumna << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, dificultad, columna, TILE_WIDTH, devStates); break; } case 3: { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(anch, alt);; bombaRotar << <dimGrid, dimBlock >> >(tablero_d, anchura, altura, TILE_WIDTH); break; } } break; } } cudaMemcpy(tablero, tablero_d, size * sizeof(float), cudaMemcpyDeviceToHost); break; } } } free(tablero); cudaFree(tablero_d); cudaFree(devStates); return 0; }
24c3d00bf319b9c802d4484021422144698b3311.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>

// Auction-algorithm bipartite matching between two point clouds.
// For each batch element, finds a one-to-one assignment xyz1[k] <-> xyz2[j]
// approximately minimising total Euclidean distance.
//
// Launch (see AuctionMatchLauncher below): grid = 32 blocks, block = 512
// threads; each block walks over batch elements bno = blockIdx.x, +gridDim.x.
// Requires n <= 4096 (shared-array capacity NMax).
// cost is per-block scratch: cost[blockIdx.x*n*n + k*n + j] holds the distance
// between xyz1 point k and xyz2 point j of the batch element this block is
// currently processing (indexed by blockIdx.x, not bno, so blocks reuse it).
// Outputs: matchr[bno*n+j] = xyz1-index matched to xyz2 point j,
//          matchl[bno*n+k] = xyz2-index matched to xyz1 point k.
// NOTE(review): uses unmasked __shfl_down — valid HIP, but the CUDA twin of
// this file needs the *_sync variants on Volta+.
__global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){
	//this kernel handles up to 4096 points
	const int NMax=4096;
	__shared__ short Queue[NMax];       // circular queue of unassigned xyz1 indices
	__shared__ short matchrbuf[NMax];   // current owner (xyz1 index) of each xyz2 point, -1 = free
	__shared__ float pricer[NMax];      // auction price of each xyz2 point
	__shared__ float bests[32][3];      // per-warp (best, second-best, argmin) for cross-warp reduce
	__shared__ int qhead,qlen;          // queue head index and length (maintained by thread 0)
	const int BufLen=2048;
	__shared__ float buf[BufLen];       // staging tile of xyz1 coordinates (Block*3 floats)
	for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){
		int cnt=0;                  // bids since the last tolerance escalation
		float tolerance=1e-4;       // epsilon added to each bid (epsilon-scaling)
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+j]=-1;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchrbuf[j]=-1;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) Queue[j]=j;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) pricer[j]=0;
		const int Block=512;
		// Precompute the full n x n pairwise-distance matrix into cost[],
		// tiling xyz1 through shared memory Block points at a time.
		for (int k0=0;k0<n;k0+=Block){
			int k1=min(n,k0+Block);
			for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x) buf[k]=xyz1[bno*n*3+k0*3+k];
			__syncthreads();
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x2=xyz2[bno*n*3+j*3+0];
				float y2=xyz2[bno*n*3+j*3+1];
				float z2=xyz2[bno*n*3+j*3+2];
				for (int k=k0;k<k1;k++){
					float x1=buf[(k-k0)*3+0];
					float y1=buf[(k-k0)*3+1];
					float z1=buf[(k-k0)*3+2];
					float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2));
					cost[blockIdx.x*n*n+k*n+j]=d;
				}
			}
			__syncthreads();
		}
		if (threadIdx.x==0){
			qhead=0;
			qlen=n;
		}
		__syncthreads();
		// value9..value16 prefetch the NEXT queued bidder's cost row (i2) while
		// processing the current one (i); 'loaded' marks the prefetch as valid.
		int loaded=0;
		float value9,value10,value11,value12,value13,value14,value15,value16;
		while (qlen){
			int i=Queue[qhead];   // current bidder (unassigned xyz1 point)
			int i2;               // next bidder, for prefetching
			if (qhead+1<n) i2=Queue[qhead+1]; else i2=Queue[0];
			float best=1e38f,best2=1e38f;   // lowest and second-lowest value seen by this thread
			int bestj=0;                    // argmin for 'best'
			if (n==blockDim.x*8){
				// Fast path: exactly 8 candidates per thread, fully unrolled
				// tournament to get (best, best2, argmin) without a loop.
				int j=threadIdx.x;
				float value1,value2,value3,value4,value5,value6,value7,value8;
				if (loaded){
					// Reuse the prefetched cost row; only prices changed since.
					value1=value9+pricer[j];
					value2=value10+pricer[j+blockDim.x];
					value3=value11+pricer[j+blockDim.x*2];
					value4=value12+pricer[j+blockDim.x*3];
					value5=value13+pricer[j+blockDim.x*4];
					value6=value14+pricer[j+blockDim.x*5];
					value7=value15+pricer[j+blockDim.x*6];
					value8=value16+pricer[j+blockDim.x*7];
					loaded=0;
				}else{
					value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
					value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
					value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4];
					value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5];
					value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6];
					value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7];
					// Prefetch next bidder's raw costs (prices added when used).
					value9=cost[blockIdx.x*n*n+i2*n+j];
					value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x];
					value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2];
					value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3];
					value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4];
					value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5];
					value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6];
					value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7];
					loaded=qlen>1;
				}
				int vj,vj2,vj3,vj4;
				// Pairwise sorts: after each if, valueX <= valueX+1 and vj* is the argmin.
				if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
				if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; }
				if (value5<value6){ vj3=j+blockDim.x*4; }else{ vj3=j+blockDim.x*5; float t=value5; value5=value6; value6=t; }
				if (value7<value8){ vj4=j+blockDim.x*6; }else{ vj4=j+blockDim.x*7; float t=value7; value7=value8; value8=t; }
				if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; }
				if (value5<value7){ value6=fminf(value6,value7); }else{ value6=fminf(value5,value8); value5=value7; vj3=vj4; }
				if (value1<value5){ best=value1; bestj=vj; best2=fminf(value2,value5); }else{ best2=fminf(value1,value6); best=value5; bestj=vj3; }
			}else if (n>=blockDim.x*4){
				for (int j=threadIdx.x;j<n;j+=blockDim.x*4){
					float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
					float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
					int vj,vj2;
					if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
					if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; }
					if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; }
					if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; }
				}
			}else if (n>=blockDim.x*2){
				for (int j=threadIdx.x;j<n;j+=blockDim.x*2){
					float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					int vj;
					if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
					if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; }
				}
			}else{
				for (int j=threadIdx.x;j<n;j+=blockDim.x){
					float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					if (best<value){ best2=fminf(best2,value); }else{ best2=best; bestj=j; best=value; }
				}
			}
			// Warp-level reduction of (best, best2, bestj); lane 0 of each warp
			// holds the warp result afterwards.
			for (int i=16;i>0;i>>=1){
				float b1=__shfl_down(best,i,32);
				float b2=__shfl_down(best2,i,32);
				int bj=__shfl_down(bestj,i,32);
				if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; }
			}
			if ((threadIdx.x&31)==0){
				bests[threadIdx.x>>5][0]=best;
				bests[threadIdx.x>>5][1]=best2;
				*(int*)&bests[threadIdx.x>>5][2]=bestj;  // argmin stored bit-cast into a float slot
			}
			__syncthreads();
			// Cross-warp reduction over the nn warp results; only the first nn
			// lanes of warp 0 participate; thread 0 ends with the block minimum.
			int nn=blockDim.x>>5;
			if (threadIdx.x<nn){
				best=bests[threadIdx.x][0];
				best2=bests[threadIdx.x][1];
				bestj=*(int*)&bests[threadIdx.x][2];
				for (int i=nn>>1;i>0;i>>=1){
					float b1=__shfl_down(best,i,32);
					float b2=__shfl_down(best2,i,32);
					int bj=__shfl_down(bestj,i,32);
					if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; }
				}
			}
			if (threadIdx.x==0){
				// Bid: raise bestj's price by (second-best - best + tolerance),
				// dequeue the bidder, re-queue the evicted previous owner.
				float delta=best2-best+tolerance;
				qhead++;
				qlen--;
				if (qhead>=n) qhead-=n;
				int old=matchrbuf[bestj];
				pricer[bestj]+=delta;
				cnt++;
				if (old!=-1){
					int ql=qlen;
					int tail=qhead+ql;
					qlen=ql+1;
					if (tail>=n) tail-=n;
					Queue[tail]=old;
				}
				// Epsilon-scaling: every 40*n bids, multiply tolerance by 100;
				// once it has already reached 1.0, give up and drain the queue.
				if (cnt==(40*n)){
					if (tolerance==1.0) qlen=0;
					tolerance=fminf(1.0,tolerance*100);
					cnt=0;
				}
			}
			__syncthreads();
			if (threadIdx.x==0){
				matchrbuf[bestj]=i;
			}
		}
		__syncthreads();
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchr[bno*n+j]=matchrbuf[j];
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+matchrbuf[j]]=j;
		__syncthreads();
	}
}
// Host wrapper: launches the auction kernel with a fixed 32x512 configuration
// on the default stream (so n==4096 hits the unrolled fast path: 512*8).
void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){
	hipLaunchKernelGGL(( AuctionMatchKernel), dim3(32),dim3(512), 0, 0, b,n,xyz1,xyz2,matchl,matchr,cost);
}
24c3d00bf319b9c802d4484021422144698b3311.cu
#include <cstdio>

// Auction-algorithm bipartite matching between two point clouds.
// For each batch element, finds a one-to-one assignment xyz1[k] <-> xyz2[j]
// approximately minimising total Euclidean distance.
//
// Launch (see AuctionMatchLauncher below): grid = 32 blocks, block = 512
// threads; each block walks over batch elements bno = blockIdx.x, +gridDim.x.
// Requires n <= 4096 (shared-array capacity NMax).
// cost is per-block scratch: cost[blockIdx.x*n*n + k*n + j] holds the distance
// between xyz1 point k and xyz2 point j of the batch element this block is
// currently processing (indexed by blockIdx.x, not bno, so blocks reuse it).
// Outputs: matchr[bno*n+j] = xyz1-index matched to xyz2 point j,
//          matchl[bno*n+k] = xyz2-index matched to xyz1 point k.
//
// Fix vs. original: the legacy mask-less __shfl_down intrinsics were removed
// for Volta+ (CUDA 9); replaced with __shfl_down_sync and an explicit
// participant mask. The cross-warp reduction only has nn active lanes, so its
// mask covers exactly those lanes. Float literals given 'f' suffixes.
__global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){
	//this kernel handles up to 4096 points
	const int NMax=4096;
	__shared__ short Queue[NMax];       // circular queue of unassigned xyz1 indices
	__shared__ short matchrbuf[NMax];   // current owner (xyz1 index) of each xyz2 point, -1 = free
	__shared__ float pricer[NMax];      // auction price of each xyz2 point
	__shared__ float bests[32][3];      // per-warp (best, second-best, argmin) for cross-warp reduce
	__shared__ int qhead,qlen;          // queue head index and length (maintained by thread 0)
	const int BufLen=2048;
	__shared__ float buf[BufLen];       // staging tile of xyz1 coordinates (Block*3 floats)
	for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){
		int cnt=0;                  // bids since the last tolerance escalation
		float tolerance=1e-4f;      // epsilon added to each bid (epsilon-scaling)
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+j]=-1;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchrbuf[j]=-1;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) Queue[j]=j;
		for (int j=threadIdx.x;j<n;j+=blockDim.x) pricer[j]=0;
		const int Block=512;
		// Precompute the full n x n pairwise-distance matrix into cost[],
		// tiling xyz1 through shared memory Block points at a time.
		for (int k0=0;k0<n;k0+=Block){
			int k1=min(n,k0+Block);
			for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x) buf[k]=xyz1[bno*n*3+k0*3+k];
			__syncthreads();
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x2=xyz2[bno*n*3+j*3+0];
				float y2=xyz2[bno*n*3+j*3+1];
				float z2=xyz2[bno*n*3+j*3+2];
				for (int k=k0;k<k1;k++){
					float x1=buf[(k-k0)*3+0];
					float y1=buf[(k-k0)*3+1];
					float z1=buf[(k-k0)*3+2];
					float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2));
					cost[blockIdx.x*n*n+k*n+j]=d;
				}
			}
			__syncthreads();
		}
		if (threadIdx.x==0){
			qhead=0;
			qlen=n;
		}
		__syncthreads();
		// value9..value16 prefetch the NEXT queued bidder's cost row (i2) while
		// processing the current one (i); 'loaded' marks the prefetch as valid.
		int loaded=0;
		float value9,value10,value11,value12,value13,value14,value15,value16;
		while (qlen){
			int i=Queue[qhead];   // current bidder (unassigned xyz1 point)
			int i2;               // next bidder, for prefetching
			if (qhead+1<n) i2=Queue[qhead+1]; else i2=Queue[0];
			float best=1e38f,best2=1e38f;   // lowest and second-lowest value seen by this thread
			int bestj=0;                    // argmin for 'best'
			if (n==blockDim.x*8){
				// Fast path: exactly 8 candidates per thread, fully unrolled
				// tournament to get (best, best2, argmin) without a loop.
				int j=threadIdx.x;
				float value1,value2,value3,value4,value5,value6,value7,value8;
				if (loaded){
					// Reuse the prefetched cost row; only prices changed since.
					value1=value9+pricer[j];
					value2=value10+pricer[j+blockDim.x];
					value3=value11+pricer[j+blockDim.x*2];
					value4=value12+pricer[j+blockDim.x*3];
					value5=value13+pricer[j+blockDim.x*4];
					value6=value14+pricer[j+blockDim.x*5];
					value7=value15+pricer[j+blockDim.x*6];
					value8=value16+pricer[j+blockDim.x*7];
					loaded=0;
				}else{
					value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
					value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
					value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4];
					value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5];
					value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6];
					value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7];
					// Prefetch next bidder's raw costs (prices added when used).
					value9=cost[blockIdx.x*n*n+i2*n+j];
					value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x];
					value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2];
					value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3];
					value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4];
					value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5];
					value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6];
					value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7];
					loaded=qlen>1;
				}
				int vj,vj2,vj3,vj4;
				// Pairwise sorts: after each if, valueX <= valueX+1 and vj* is the argmin.
				if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
				if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; }
				if (value5<value6){ vj3=j+blockDim.x*4; }else{ vj3=j+blockDim.x*5; float t=value5; value5=value6; value6=t; }
				if (value7<value8){ vj4=j+blockDim.x*6; }else{ vj4=j+blockDim.x*7; float t=value7; value7=value8; value8=t; }
				if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; }
				if (value5<value7){ value6=fminf(value6,value7); }else{ value6=fminf(value5,value8); value5=value7; vj3=vj4; }
				if (value1<value5){ best=value1; bestj=vj; best2=fminf(value2,value5); }else{ best2=fminf(value1,value6); best=value5; bestj=vj3; }
			}else if (n>=blockDim.x*4){
				for (int j=threadIdx.x;j<n;j+=blockDim.x*4){
					float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2];
					float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3];
					int vj,vj2;
					if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
					if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; }
					if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; }
					if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; }
				}
			}else if (n>=blockDim.x*2){
				for (int j=threadIdx.x;j<n;j+=blockDim.x*2){
					float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x];
					int vj;
					if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; }
					if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; }
				}
			}else{
				for (int j=threadIdx.x;j<n;j+=blockDim.x){
					float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j];
					if (best<value){ best2=fminf(best2,value); }else{ best2=best; bestj=j; best=value; }
				}
			}
			// Warp-level reduction of (best, best2, bestj). Every thread of every
			// warp executes this loop, so the full-warp mask is correct.
			for (int i=16;i>0;i>>=1){
				float b1=__shfl_down_sync(0xffffffffu,best,i,32);
				float b2=__shfl_down_sync(0xffffffffu,best2,i,32);
				int bj=__shfl_down_sync(0xffffffffu,bestj,i,32);
				if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; }
			}
			if ((threadIdx.x&31)==0){
				bests[threadIdx.x>>5][0]=best;
				bests[threadIdx.x>>5][1]=best2;
				*(int*)&bests[threadIdx.x>>5][2]=bestj;  // argmin stored bit-cast into a float slot
			}
			__syncthreads();
			// Cross-warp reduction over the nn warp results. Only lanes 0..nn-1
			// of warp 0 enter the branch, so the participant mask must name
			// exactly those lanes (a full mask would be undefined behaviour).
			int nn=blockDim.x>>5;
			if (threadIdx.x<nn){
				unsigned mask=(nn>=32)?0xffffffffu:((1u<<nn)-1u);
				best=bests[threadIdx.x][0];
				best2=bests[threadIdx.x][1];
				bestj=*(int*)&bests[threadIdx.x][2];
				for (int i=nn>>1;i>0;i>>=1){
					float b1=__shfl_down_sync(mask,best,i,32);
					float b2=__shfl_down_sync(mask,best2,i,32);
					int bj=__shfl_down_sync(mask,bestj,i,32);
					if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; }
				}
			}
			if (threadIdx.x==0){
				// Bid: raise bestj's price by (second-best - best + tolerance),
				// dequeue the bidder, re-queue the evicted previous owner.
				float delta=best2-best+tolerance;
				qhead++;
				qlen--;
				if (qhead>=n) qhead-=n;
				int old=matchrbuf[bestj];
				pricer[bestj]+=delta;
				cnt++;
				if (old!=-1){
					int ql=qlen;
					int tail=qhead+ql;
					qlen=ql+1;
					if (tail>=n) tail-=n;
					Queue[tail]=old;
				}
				// Epsilon-scaling: every 40*n bids, multiply tolerance by 100;
				// once it has already reached 1.0, give up and drain the queue.
				if (cnt==(40*n)){
					if (tolerance==1.0f) qlen=0;
					tolerance=fminf(1.0f,tolerance*100.0f);
					cnt=0;
				}
			}
			__syncthreads();
			if (threadIdx.x==0){
				matchrbuf[bestj]=i;
			}
		}
		__syncthreads();
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchr[bno*n+j]=matchrbuf[j];
		for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+matchrbuf[j]]=j;
		__syncthreads();
	}
}
// Host wrapper: launches the auction kernel with a fixed 32x512 configuration
// on the default stream (so n==4096 hits the unrolled fast path: 512*8).
void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){
	AuctionMatchKernel<<<32,512>>>(b,n,xyz1,xyz2,matchl,matchr,cost);
}
ea58b655c4e22c6a09c5838962ab34bf658b8790.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// NOTE(review): the pass1/pass2/pass3 pipeline below assigns integer labels via
// atomicInc and link-flattening over per-row "runs" — it appears to implement
// connected-component labeling for binary images 1024 pixels wide ("chen_label");
// confirm against the calling code.
#define WP 32
#define BM (WP-1)
#define dt unsigned short
#define dts 2
#define TGTG 1
#define TGNTG 0
#define LBMAX 0xffff
#define LB2MAX 0xffffffff
typedef unsigned int uint;
typedef unsigned char uchar;
typedef unsigned short ushort;
#define W 1024
#define HWW 9
#define rep 2
#define HW (W/2)
#define QW (W/4)
#define HWP (WP/2)
#define QWP (WP/4)
#define EWP (WP/8)
#define XWP (WP/16)
#define TN (W/(rep*2))
#define WN (TN/WP)
#define MASKAD (~(-1 << HWW))

// Detects horizontal runs in one image row: each thread owns 4 consecutive
// pixels (read as one uint from ps), folds them into a 4-bit occupancy nibble,
// and writes per-pixel-pair run-head links into pt (two ushorts packed per
// uint). ct receives a per-pair connection tag; px accumulates the previous
// row's nibble (shifted) for vertical connectivity; tbf is a WP-entry warp
// scratch buffer. cp2 is this thread's flat pair index * 2.
// NOTE(review): written for 32-wide warps (WP==32, __ballot_sync with a 32-bit
// mask) — on AMD's 64-wide wavefronts this needs review despite being hipified.
__device__ __inline__ static void detectRuns1024(ushort* pt, uchar* ct, uchar* ps, uchar* px, ushort* tbf, uint cp2) {
	uint* ps4 = (uint*)ps;
	uint* pt2 = (uint*)pt;
	ushort* ct2 = (ushort*)ct;
	uint cp = cp2 >> 1;//threadIdx.x + threadIdx.y * WP;
	// uint cp2 = cp << 1;
	uint t0, t1, t2, t3, t4, t5;
	// Compress 4 pixel bytes into a 4-bit occupancy mask in t0.
	t0 = ps4[cp];
	t0 = __byte_perm(t0, 0, 0x3120);
	t0 &= 0x1010101;
	t0 |= t0 >> 15;
	t0 |= t0 >> 6;
	t0 &= 0xf;
	t1 = px[cp];   // previous row's nibble for these 4 pixels
	t5 = 0;
	if ((t1 & 0x3))// && (t0 & 0x3)
		t5 = 0x1;
	if ((t1 & 0xc))// && (t0 & 0xc)
		t5 |= 0x100;
	ct2[cp] = t5;// | ((ct2[cp] & 0xf0f) << 4);
	t1 = t1 | t0;
	px[cp] = t0 | (px[cp] << 4);  // keep current nibble, shift old one up
	// t3/t4: run-head candidates for the left and right pixel pair; W = "no run".
	t3 = W;
	t4 = W;
	t5 = 0;
	if (t1 & 0x3) t3 = cp2;
	if (t1 & 0xc) t4 = cp2 + 1;
	if ((t1 & 0x6) == 0x6) t4 = t3;  // pairs touch -> same run head
	t0 = W;
	if (t1 & 0x8) t0 = t4;
	pt2[cp] = t0;
	// Find, per lane, the nearest lower lane whose 4 pixels are not all set;
	// runs crossing thread boundaries adopt that lane's run head.
	t0 = __ballot_sync(0xffffffff, (t1 & 0xf) != 0xf);
	// NOTE(review): for threadIdx.x==0 this shifts a 32-bit value by 32 —
	// undefined in C/C++; verify the generated code relies on hardware
	// modulo-shift behaviour intentionally.
	t0 <<= (WP - threadIdx.x);
	t0 = __clz(t0);
	t5 = (t0 == WP) && ((t1 & 0xf) == 0xf);
	t0 = max((int)0, ((int)threadIdx.x) - 1 - ((int)t0));
	t2 = pt2[threadIdx.y * WP + t0];
	if (t2 == W) t2 = (t0 + 1 + threadIdx.y * WP) * 2;
	// __syncthreads();
	if (threadIdx.x != 0 && (t1 & 0x1)) {
		if (t3 == cp2) t3 = t2;
		if (t4 == cp2) t4 = t2;
	}
	// Last lane of each warp publishes its run head (tagged with +HW / +W) so
	// runs can also cross warp boundaries.
	if (threadIdx.x == (WP - 1)) {
		t5 = t5 ? t4 : t4 + HW;
		t5 = (t1 & 0x8) ? t5 : (W + HW);
		tbf[threadIdx.y] = t5;
	}
	__syncthreads();
	if (threadIdx.y != 0) {
		t5 = tbf[threadIdx.x];
		t0 = __ballot_sync(0xffffffff, (t5 & HW) != 0);
		t0 <<= (WP - threadIdx.y);
		t0 = __clz(t0);
		t0 = max((int)0, ((int)threadIdx.y) - 1 - ((int)t0));
		t1 = tbf[t0];
		if (t1 & W) t1 = (t0 + 1) * WP * 2;
		t1 = t1 & MASKAD; //remove tags
		cp2 = threadIdx.y * WP * 2;
		if (px[cp2 >> 1] & 0x11) {
			if (t3 == cp2) t3 = t1;
			if (t4 == cp2) t4 = t1;
		}
	}
	// Pack (right link << 16 | left link) for this pixel pair.
	t4 <<= 16;
	t4 |= t3;
	pt2[cp] = t4;
}

// Pass 1: per-block strip labeling. Each block processes h (+2 border) rows of
// a W-wide binary image: detects runs per row, iteratively merges vertically
// connected runs through the shared link tables lk0/lk1 (converging via
// __syncthreads_or), and writes resolved link info for odd rows to pt.
// Boundary link info for pass 2 is appended to b (b2 and eb are unused here;
// eb writes are commented out).
__global__ static void pass1_1024(ushort* pt, uchar* ps, ushort* b, ushort* b2, uint h, ushort* eb) {
	__shared__ __align__(4) ushort lk0[HW]; //upper link
	__shared__ __align__(4) ushort lk1[HW]; //back up of current link
	// __shared__ __align__(4) ushort lk2[HW]; //new value of current link
	__shared__ __align__(4) ushort lk3[HW]; //down linked last block
	__shared__ __align__(4) ushort lk4[HW]; //botton link
	__shared__ uchar ct[HW]; //connection tag of each block
	__shared__ uchar px[QW]; //value of four pixels
	__shared__ ushort tbf[WP];
	uint blockIdxx = blockIdx.x;
	uint cp = (threadIdx.x + threadIdx.y * WP) << 1;
	// if(blockIdxx != 3)
	// return;
	ps = ps + blockIdxx * h * W;
	pt = pt + blockIdxx * h * HW / 2;
	eb = eb + blockIdxx * HW * 9;
	h += 2; //calucate 2 more lines
	px[cp >> 1] = 0;
	// ((ushort*)ct)[cp>>1] = 0;
	detectRuns1024(lk0, ct, ps, px, tbf, cp);
	ps += W;
	/* {//no error //cp = threadIdx.x + threadIdx.y*WP; ((uint*)(eb))[cp>>1] = 0; ((uint*)(eb+HW))[cp>>1] = 0; ((uint*)(eb+2*HW))[cp>>1] = 0; ((uint*)(eb+3*HW))[cp>>1] = 0; ((uint*)(eb+4*HW))[cp>>1] = 0; ((uint*)(eb+5*HW))[cp>>1] = 0; ((uint*)(eb+6*HW))[cp>>1] = 0; ((uint*)(eb+7*HW))[cp>>1] = 0; ((uint*)(eb+8*HW))[cp>>1] = 0; } */
	for (int hh = 1; hh < h; hh++) {
		detectRuns1024(lk1, ct, ps, px, tbf, cp);
		ps += W;
		uint lt0 = ((uint*)lk0)[cp >> 1]; //backup lk0
		((uint*)lk0)[cp >> 1] = lt0 | ((HW << 16) | HW);//((uint*)lk0)[cp>>1];//(W << 16) | W;//initialize lk2
		__syncthreads();
		uint lt1 = ((uint*)lk1)[cp >> 1]; //back up lk1, because lk1 will modified next
		{
			ushort t0;
			t0 = lt0;
			if (ct[cp]) {// every one write lk0 && lk1[lk0[cp]] > lk1[cp]
				lk0[t0] = lt1;
			}
			cp++;
			t0 = lt0 >> 16;
			if (ct[cp]) {
				lk0[t0] = lt1 >> 16;
			}
			cp--;
			__syncthreads();
		}
		// Iterative pointer-jumping merge: repeat until no link changes
		// anywhere in the block (__syncthreads_or converges the loop).
		do {
			ushort t0, t1, t2;
			bool changed = 0;
			t1 = lt1;
			t0 = lt0;
			t2 = t1;
			if (ct[cp]) {
				t2 = lk0[t0];
				if (t1 != t2) changed = 1;
				if (t2 < t1)//update self
					lk1[t1] = t2;
			}
			cp++;
			t1 = lt1 >> 16;
			t0 = lt0 >> 16;
			t2 = t1;
			if (ct[cp]) {
				t2 = lk0[t0];
				if (t1 != t2) changed = 1;
				if (t2 < t1)
					lk1[t1] = t2;
			}
			cp--;
			changed = __syncthreads_or(changed);
			t1 = lt1;
			if (t1 < HW) t1 = lk1[t1];
			lt1 = __byte_perm(lt1, t1, 0x3254);  // replace low half of lt1
			t1 = lt1 >> 16;
			if (t1 < HW) t1 = lk1[t1];
			lt1 = __byte_perm(lt1, t1, 0x5410);  // replace high half of lt1
			if (!changed) break;
			t1 = lt1;
			t0 = lt0;
			if (ct[cp] && (lk0[t0] > t1)) {// min write
				lk0[t0] = t1;
			}
			cp++;
			t1 = lt1 >> 16;
			t0 = lt0 >> 16;
			if (ct[cp] && (lk0[t0] > t1)) {
				lk0[t0] = t1;
			}
			cp--;
			__syncthreads();
		} while (1);
		((uint*)lk1)[cp >> 1] = lt1;
		if ((hh & 0x1) && (hh != 1)) {//only odd line write out
			//resolve linkage by lk3, lk4
			ushort t0, t1, t2;
			t0 = lk3[cp];
			t2 = t0;
			if (t0 >= HW && t0 < W) t2 = lk3[t0 - HW];
			if (t2 < HW) {
				t2 = lk0[t2];
				if (t2 < HW) t0 = t2;
				else if (t0 < HW) t0 = cp | HW;
			}
			cp++;
			t1 = lk3[cp];
			t2 = t1;
			if (t1 >= HW && t1 < W) t2 = lk3[t1 - HW];
			if (t2 < HW) {
				t2 = lk0[t2];
				if (t2 < HW) t1 = t2;
				else if (t1 < HW) t1 = cp | HW;
			}
			cp--;
			uint t3 = (t1 << 16) | t0;
			((uint*)lk3)[cp >> 1] = t3;//write back, next iter, lk4 will use it
			//((uint*)pt)[cp>>1] = t3;//((uint*)lk3)[cp>>1]((uint*)lk3)[cp>>1]
			//pt += HW;
		}
		else
			((uint*)lk3)[cp >> 1] = ((uint*)lk0)[cp >> 1];//t2;
		// hh is uniform across the block, so this conditional barrier is safe.
		if((hh & 0x1) == 0)
			__syncthreads();
		((uint*)lk0)[cp >> 1] = ((uint*)lk1)[cp >> 1];
		if (hh < 4)
			((uint*)lk4)[cp >> 1] = ((uint*)lk3)[cp >> 1];//t2;
		else if ((hh & 0x1)) {// && (hh != 1)
			ushort t0, t1;
			t0 = lk4[cp];
			t1 = t0;
			if (t0 < HW) {
				t0 = lk3[t0];
				if (t0 >= HW && t0 < W) {
					lk3[t0 - HW] = 0x8000 | cp;  // 0x8000 tags "linked from bottom"
					t0 = t1 | 0x8000;
				}
				else if (t0 & 0x8000) t0 = t1 | 0x8000;
			}
			lk4[cp] = t0;
			cp++;
			t0 = lk4[cp];
			t1 = t0;
			if (t0 < HW) {
				t0 = lk3[t0];
				if (t0 >= HW && t0 < W) {
					lk3[t0 - HW] = 0x8000 | cp;
					t0 = t1 | 0x8000;
				}
				else if (t0 & 0x8000) t0 = t1 | 0x8000;
			}
			lk4[cp] = t0;
			cp--;
		}
		__syncthreads();
		if ((hh & 0x1) && (hh != 1)) {
			ushort t0;
			((uint*)pt)[cp >> 1] = ((uint*)lk3)[cp >> 1];
			pt += HW;
			t0 = lk4[cp];
			if (t0 & 0x8000) t0 = (lk3[t0 & MASKAD] & MASKAD) | HW;
			lk4[cp] = t0;
			cp++;
			t0 = lk4[cp];
			if (t0 & 0x8000) t0 = (lk3[t0 & MASKAD] & MASKAD) | HW;
			lk4[cp] = t0;
			cp--;
		}
	}
	//write out info for pass2
	{
		b += blockIdxx * 2 * HW;
		b2 += blockIdxx * 2 * HW;
		((uint*)b)[cp >> 1] = ((uint*)lk4)[cp >> 1];
		// ((uint*)b2)[cp>>1] = ((uint*)lk4)[cp>>1];
		b += HW;
		b2 += HW;
		((uint*)b)[cp >> 1] = ((uint*)lk1)[cp >> 1];
		// ((uint*)b2)[cp>>1] = ((uint*)lk1)[cp>>1];
	}
	// if(threadIdx.x == 0 && threadIdx.y == 0)
	// printf("%d end\n", blockIdx.x);
}

// Pass 2: merges labels across the per-strip boundaries produced by pass 1.
// Single block of HW threads; ib holds the boundary link records, h is the
// number of strips, pitch the record stride. Forward sweep unifies boundary
// links top-to-bottom, then a backward sweep assigns final label ids with
// atomicInc on the shared 'labels' counter; the total is written to *glabel.
__global__ static void pass2_1024(ushort* ib, uint* glabel, uint h, uint pitch) {
	__shared__ uint lk0[HW]; //last tag
	__shared__ ushort lk1[HW]; //current link
	__shared__ uint lk2[HW]; //current tag
	__shared__ ushort lk3[HW]; //bottom flattened link to current
	__shared__ uint labels;   // label counter (all threads store 0; thread-uniform)
	uint cp = threadIdx.x;
	ushort* b = ib + (h * (pitch + 1) * blockIdx.x + 1) * HW;
	{
		ushort tmp = b[cp];
		lk0[cp] = tmp;
		lk3[cp] = tmp;
		lk2[cp] = W;
		b += HW * pitch;
	}
	labels = 0;
	__syncthreads();
	for (int i = 1; i < h; i++) {
		ushort tmp, tmp1;
		//load a new block info
		tmp = b[cp];//the upper line connection info to bottom line
		// lk1[cp] = tmp;
		if (i > 1) {
			tmp1 = lk3[cp];
			//update
			if (tmp1 < HW) tmp1 = tmp;
			lk3[cp] = tmp1;
		}
		b += HW;
		//lk2[cp] = b[cp];//the bottom line tags, this proc try to unify this line
		tmp1 = b[cp];
		lk2[cp] = tmp1 + HW;//tmp1 < HW ? tmp1 + HW : tmp1;
		// b+=HW*pitch;
		__syncthreads();
		ushort lt0 = lk0[cp];
		if (tmp < HW)//every one write
			lk2[tmp] = lt0;
		else if (tmp < W)
			lk1[tmp - HW] = lt0;
		__syncthreads();
		// Converging min-merge, same scheme as in pass1.
		do {
			bool changed = 0;
			if (tmp < HW) {
				changed = lk2[tmp] != lt0;
				if (lk2[tmp] < lt0)
					lk0[lt0] = lk2[tmp];
			}
			else if (tmp < W) {
				changed = lk1[tmp - HW] != lt0;
				if (lk1[tmp - HW] < lt0)
					lk0[lt0] = lk1[tmp - HW];
			}
			changed = __syncthreads_or(changed);
			if (lt0 < HW) lt0 = lk0[lt0];
			if (!changed) break;
			if (tmp < HW) {
				if (lk2[tmp] > lt0)
					lk2[tmp] = lt0;
			}
			else if (tmp < W) {
				if (lt0 < lk1[tmp - HW])
					lk1[tmp - HW] = lt0;
			}
			__syncthreads();
		} while (1);
		//write out lk2 info, ???link back to bottom?
		b -= HW * (pitch + 1);
		b[cp] = lt0;
		b += HW * (pitch + pitch + 1);
		tmp = lk2[cp];
		if (tmp < HW)// && tmp < W)// && lk2[cp] == cp, if dateset correct, this is not necessary
			lk0[tmp] = cp;
		//atomicMin(lk0+tmp, cp);
		__syncthreads();
		//head link together
		tmp = lk2[cp];
		if (tmp < HW)
			tmp = lk0[tmp];
		else if (tmp < W)
			tmp = tmp - HW;
		lk2[cp] = tmp;
		__syncthreads();
		//all leaf in lk2 updates
		tmp = lk2[cp];
		if (tmp < HW)
			tmp = lk2[tmp];
		lk0[cp] = tmp;//so lk0 contains the last block line tags
		// __syncthreads();
	}
	b -= HW * pitch;
	b[cp] = lk0[cp];
	b = ib + (h * (pitch + 1) * (blockIdx.x + 1) - 1) * HW;
	//back ward tag assign
	{
		ushort tmp = b[cp], res = LBMAX;
		if (tmp == cp)
			res = atomicInc(&labels, 0x8000);//(h-1) << 12;
		lk2[cp] = res;
		__syncthreads();
		if (tmp < HW)
			res = lk2[tmp];
		lk2[cp] = res;
		//last line tags
		b[cp] = res;
		b -= HW;
	}
	__syncthreads();
	for (int i = h - 2; i >= 0; i--) {
		ushort tmp, tmp2;//, tmp4;
		tmp = b[cp]; //next link info
		//lk1[cp] = tmp;
		b -= HW * pitch;
		tmp2 = b[cp];//current link info
		lk1[cp] = tmp2;
		lk0[cp] = LBMAX;//final tags
		__syncthreads();
		/* tmp4 = W; if(tmp >= HW && tmp < W){//race to resolve linked by current block tmp4 = tmp - HW; lk1[tmp4] = tmp2; } __syncthreads(); do{ bool changed = 0; if(tmp4 < HW){ changed = lk1[tmp4] != tmp2; if(tmp2 > lk1[tmp4]) lk1[tmp2] = lk1[tmp4]; } changed = __syncthreads_or(changed); if(tmp2 < HW) tmp2 = lk1[tmp2]; if(!changed) break; if(tmp4 < HW && tmp2 < lk1[tmp4]) lk1[tmp4] = tmp2; __syncthreads(); }while(1); */
		if (tmp < HW) {//next linked
			if (tmp2 < HW)
				lk0[tmp2] = lk2[tmp];
			// else
			// tmp2 = W;
		}
		__syncthreads();
		if (tmp2 == cp && lk0[cp] == LBMAX) {//current linked
			lk0[cp] = atomicInc(&labels, 0x8000);//((h-1) << 12) - HW;
		}
		__syncthreads();
		tmp = LBMAX;
		if (tmp2 < HW)
			tmp = lk0[tmp2];
		//write out tags
		b[cp] = tmp;
		b -= HW;
		//switch buffer
		lk2[cp] = tmp;
		__syncthreads();
	}
	//update first line link info
	// b = ib + (h*(pitch+1)*blockIdx.x) * HW;
	// b[cp] = lk3[cp];
	*glabel = labels;
}

// Pass 3: bottom-up final label propagation per strip. Walks each strip from
// its last row to its first, translating the link records in ps into final
// label values written to pt, using the boundary tags from b (pass 2 output)
// and allocating fresh labels from *label via atomicInc where a run is a head.
__global__ void static pass3_1024(ushort* pt, ushort* ps, ushort* b, uint* label, uint h) {
	__shared__ ushort cur[W / 2]; //last tag
	__shared__ ushort lst[W / 2]; //current link
	__shared__ ushort bot[W / 2]; //current tag
	__shared__ ushort lnk[W / 2]; //current link to last
	// __shared__ uint llabel;
	uint cp = (threadIdx.x + threadIdx.y * WP) << 1;
	pt = pt + (blockIdx.x * h + h - 1) * HW;
	ps = ps + (blockIdx.x * h + h - 1) * HW;
	if (blockIdx.x == 0) {
		((uint*)bot)[cp >> 1] = LB2MAX;
		b += HW;
	}
	else {
		b += (blockIdx.x * 2 - 1) * HW;
		((uint*)bot)[cp >> 1] = ((uint*)b)[cp >> 1];
		b += 2 * HW;
	}
	((uint*)cur)[cp >> 1] = ((uint*)b)[cp >> 1];
	__syncthreads();
	for (int i = h - 1; i >= 0; i--) {
		//load current link
		((uint*)lnk)[cp >> 1] = ((uint*)ps)[cp >> 1];
		ps -= HW;
		//switch cur last
		((uint*)lst)[cp >> 1] = ((uint*)cur)[cp >> 1];
		//clear cur
		((uint*)cur)[cp >> 1] = LB2MAX;
		// llabel = 0;
		__syncthreads();
		// Handle the even pixel pair at cp, then (cp++) the odd one — same code
		// repeated for both halves of the packed record.
		if (lnk[cp] < HW) {//link to last
			cur[cp] = lst[lnk[cp]];
		}
		else if (lnk[cp] < W && lnk[cp] == (HW + cp)) {//link to local, and a head, assigning new label
			if (blockIdx.x != 0 && i == 0)
				cur[cp] = bot[cp];
			else
				cur[cp] = atomicInc(label, LBMAX);
		}
		if (lnk[cp] & 0x8000) {//link to bottom
			if (blockIdx.x == 0)
				cur[cp] = atomicInc(label, LBMAX);
			else
				cur[cp] = bot[lnk[cp] & MASKAD];
		}
		cp++;
		if (lnk[cp] < HW) {//link to last
			cur[cp] = lst[lnk[cp]];
		}
		else if (lnk[cp] < W && lnk[cp] == (HW + cp)) {//link to local, and a head, assigning new label
			if (blockIdx.x != 0 && i == 0)
				cur[cp] = bot[cp];
			else
				cur[cp] = atomicInc(label, LBMAX);
		}
		if (lnk[cp] & 0x8000) {//link to bottom
			if (blockIdx.x == 0)
				cur[cp] = atomicInc(label, LBMAX);
			else
				cur[cp] = bot[lnk[cp] & MASKAD];
		}
		cp--;
		__syncthreads();
		if (lnk[cp] >= HW && lnk[cp] < W)
			cur[cp] = cur[lnk[cp] - HW];
		cp++;
		if (lnk[cp] >= HW && lnk[cp] < W)
			cur[cp] = cur[lnk[cp] - HW];
		cp--;
		//write out
		((uint*)pt)[cp >> 1] = ((uint*)cur)[cp >> 1];
		pt -= HW;
		__syncthreads();
	}
}

// Host driver: runs the three labeling passes over a h-row, W-wide image split
// into bn strips. cbpt receives final labels, cbpt2 is intermediate link
// storage, cbps the input pixels, cbb/cbb2 boundary buffers, cbglabel the
// device label counter, cbeb an (unused) error buffer. Launches are on the
// default stream with no error checking (NOTE(review): consider checking
// hipGetLastError after each launch).
void chen_label_1024(uchar* cbpt, uchar* cbpt2, uchar* cbps, uchar* cbb, uchar* cbb2, uchar* cbglabel, uint h, uint bn, uchar* cbeb) {
	ushort* pt = (ushort*)cbpt;
	ushort* pt2 = (ushort*)cbpt2;
	uchar* ps = (uchar*)cbps;
	ushort* b = (ushort*)cbb;
	ushort* b2 = (ushort*)cbb2;
	uint* glabel = (uint*)cbglabel;
	ushort* eb = (ushort*)cbeb;
	dim3 threads(WP, TN / WP, 1);
	dim3 grid(bn, 1, 1);
	dim3 threads2(HW, 1, 1);
	dim3 grid2(1, 1, 1);
	pass1_1024 << <grid, threads >> > (pt2, ps, b, b2, (h - 2) / bn, eb);
	pass2_1024 << <grid2, threads2 >> > (b, glabel, bn, 1);
	pass3_1024 << <grid, threads >> > (pt, pt2, b, glabel, (h - 2) / (bn * 2));
}
ea58b655c4e22c6a09c5838962ab34bf658b8790.cu
#define WP 32 #define BM (WP-1) #define dt unsigned short #define dts 2 #define TGTG 1 #define TGNTG 0 #define LBMAX 0xffff #define LB2MAX 0xffffffff typedef unsigned int uint; typedef unsigned char uchar; typedef unsigned short ushort; #define W 1024 #define HWW 9 #define rep 2 #define HW (W/2) #define QW (W/4) #define HWP (WP/2) #define QWP (WP/4) #define EWP (WP/8) #define XWP (WP/16) #define TN (W/(rep*2)) #define WN (TN/WP) #define MASKAD (~(-1 << HWW)) __device__ __inline__ static void detectRuns1024(ushort* pt, uchar* ct, uchar* ps, uchar* px, ushort* tbf, uint cp2) { uint* ps4 = (uint*)ps; uint* pt2 = (uint*)pt; ushort* ct2 = (ushort*)ct; uint cp = cp2 >> 1;//threadIdx.x + threadIdx.y * WP; // uint cp2 = cp << 1; uint t0, t1, t2, t3, t4, t5; t0 = ps4[cp]; t0 = __byte_perm(t0, 0, 0x3120); t0 &= 0x1010101; t0 |= t0 >> 15; t0 |= t0 >> 6; t0 &= 0xf; t1 = px[cp]; t5 = 0; if ((t1 & 0x3))// && (t0 & 0x3) t5 = 0x1; if ((t1 & 0xc))// && (t0 & 0xc) t5 |= 0x100; ct2[cp] = t5;// | ((ct2[cp] & 0xf0f) << 4); t1 = t1 | t0; px[cp] = t0 | (px[cp] << 4); t3 = W; t4 = W; t5 = 0; if (t1 & 0x3) t3 = cp2; if (t1 & 0xc) t4 = cp2 + 1; if ((t1 & 0x6) == 0x6) t4 = t3; t0 = W; if (t1 & 0x8) t0 = t4; pt2[cp] = t0; t0 = __ballot_sync(0xffffffff, (t1 & 0xf) != 0xf); t0 <<= (WP - threadIdx.x); t0 = __clz(t0); t5 = (t0 == WP) && ((t1 & 0xf) == 0xf); t0 = max((int)0, ((int)threadIdx.x) - 1 - ((int)t0)); t2 = pt2[threadIdx.y * WP + t0]; if (t2 == W) t2 = (t0 + 1 + threadIdx.y * WP) * 2; // __syncthreads(); if (threadIdx.x != 0 && (t1 & 0x1)) { if (t3 == cp2) t3 = t2; if (t4 == cp2) t4 = t2; } if (threadIdx.x == (WP - 1)) { t5 = t5 ? t4 : t4 + HW; t5 = (t1 & 0x8) ? 
t5 : (W + HW); tbf[threadIdx.y] = t5; } __syncthreads(); if (threadIdx.y != 0) { t5 = tbf[threadIdx.x]; t0 = __ballot_sync(0xffffffff, (t5 & HW) != 0); t0 <<= (WP - threadIdx.y); t0 = __clz(t0); t0 = max((int)0, ((int)threadIdx.y) - 1 - ((int)t0)); t1 = tbf[t0]; if (t1 & W) t1 = (t0 + 1) * WP * 2; t1 = t1 & MASKAD; //remove tags cp2 = threadIdx.y * WP * 2; if (px[cp2 >> 1] & 0x11) { if (t3 == cp2) t3 = t1; if (t4 == cp2) t4 = t1; } } t4 <<= 16; t4 |= t3; pt2[cp] = t4; } __global__ static void pass1_1024(ushort* pt, uchar* ps, ushort* b, ushort* b2, uint h, ushort* eb) { __shared__ __align__(4) ushort lk0[HW]; //upper link __shared__ __align__(4) ushort lk1[HW]; //back up of current link // __shared__ __align__(4) ushort lk2[HW]; //new value of current link __shared__ __align__(4) ushort lk3[HW]; //down linked last block __shared__ __align__(4) ushort lk4[HW]; //botton link __shared__ uchar ct[HW]; //connection tag of each block __shared__ uchar px[QW]; //value of four pixels __shared__ ushort tbf[WP]; uint blockIdxx = blockIdx.x; uint cp = (threadIdx.x + threadIdx.y * WP) << 1; // if(blockIdxx != 3) // return; ps = ps + blockIdxx * h * W; pt = pt + blockIdxx * h * HW / 2; eb = eb + blockIdxx * HW * 9; h += 2; //calucate 2 more lines px[cp >> 1] = 0; // ((ushort*)ct)[cp>>1] = 0; detectRuns1024(lk0, ct, ps, px, tbf, cp); ps += W; /* {//no error //cp = threadIdx.x + threadIdx.y*WP; ((uint*)(eb))[cp>>1] = 0; ((uint*)(eb+HW))[cp>>1] = 0; ((uint*)(eb+2*HW))[cp>>1] = 0; ((uint*)(eb+3*HW))[cp>>1] = 0; ((uint*)(eb+4*HW))[cp>>1] = 0; ((uint*)(eb+5*HW))[cp>>1] = 0; ((uint*)(eb+6*HW))[cp>>1] = 0; ((uint*)(eb+7*HW))[cp>>1] = 0; ((uint*)(eb+8*HW))[cp>>1] = 0; } */ for (int hh = 1; hh < h; hh++) { detectRuns1024(lk1, ct, ps, px, tbf, cp); ps += W; uint lt0 = ((uint*)lk0)[cp >> 1]; //backup lk0 ((uint*)lk0)[cp >> 1] = lt0 | ((HW << 16) | HW);//((uint*)lk0)[cp>>1];//(W << 16) | W;//initialize lk2 __syncthreads(); uint lt1 = ((uint*)lk1)[cp >> 1]; //back up lk1, because lk1 will 
modified next { ushort t0; t0 = lt0; if (ct[cp]) {// every one write lk0 && lk1[lk0[cp]] > lk1[cp] lk0[t0] = lt1; } cp++; t0 = lt0 >> 16; if (ct[cp]) { lk0[t0] = lt1 >> 16; } cp--; __syncthreads(); } do { ushort t0, t1, t2; bool changed = 0; t1 = lt1; t0 = lt0; t2 = t1; if (ct[cp]) { t2 = lk0[t0]; if (t1 != t2) changed = 1; if (t2 < t1)//update self lk1[t1] = t2; } cp++; t1 = lt1 >> 16; t0 = lt0 >> 16; t2 = t1; if (ct[cp]) { t2 = lk0[t0]; if (t1 != t2) changed = 1; if (t2 < t1) lk1[t1] = t2; } cp--; changed = __syncthreads_or(changed); t1 = lt1; if (t1 < HW) t1 = lk1[t1]; lt1 = __byte_perm(lt1, t1, 0x3254); t1 = lt1 >> 16; if (t1 < HW) t1 = lk1[t1]; lt1 = __byte_perm(lt1, t1, 0x5410); if (!changed) break; t1 = lt1; t0 = lt0; if (ct[cp] && (lk0[t0] > t1)) {// min write lk0[t0] = t1; } cp++; t1 = lt1 >> 16; t0 = lt0 >> 16; if (ct[cp] && (lk0[t0] > t1)) { lk0[t0] = t1; } cp--; __syncthreads(); } while (1); ((uint*)lk1)[cp >> 1] = lt1; if ((hh & 0x1) && (hh != 1)) {//only odd line write out //resolve linkage by lk3, lk4 ushort t0, t1, t2; t0 = lk3[cp]; t2 = t0; if (t0 >= HW && t0 < W) t2 = lk3[t0 - HW]; if (t2 < HW) { t2 = lk0[t2]; if (t2 < HW) t0 = t2; else if (t0 < HW) t0 = cp | HW; } cp++; t1 = lk3[cp]; t2 = t1; if (t1 >= HW && t1 < W) t2 = lk3[t1 - HW]; if (t2 < HW) { t2 = lk0[t2]; if (t2 < HW) t1 = t2; else if (t1 < HW) t1 = cp | HW; } cp--; uint t3 = (t1 << 16) | t0; ((uint*)lk3)[cp >> 1] = t3;//write back, next iter, lk4 will use it //((uint*)pt)[cp>>1] = t3;//((uint*)lk3)[cp>>1]((uint*)lk3)[cp>>1] //pt += HW; } else ((uint*)lk3)[cp >> 1] = ((uint*)lk0)[cp >> 1];//t2; if((hh & 0x1) == 0) __syncthreads(); ((uint*)lk0)[cp >> 1] = ((uint*)lk1)[cp >> 1]; if (hh < 4) ((uint*)lk4)[cp >> 1] = ((uint*)lk3)[cp >> 1];//t2; else if ((hh & 0x1)) {// && (hh != 1) ushort t0, t1; t0 = lk4[cp]; t1 = t0; if (t0 < HW) { t0 = lk3[t0]; if (t0 >= HW && t0 < W) { lk3[t0 - HW] = 0x8000 | cp; t0 = t1 | 0x8000; } else if (t0 & 0x8000) t0 = t1 | 0x8000; } lk4[cp] = t0; cp++; t0 = 
lk4[cp]; t1 = t0; if (t0 < HW) { t0 = lk3[t0]; if (t0 >= HW && t0 < W) { lk3[t0 - HW] = 0x8000 | cp; t0 = t1 | 0x8000; } else if (t0 & 0x8000) t0 = t1 | 0x8000; } lk4[cp] = t0; cp--; } __syncthreads(); if ((hh & 0x1) && (hh != 1)) { ushort t0; ((uint*)pt)[cp >> 1] = ((uint*)lk3)[cp >> 1]; pt += HW; t0 = lk4[cp]; if (t0 & 0x8000) t0 = (lk3[t0 & MASKAD] & MASKAD) | HW; lk4[cp] = t0; cp++; t0 = lk4[cp]; if (t0 & 0x8000) t0 = (lk3[t0 & MASKAD] & MASKAD) | HW; lk4[cp] = t0; cp--; } } //write out info for pass2 { b += blockIdxx * 2 * HW; b2 += blockIdxx * 2 * HW; ((uint*)b)[cp >> 1] = ((uint*)lk4)[cp >> 1]; // ((uint*)b2)[cp>>1] = ((uint*)lk4)[cp>>1]; b += HW; b2 += HW; ((uint*)b)[cp >> 1] = ((uint*)lk1)[cp >> 1]; // ((uint*)b2)[cp>>1] = ((uint*)lk1)[cp>>1]; } // if(threadIdx.x == 0 && threadIdx.y == 0) // printf("%d end\n", blockIdx.x); } __global__ static void pass2_1024(ushort* ib, uint* glabel, uint h, uint pitch) { __shared__ uint lk0[HW]; //last tag __shared__ ushort lk1[HW]; //current link __shared__ uint lk2[HW]; //current tag __shared__ ushort lk3[HW]; //bottom flattened link to current __shared__ uint labels; uint cp = threadIdx.x; ushort* b = ib + (h * (pitch + 1) * blockIdx.x + 1) * HW; { ushort tmp = b[cp]; lk0[cp] = tmp; lk3[cp] = tmp; lk2[cp] = W; b += HW * pitch; } labels = 0; __syncthreads(); for (int i = 1; i < h; i++) { ushort tmp, tmp1; //load a new block info tmp = b[cp];//the upper line connection info to bottom line // lk1[cp] = tmp; if (i > 1) { tmp1 = lk3[cp]; //update if (tmp1 < HW) tmp1 = tmp; lk3[cp] = tmp1; } b += HW; //lk2[cp] = b[cp];//the bottom line tags, this proc try to unify this line tmp1 = b[cp]; lk2[cp] = tmp1 + HW;//tmp1 < HW ? 
tmp1 + HW : tmp1; // b+=HW*pitch; __syncthreads(); ushort lt0 = lk0[cp]; if (tmp < HW)//every one write lk2[tmp] = lt0; else if (tmp < W) lk1[tmp - HW] = lt0; __syncthreads(); do { bool changed = 0; if (tmp < HW) { changed = lk2[tmp] != lt0; if (lk2[tmp] < lt0) lk0[lt0] = lk2[tmp]; } else if (tmp < W) { changed = lk1[tmp - HW] != lt0; if (lk1[tmp - HW] < lt0) lk0[lt0] = lk1[tmp - HW]; } changed = __syncthreads_or(changed); if (lt0 < HW) lt0 = lk0[lt0]; if (!changed) break; if (tmp < HW) { if (lk2[tmp] > lt0) lk2[tmp] = lt0; } else if (tmp < W) { if (lt0 < lk1[tmp - HW]) lk1[tmp - HW] = lt0; } __syncthreads(); } while (1); //write out lk2 info, ???link back to bottom? b -= HW * (pitch + 1); b[cp] = lt0; b += HW * (pitch + pitch + 1); tmp = lk2[cp]; if (tmp < HW)// && tmp < W)// && lk2[cp] == cp, if dateset correct, this is not necessary lk0[tmp] = cp; //atomicMin(lk0+tmp, cp); __syncthreads(); //head link together tmp = lk2[cp]; if (tmp < HW) tmp = lk0[tmp]; else if (tmp < W) tmp = tmp - HW; lk2[cp] = tmp; __syncthreads(); //all leaf in lk2 updates tmp = lk2[cp]; if (tmp < HW) tmp = lk2[tmp]; lk0[cp] = tmp;//so lk0 contains the last block line tags // __syncthreads(); } b -= HW * pitch; b[cp] = lk0[cp]; b = ib + (h * (pitch + 1) * (blockIdx.x + 1) - 1) * HW; //back ward tag assign { ushort tmp = b[cp], res = LBMAX; if (tmp == cp) res = atomicInc(&labels, 0x8000);//(h-1) << 12; lk2[cp] = res; __syncthreads(); if (tmp < HW) res = lk2[tmp]; lk2[cp] = res; //last line tags b[cp] = res; b -= HW; } __syncthreads(); for (int i = h - 2; i >= 0; i--) { ushort tmp, tmp2;//, tmp4; tmp = b[cp]; //next link info //lk1[cp] = tmp; b -= HW * pitch; tmp2 = b[cp];//current link info lk1[cp] = tmp2; lk0[cp] = LBMAX;//final tags __syncthreads(); /* tmp4 = W; if(tmp >= HW && tmp < W){//race to resolve linked by current block tmp4 = tmp - HW; lk1[tmp4] = tmp2; } __syncthreads(); do{ bool changed = 0; if(tmp4 < HW){ changed = lk1[tmp4] != tmp2; if(tmp2 > lk1[tmp4]) lk1[tmp2] = lk1[tmp4]; 
} changed = __syncthreads_or(changed); if(tmp2 < HW) tmp2 = lk1[tmp2]; if(!changed) break; if(tmp4 < HW && tmp2 < lk1[tmp4]) lk1[tmp4] = tmp2; __syncthreads(); }while(1); */ if (tmp < HW) {//next linked if (tmp2 < HW) lk0[tmp2] = lk2[tmp]; // else // tmp2 = W; } __syncthreads(); if (tmp2 == cp && lk0[cp] == LBMAX) {//current linked lk0[cp] = atomicInc(&labels, 0x8000);//((h-1) << 12) - HW; } __syncthreads(); tmp = LBMAX; if (tmp2 < HW) tmp = lk0[tmp2]; //write out tags b[cp] = tmp; b -= HW; //switch buffer lk2[cp] = tmp; __syncthreads(); } //update first line link info // b = ib + (h*(pitch+1)*blockIdx.x) * HW; // b[cp] = lk3[cp]; *glabel = labels; } __global__ void static pass3_1024(ushort* pt, ushort* ps, ushort* b, uint* label, uint h) { __shared__ ushort cur[W / 2]; //last tag __shared__ ushort lst[W / 2]; //current link __shared__ ushort bot[W / 2]; //current tag __shared__ ushort lnk[W / 2]; //current link to last // __shared__ uint llabel; uint cp = (threadIdx.x + threadIdx.y * WP) << 1; pt = pt + (blockIdx.x * h + h - 1) * HW; ps = ps + (blockIdx.x * h + h - 1) * HW; if (blockIdx.x == 0) { ((uint*)bot)[cp >> 1] = LB2MAX; b += HW; } else { b += (blockIdx.x * 2 - 1) * HW; ((uint*)bot)[cp >> 1] = ((uint*)b)[cp >> 1]; b += 2 * HW; } ((uint*)cur)[cp >> 1] = ((uint*)b)[cp >> 1]; __syncthreads(); for (int i = h - 1; i >= 0; i--) { //load current link ((uint*)lnk)[cp >> 1] = ((uint*)ps)[cp >> 1]; ps -= HW; //switch cur last ((uint*)lst)[cp >> 1] = ((uint*)cur)[cp >> 1]; //clear cur ((uint*)cur)[cp >> 1] = LB2MAX; // llabel = 0; __syncthreads(); if (lnk[cp] < HW) {//link to last cur[cp] = lst[lnk[cp]]; } else if (lnk[cp] < W && lnk[cp] == (HW + cp)) {//link to local, and a head, assigning new label if (blockIdx.x != 0 && i == 0) cur[cp] = bot[cp]; else cur[cp] = atomicInc(label, LBMAX); } if (lnk[cp] & 0x8000) {//link to bottom if (blockIdx.x == 0) cur[cp] = atomicInc(label, LBMAX); else cur[cp] = bot[lnk[cp] & MASKAD]; } cp++; if (lnk[cp] < HW) {//link to last 
cur[cp] = lst[lnk[cp]]; } else if (lnk[cp] < W && lnk[cp] == (HW + cp)) {//link to local, and a head, assigning new label if (blockIdx.x != 0 && i == 0) cur[cp] = bot[cp]; else cur[cp] = atomicInc(label, LBMAX); } if (lnk[cp] & 0x8000) {//link to bottom if (blockIdx.x == 0) cur[cp] = atomicInc(label, LBMAX); else cur[cp] = bot[lnk[cp] & MASKAD]; } cp--; __syncthreads(); if (lnk[cp] >= HW && lnk[cp] < W) cur[cp] = cur[lnk[cp] - HW]; cp++; if (lnk[cp] >= HW && lnk[cp] < W) cur[cp] = cur[lnk[cp] - HW]; cp--; //write out ((uint*)pt)[cp >> 1] = ((uint*)cur)[cp >> 1]; pt -= HW; __syncthreads(); } } void chen_label_1024(uchar* cbpt, uchar* cbpt2, uchar* cbps, uchar* cbb, uchar* cbb2, uchar* cbglabel, uint h, uint bn, uchar* cbeb) { ushort* pt = (ushort*)cbpt; ushort* pt2 = (ushort*)cbpt2; uchar* ps = (uchar*)cbps; ushort* b = (ushort*)cbb; ushort* b2 = (ushort*)cbb2; uint* glabel = (uint*)cbglabel; ushort* eb = (ushort*)cbeb; dim3 threads(WP, TN / WP, 1); dim3 grid(bn, 1, 1); dim3 threads2(HW, 1, 1); dim3 grid2(1, 1, 1); pass1_1024 << <grid, threads >> > (pt2, ps, b, b2, (h - 2) / bn, eb); pass2_1024 << <grid2, threads2 >> > (b, glabel, bn, 1); pass3_1024 << <grid, threads >> > (pt, pt2, b, glabel, (h - 2) / (bn * 2)); }
2c6f5a61bb87f70dd6a16bb427440847a87d78a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include<cuda.h> #include<cuda_runtime.h> #include<time.h> #include<stdlib.h> #define BLOCK_NUM 32 // #define THREAD_NUM 32 // #define R_SIZE 1024//BLOCK_NUM * THREAD_NUM #define M_SIZE R_SIZE * R_SIZE __global__ void mat_mul(int *mat1, int *mat2, int *result) { const int bid = blockIdx.x; const int tid = threadIdx.x; // const int row = bid * THREAD_NUM + tid; for (int c = 0; c < R_SIZE; c++) { for (int n = 0; n < R_SIZE; n++) { result[row*R_SIZE+c] += mat1[row*R_SIZE+n] * mat2[n*R_SIZE+c]; } } } int main(int argc, char *argv[]) { int *mat1, *mat2, *result; int *g_mat1, *g_mat2, *g_mat_result; // mat1 = (int*) malloc(M_SIZE * sizeof(int)); mat2 = (int*) malloc(M_SIZE * sizeof(int)); result = (int*) malloc(M_SIZE * sizeof(int)); // initialize for (int i = 0; i < M_SIZE; i++) { mat1[i] = rand()/1000000; mat2[i] = rand()/1000000; result[i] = 0; } clock_t start, finish; start = clock(); hipMalloc((void **)&g_mat1, sizeof(int) * M_SIZE); hipMalloc((void **)&g_mat2, sizeof(int) * M_SIZE); hipMalloc((void **)&g_mat_result, sizeof(int) * M_SIZE); hipMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, hipMemcpyHostToDevice); hipMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mat_mul), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, g_mat1, g_mat2, g_mat_result); hipMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, hipMemcpyDeviceToHost); finish = clock(); printf("total times: %.3f\n",(double)(finish-start)/CLOCKS_PER_SEC); for(int i=0;i<10;i++) printf("%d ",result[i]); printf("\n"); hipFree(g_mat1);hipFree(g_mat2);hipFree(g_mat_result); free(mat1); free(mat2); free(result); return 0; }
2c6f5a61bb87f70dd6a16bb427440847a87d78a9.cu
#include <stdio.h> #include<cuda.h> #include<cuda_runtime.h> #include<time.h> #include<stdlib.h> #define BLOCK_NUM 32 //块数量 #define THREAD_NUM 32 // 每个块中的线程数 #define R_SIZE 1024//BLOCK_NUM * THREAD_NUM #define M_SIZE R_SIZE * R_SIZE __global__ void mat_mul(int *mat1, int *mat2, int *result) { const int bid = blockIdx.x; const int tid = threadIdx.x; // 每个线程计算一行 const int row = bid * THREAD_NUM + tid; for (int c = 0; c < R_SIZE; c++) { for (int n = 0; n < R_SIZE; n++) { result[row*R_SIZE+c] += mat1[row*R_SIZE+n] * mat2[n*R_SIZE+c]; } } } int main(int argc, char *argv[]) { int *mat1, *mat2, *result; int *g_mat1, *g_mat2, *g_mat_result; // 用一位数组表示二维矩阵 mat1 = (int*) malloc(M_SIZE * sizeof(int)); mat2 = (int*) malloc(M_SIZE * sizeof(int)); result = (int*) malloc(M_SIZE * sizeof(int)); // initialize for (int i = 0; i < M_SIZE; i++) { mat1[i] = rand()/1000000; mat2[i] = rand()/1000000; result[i] = 0; } clock_t start, finish; start = clock(); cudaMalloc((void **)&g_mat1, sizeof(int) * M_SIZE); cudaMalloc((void **)&g_mat2, sizeof(int) * M_SIZE); cudaMalloc((void **)&g_mat_result, sizeof(int) * M_SIZE); cudaMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice); cudaMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice); mat_mul<<<BLOCK_NUM, THREAD_NUM>>>(g_mat1, g_mat2, g_mat_result); cudaMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, cudaMemcpyDeviceToHost); finish = clock(); printf("total times: %.3f\n",(double)(finish-start)/CLOCKS_PER_SEC); for(int i=0;i<10;i++) printf("%d ",result[i]); printf("\n"); cudaFree(g_mat1);cudaFree(g_mat2);cudaFree(g_mat_result); free(mat1); free(mat2); free(result); return 0; }
560e968859fef4d2a71310c282b6ba7a782b6b47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SyncMem.cpp * * Created on: 2016. 8. 24. * Author: jhkim */ #include <math_functions.hpp> #include <cfloat> #include "SyncMem.h" //#define SYNCMEM_LOG #define MEM_MAX (FLT_MAX / 10) template <typename Dtype> __global__ void BoundMem(Dtype* mem, const Dtype bound, uint32_t* updateCount, const unsigned int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; if(mem[idx] > bound) { mem[idx] = bound; *updateCount++; } else if(mem[idx] < -bound) { mem[idx] = -bound; *updateCount++; } } template <> uint32_t SyncMem<float>::bound_mem() { float* d_mem = mutable_device_mem(); const float bound = MEM_MAX; _h_int = 0; checkCudaErrors(hipMemcpy(_d_int, &_h_int, sizeof(uint32_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( BoundMem), dim3(SOOOA_GET_BLOCKS((unsigned int)_size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, d_mem, bound, _d_int, (unsigned int)_size); CUDA_POST_KERNEL_CHECK; checkCudaErrors(hipMemcpyAsync(&_h_int, _d_int, sizeof(uint32_t), hipMemcpyDeviceToHost)); return _h_int; } template <> uint32_t SyncMem<int>::bound_mem() { assert(false && "SyncMem<int>::bound_mem() is not supported ... "); }
560e968859fef4d2a71310c282b6ba7a782b6b47.cu
/* * SyncMem.cpp * * Created on: 2016. 8. 24. * Author: jhkim */ #include <math_functions.hpp> #include <cfloat> #include "SyncMem.h" //#define SYNCMEM_LOG #define MEM_MAX (FLT_MAX / 10) template <typename Dtype> __global__ void BoundMem(Dtype* mem, const Dtype bound, uint32_t* updateCount, const unsigned int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; if(mem[idx] > bound) { mem[idx] = bound; *updateCount++; } else if(mem[idx] < -bound) { mem[idx] = -bound; *updateCount++; } } template <> uint32_t SyncMem<float>::bound_mem() { float* d_mem = mutable_device_mem(); const float bound = MEM_MAX; _h_int = 0; checkCudaErrors(cudaMemcpy(_d_int, &_h_int, sizeof(uint32_t), cudaMemcpyHostToDevice)); BoundMem<<<SOOOA_GET_BLOCKS((unsigned int)_size), SOOOA_CUDA_NUM_THREADS>>>( d_mem, bound, _d_int, (unsigned int)_size); CUDA_POST_KERNEL_CHECK; checkCudaErrors(cudaMemcpyAsync(&_h_int, _d_int, sizeof(uint32_t), cudaMemcpyDeviceToHost)); return _h_int; } template <> uint32_t SyncMem<int>::bound_mem() { assert(false && "SyncMem<int>::bound_mem() is not supported ... "); }
b1e5ce0a77d35e16eea98437931297d02765342c.hip
// !!! This is a file automatically generated by hipify!!! /* #include <cstdlib> #include <cstdio> #include "dock.h" #include "gpu.cuh" #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> */ __device__ void InitAcs_d (const int bidx) { if (blockIdx.x == 0) { for (int i = bidx; i < MAXREP; i += TperB) { acs_mc_dc[i] = 0; acs_temp_exchg_dc[i] = 0; } } } __device__ void InitLigRecord_d (const int bidx, const int myreplica, const int rep_begin) { for (int s2s3 = 0; s2s3 < steps_per_dump_dc; ++s2s3) { LigRecordSingleStep *myrecord = &ligrecord_dc[myreplica - rep_begin].step[s2s3]; myrecord->replica.idx_rep = 0; myrecord->replica.idx_prt = 0; myrecord->replica.idx_tmp = 0; myrecord->replica.idx_lig = 0; for (int i = 0; i < MAXWEI; ++i) myrecord->energy.e[i] = 0.0f; for (int i = 0; i < 6; ++i) myrecord->movematrix[i] = 0.0f; myrecord->step = 0; } } /* __forceinline__ __device__ void BackupLigCoord_d (const int bidx, Ligand *mylig) { const LigCoord *src = &mylig->coord_old; LigCoord *dst = &mylig->coord_bkup; for (int atom = bidx; atom < lna_dc; atom += TperB) { dst->x[atom] = src->x[atom]; dst->y[atom] = src->y[atom]; dst->z[atom] = src->z[atom]; } if (bidx < 3) dst->center[bidx] = src->center[bidx]; } */ __device__ void RecordLigand_d (const int bidx, const int s1, const int s2s3, const int myreplica, const int rep_begin, const Ligand * mylig) { /* if (bidx == 0) // && myreplica == 0) printf ("rep %d, iter %d, rep_begin %d, n_rep %d, idx_rep %d\n", myreplica, s2, rep_begin, n_rep_dc, replica_dc[myreplica].idx_rep); if (myreplica == 0) { PrintEnergy2_d (bidx, mylig, myreplica, s1 + s2s3, 2); } */ if (bidx == 0) { LigRecordSingleStep *myrecord = &ligrecord_dc[myreplica - rep_begin].step[s2s3]; myrecord->replica = replica_dc[myreplica]; myrecord->energy = mylig->energy_old; for (int i = 0; i < 6; ++i) myrecord->movematrix[i] = mylig->movematrix_old[i]; myrecord->step = s1 + s2s3; } } __forceinline__ __device__ float MyRand_d () { 
#if IS_RANDOM == 0 float randdd = 20.0f; // float randdd = 0.0f; #elif IS_RANDOM == 1 const int gidx = blockDim.x * blockDim.y * blockIdx.x + blockDim.x * threadIdx.y + threadIdx.x; hiprandState_t myseed = curandstate_dc[gidx]; float randdd = hiprand_uniform (&myseed); curandstate_dc[gidx] = myseed; #endif // printf("%f\n", randdd); return randdd; } /* __forceinline__ __device__ int Mininal_int_d (const int a, const int b) { return a < b ? a : b; } */ __forceinline__ __device__ void SumReduction1D_d (const int bidx, float *a) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) a[bidx] += a[stride + bidx]; __syncthreads (); } } __forceinline__ __device__ void SumReduction_int_1D_4_d (const int bidx, int *a, int *b, int *c, int *d) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) { a[bidx] += a[stride + bidx]; b[bidx] += b[stride + bidx]; c[bidx] += c[stride + bidx]; d[bidx] += d[stride + bidx]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction1D_5_d (const int bidx, float *a, float *b, float *c, float *d, float *e) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) { a[bidx] += a[stride + bidx]; b[bidx] += b[stride + bidx]; c[bidx] += c[stride + bidx]; d[bidx] += d[stride + bidx]; e[bidx] += e[stride + bidx]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction2D_d (float a[BDy][BDx]) { __syncthreads (); for (int stride = BDx / 2; stride >= 1; stride >>= 1) { if (threadIdx.x < stride) { a[threadIdx.y][threadIdx.x] += a[threadIdx.y][stride + threadIdx.x]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction2D_2_d (float a[BDy][BDx], float b[BDy][BDx]) { __syncthreads (); for (int stride = BDx / 2; stride >= 1; stride >>= 1) { if (threadIdx.x < stride) { a[threadIdx.y][threadIdx.x] += a[threadIdx.y][stride + threadIdx.x]; b[threadIdx.y][threadIdx.x] += b[threadIdx.y][stride + 
threadIdx.x]; } __syncthreads (); } } __forceinline__ __device__ float NormPdf(float x, float loc, float scale){ float norm_para, prob, pdf_val; norm_para = 1/(scale * sqrt(2 * PI)); prob = exp (0.f - (x - loc) * (x - loc) / (2 * scale * scale)); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float CauchyPdf(float x, float loc, float scale){ float norm_para, prob, pdf_val; norm_para = 1/(PI * scale); prob = 1/(1 + ( (x-loc)/scale) * ((x-loc)/scale) ); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float LogisticPdf(float x, float loc, float scale){ float norm_para, e_power, prob, pdf_val; norm_para = 1/scale; e_power = exp( -(x - loc) / scale); prob = e_power / powf(1+e_power, 2.0); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float WaldPdf(float x, float loc, float scale){ float norm_para, prob, pdf_val; float normed_x = (x - loc)/scale; norm_para = 1/(sqrt( 2 * PI * powf(normed_x, 3.0) ) * scale); prob = exp(-pow(normed_x-1, 2)/(2*normed_x)); if (normed_x < 0) pdf_val = 0.00000001f; else pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float LaplacePdf(float x, float loc, float scale){ float normed_x, pdf_val; normed_x = fabs(x-loc) / scale; pdf_val = (1/(2 * scale)) * exp(- normed_x); return pdf_val; }
b1e5ce0a77d35e16eea98437931297d02765342c.cu
/* #include <cstdlib> #include <cstdio> #include "dock.h" #include "gpu.cuh" #include <cuda.h> #include <curand.h> #include <curand_kernel.h> */ __device__ void InitAcs_d (const int bidx) { if (blockIdx.x == 0) { for (int i = bidx; i < MAXREP; i += TperB) { acs_mc_dc[i] = 0; acs_temp_exchg_dc[i] = 0; } } } __device__ void InitLigRecord_d (const int bidx, const int myreplica, const int rep_begin) { for (int s2s3 = 0; s2s3 < steps_per_dump_dc; ++s2s3) { LigRecordSingleStep *myrecord = &ligrecord_dc[myreplica - rep_begin].step[s2s3]; myrecord->replica.idx_rep = 0; myrecord->replica.idx_prt = 0; myrecord->replica.idx_tmp = 0; myrecord->replica.idx_lig = 0; for (int i = 0; i < MAXWEI; ++i) myrecord->energy.e[i] = 0.0f; for (int i = 0; i < 6; ++i) myrecord->movematrix[i] = 0.0f; myrecord->step = 0; } } /* __forceinline__ __device__ void BackupLigCoord_d (const int bidx, Ligand *mylig) { const LigCoord *src = &mylig->coord_old; LigCoord *dst = &mylig->coord_bkup; for (int atom = bidx; atom < lna_dc; atom += TperB) { dst->x[atom] = src->x[atom]; dst->y[atom] = src->y[atom]; dst->z[atom] = src->z[atom]; } if (bidx < 3) dst->center[bidx] = src->center[bidx]; } */ __device__ void RecordLigand_d (const int bidx, const int s1, const int s2s3, const int myreplica, const int rep_begin, const Ligand * mylig) { /* if (bidx == 0) // && myreplica == 0) printf ("rep %d, iter %d, rep_begin %d, n_rep %d, idx_rep %d\n", myreplica, s2, rep_begin, n_rep_dc, replica_dc[myreplica].idx_rep); if (myreplica == 0) { PrintEnergy2_d (bidx, mylig, myreplica, s1 + s2s3, 2); } */ if (bidx == 0) { LigRecordSingleStep *myrecord = &ligrecord_dc[myreplica - rep_begin].step[s2s3]; myrecord->replica = replica_dc[myreplica]; myrecord->energy = mylig->energy_old; for (int i = 0; i < 6; ++i) myrecord->movematrix[i] = mylig->movematrix_old[i]; myrecord->step = s1 + s2s3; } } __forceinline__ __device__ float MyRand_d () { #if IS_RANDOM == 0 float randdd = 20.0f; // float randdd = 0.0f; #elif IS_RANDOM == 1 
const int gidx = blockDim.x * blockDim.y * blockIdx.x + blockDim.x * threadIdx.y + threadIdx.x; curandState myseed = curandstate_dc[gidx]; float randdd = curand_uniform (&myseed); curandstate_dc[gidx] = myseed; #endif // printf("%f\n", randdd); return randdd; } /* __forceinline__ __device__ int Mininal_int_d (const int a, const int b) { return a < b ? a : b; } */ __forceinline__ __device__ void SumReduction1D_d (const int bidx, float *a) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) a[bidx] += a[stride + bidx]; __syncthreads (); } } __forceinline__ __device__ void SumReduction_int_1D_4_d (const int bidx, int *a, int *b, int *c, int *d) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) { a[bidx] += a[stride + bidx]; b[bidx] += b[stride + bidx]; c[bidx] += c[stride + bidx]; d[bidx] += d[stride + bidx]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction1D_5_d (const int bidx, float *a, float *b, float *c, float *d, float *e) { __syncthreads (); for (int stride = TperB / 2; stride >= 1; stride >>= 1) { if (bidx < stride) { a[bidx] += a[stride + bidx]; b[bidx] += b[stride + bidx]; c[bidx] += c[stride + bidx]; d[bidx] += d[stride + bidx]; e[bidx] += e[stride + bidx]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction2D_d (float a[BDy][BDx]) { __syncthreads (); for (int stride = BDx / 2; stride >= 1; stride >>= 1) { if (threadIdx.x < stride) { a[threadIdx.y][threadIdx.x] += a[threadIdx.y][stride + threadIdx.x]; } __syncthreads (); } } __forceinline__ __device__ void SumReduction2D_2_d (float a[BDy][BDx], float b[BDy][BDx]) { __syncthreads (); for (int stride = BDx / 2; stride >= 1; stride >>= 1) { if (threadIdx.x < stride) { a[threadIdx.y][threadIdx.x] += a[threadIdx.y][stride + threadIdx.x]; b[threadIdx.y][threadIdx.x] += b[threadIdx.y][stride + threadIdx.x]; } __syncthreads (); } } __forceinline__ __device__ float NormPdf(float x, float 
loc, float scale){ float norm_para, prob, pdf_val; norm_para = 1/(scale * sqrt(2 * PI)); prob = exp (0.f - (x - loc) * (x - loc) / (2 * scale * scale)); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float CauchyPdf(float x, float loc, float scale){ float norm_para, prob, pdf_val; norm_para = 1/(PI * scale); prob = 1/(1 + ( (x-loc)/scale) * ((x-loc)/scale) ); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float LogisticPdf(float x, float loc, float scale){ float norm_para, e_power, prob, pdf_val; norm_para = 1/scale; e_power = exp( -(x - loc) / scale); prob = e_power / powf(1+e_power, 2.0); pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float WaldPdf(float x, float loc, float scale){ float norm_para, prob, pdf_val; float normed_x = (x - loc)/scale; norm_para = 1/(sqrt( 2 * PI * powf(normed_x, 3.0) ) * scale); prob = exp(-pow(normed_x-1, 2)/(2*normed_x)); if (normed_x < 0) pdf_val = 0.00000001f; else pdf_val = norm_para * prob; return pdf_val; } __forceinline__ __device__ float LaplacePdf(float x, float loc, float scale){ float normed_x, pdf_val; normed_x = fabs(x-loc) / scale; pdf_val = (1/(2 * scale)) * exp(- normed_x); return pdf_val; }
0f2e13cf3b1277611a9ba74a259776b383237585.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } void initialData_1(float *in, const int size) { for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; //100.0f; } return; } // case 0 copy kernel: access data in rows __global__ void copyRow_1(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } } // main functions //int main(int argc, char **argv) //{ // // set up device // int dev = 0; // hipDeviceProp_t deviceProp; // hipError_t error; // // error = hipGetDeviceProperties(&deviceProp, dev); // printf("%s starting transpose at ", argv[0]); // printf("device %d: %s ", dev, deviceProp.name); // printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem); // error = hipSetDevice(dev); // // // set up array size 2048 // int nx = 1 << 5; // int ny = 1 << 5; // // // select a kernel and block size // int iKernel = 1; // int blockx = 16; // int blocky = 16; // // if (argc > 1) blockx = atoi(argv[1]); // // if (argc > 2) blocky = atoi(argv[2]); // // if (argc > 3) nx = atoi(argv[3]); // // if (argc > 4) ny = atoi(argv[4]); // // // size_t nBytes = nx * ny * sizeof(float); // float bytes = nBytes / (1024 * 1024); // printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes); // // // execution configuration // dim3 block(blockx, blocky); // dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // // // allocate host memory // float *h_A = 
(float *)malloc(nBytes); // float *hostRef = (float *)malloc(nBytes); // float *gpuRef = (float *)malloc(nBytes); // // // initialize host array // initialData_1(h_A, nx * ny); // // // allocate device memory // float *d_A, *d_C; // error = hipMalloc((float**)&d_A, nBytes); // error = hipMalloc((float**)&d_C, nBytes); // // // copy data from host to device // error = hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); // error = hipGetLastError(); // printf("%s - %s \n", hipGetErrorName(error),hipGetErrorString(error)); // // copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny); // error = hipDeviceSynchronize(); // // printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x, // block.y); // hipGetLastError(); // // // check kernel results // if (iKernel > 1) // { // error = hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // } // // // free host and device memory // error = hipFree(d_A); // error = hipFree(d_C); // free(h_A); // free(hostRef); // free(gpuRef); // // // reset device // error = hipDeviceReset(); // system("pause"); // return EXIT_SUCCESS; //}
0f2e13cf3b1277611a9ba74a259776b383237585.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <stdio.h> #include <iostream> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } void initialData_1(float *in, const int size) { for (int i = 0; i < size; i++) { in[i] = (float)(rand() & 0xFF) / 10.0f; //100.0f; } return; } // case 0 copy kernel: access data in rows __global__ void copyRow_1(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } } // main functions //int main(int argc, char **argv) //{ // // set up device // int dev = 0; // cudaDeviceProp deviceProp; // cudaError error; // // error = cudaGetDeviceProperties(&deviceProp, dev); // printf("%s starting transpose at ", argv[0]); // printf("device %d: %s ", dev, deviceProp.name); // printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem); // error = cudaSetDevice(dev); // // // set up array size 2048 // int nx = 1 << 5; // int ny = 1 << 5; // // // select a kernel and block size // int iKernel = 1; // int blockx = 16; // int blocky = 16; // // if (argc > 1) blockx = atoi(argv[1]); // // if (argc > 2) blocky = atoi(argv[2]); // // if (argc > 3) nx = atoi(argv[3]); // // if (argc > 4) ny = atoi(argv[4]); // // // size_t nBytes = nx * ny * sizeof(float); // float bytes = nBytes / (1024 * 1024); // printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes); // // // execution configuration // dim3 block(blockx, blocky); // dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // // // allocate host memory // float *h_A = (float *)malloc(nBytes); // float *hostRef = (float *)malloc(nBytes); 
// float *gpuRef = (float *)malloc(nBytes); // // // initialize host array // initialData_1(h_A, nx * ny); // // // allocate device memory // float *d_A, *d_C; // error = cudaMalloc((float**)&d_A, nBytes); // error = cudaMalloc((float**)&d_C, nBytes); // // // copy data from host to device // error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); // error = cudaGetLastError(); // printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error)); // // copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny); // error = cudaDeviceSynchronize(); // // printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x, // block.y); // cudaGetLastError(); // // // check kernel results // if (iKernel > 1) // { // error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // } // // // free host and device memory // error = cudaFree(d_A); // error = cudaFree(d_C); // free(h_A); // free(hostRef); // free(gpuRef); // // // reset device // error = cudaDeviceReset(); // system("pause"); // return EXIT_SUCCESS; //}
5066491db2c8778235023e1c3b2b306a1cc037ef.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_y0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_y0), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_y0), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_y0), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5066491db2c8778235023e1c3b2b306a1cc037ef.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_y0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_y0<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_y0<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_y0<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ead44c05d5b29fc9660ac419dc7af0724ed54cf9.hip
// !!! This is a file automatically generated by hipify!!! /** * @file hermes.cu * @brief Hermes emulation * @author John Melton, G0ORX/N6LYT */ /* Copyright (C) * 2015 - John Melton, G0ORX/N6LYT * * Based on code by Steven Passe AD0ES and Vasiliy Gokoyev K3IT * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <errno.h> #include <pthread.h> #include <sched.h> #include <semaphore.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <arpa/inet.h> #include <net/if.h> #include <netinet/if_ether.h> #include <netpacket/packet.h> #include <net/if_packet.h> #include <hipfft.h> #include <helper_cuda.h> #include "common_hip.cuh" #include "dfc.cuh" #include "inputbuffer.cuh" #include "rawbuffer.cuh" #include "filters.cuh" #include "receiver.cuh" #include "hermes.cuh" #include "time.cuh" #include "audio.cuh" #define PORT 1024 #define MAX_BUFFER_LEN 1032 #define HERMES_FW_VERSION 26 #define HERMES_ID 0x01 #define HERMES_LITE_ID 0x06 #define SYN 0x7f //#define SCALE_FACTOR 0x7fffffffL #define SCALE_FACTOR 8388607.0 // 2^24-1 int hpsdr_id=HERMES_ID; static int slicesamples; static pthread_t readThreadId; static pthread_t processThreadId; static pthread_t processRawThreadId; static int hermesSocket; static unsigned char hw_address[6]; static int state=0; // 0 = 
idle, 1 = running struct sockaddr_in clientAddr; static int sendIQ=0; static int sendRaw=0; static int outputrate=-1; // nothing static int outputsamplerate=0; static int receivers=1; static int mox=0; static int commonfrequency=0; static long tx_sequence=0; static long raw_sequence=0; #define MAX_RECEIVERS 7 /* static long frequency[MAX_RECEIVERS] = {14150000,14150000,14150000,14150000,14150000,14150000,14150000}; static int rotate[MAX_RECEIVERS] = {0,0,0,0,0,0,0}; static hipfftComplex* receiverdata[MAX_RECEIVERS]; static hipfftComplex* deviceReceiverdata[MAX_RECEIVERS]; static hipfftComplex* slicedata[MAX_RECEIVERS]; static hipfftComplex* deviceSlicedata[MAX_RECEIVERS]; static hipfftComplex* slice[MAX_RECEIVERS]; static hipfftComplex* deviceSlice[MAX_RECEIVERS]; static hipfftComplex* decimate[MAX_RECEIVERS]; static hipfftComplex* deviceDecimate[MAX_RECEIVERS]; */ static hipfftComplex* filter; static hipfftComplex* deviceFilter; static char* output[MAX_RECEIVERS]; static char* deviceOutput[MAX_RECEIVERS]; static float scale; static int d_size; static int d_size_2; static int ifft_decimate_factor; static int outrot; static hipfftHandle planC2C; #define FRAME_LENGTH 1032 static unsigned char frame[FRAME_LENGTH]; static int frameoffset; static unsigned char rawframe[FRAME_LENGTH]; static int rawframeoffset; void* readThread(void* arg); void* processThread(void* arg); void* processRawThread(void* arg); void processClientData(unsigned char* buffer); void processClientFrame(unsigned char* buffer); void initHermes() { int result; hipError_t error; fprintf(stderr,"initHermes\n"); scale=1.0; for(int i=0;i<FRAME_LENGTH;i++) { frame[i]='\0'; } frame[0]=0xef; frame[1]=0xfe; frame[2]=0x01; frame[3]=0x06; frame[4]=0x00; frame[5]=0x00; frame[6]=0x00; frame[7]=0x00; frame[8]=0x7f; frame[9]=0x7f; frame[10]=0x7f; frame[11]=0x00; frame[12]=0x1e; frame[13]=0x00; frame[14]=0x00; frame[15]=HERMES_FW_VERSION; frame[520]=0x7f; frame[521]=0x7f; frame[522]=0x7f; frame[523]=0x00; 
frame[524]=0x1e; frame[525]=0x00; frame[526]=0x00; frame[527]=HERMES_FW_VERSION; frameoffset=16; rawframe[0]=0xef; rawframe[1]=0xfe; rawframe[2]=0x01; rawframe[3]=0x04; rawframe[4]=0x00; rawframe[5]=0x00; rawframe[6]=0x00; rawframe[7]=0x00; rawframeoffset=8; if((result=pthread_create(&readThreadId, NULL, readThread, NULL)) < 0) { fprintf(stderr, "readThread create failed %d\n",result); exit(EXIT_FAILURE); } if((result=pthread_create(&processThreadId, NULL, processThread, NULL)) < 0) { fprintf(stderr, "processThread create failed %d\n",result); exit(EXIT_FAILURE); } if((result=pthread_create(&processRawThreadId, NULL, processRawThread, NULL)) < 0) { fprintf(stderr, "processRawThread create failed %d\n",result); exit(EXIT_FAILURE); } } void* readThread(void* arg) { struct sockaddr_in readAddr; uint8_t readBuffer[MAX_BUFFER_LEN]; socklen_t readLength; struct ifreq ifr; readLength = sizeof(readAddr); fprintf(stderr,"hermes readThread: running on cpu %d\n", sched_getcpu()); hermesSocket = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP); if (hermesSocket < 0) { perror("readThread: create socket failed for hermesSocket\n"); exit(EXIT_FAILURE); } int on=1; int rc = setsockopt(hermesSocket, SOL_SOCKET, SO_REUSEADDR, (const void*)&on, sizeof(on)); if (rc != 0) { fprintf(stderr, "readThread: cannot set SO_REUSEADDR: rc=%d\n", rc); exit(EXIT_FAILURE); } // Bind to this interface. readAddr.sin_family = AF_INET; readAddr.sin_port = htons(PORT); readAddr.sin_addr.s_addr = htonl(INADDR_ANY); if (bind(hermesSocket, (struct sockaddr*) &readAddr, sizeof(readAddr)) < 0) { perror("readThread: bind socket failed for hermesSocket\n"); exit(EXIT_FAILURE); } // Allow broadcast on the socket. 
rc = setsockopt(hermesSocket, SOL_SOCKET, SO_BROADCAST, (const void*)&on, sizeof(on)); if (rc != 0) { fprintf(stderr, "readThread: cannot set SO_BROADCAST: rc=%d\n", rc); exit(EXIT_FAILURE); } ifr.ifr_addr.sa_family = AF_INET; strncpy(ifr.ifr_name, interface, IFNAMSIZ-1); ioctl(hermesSocket, SIOCGIFADDR, &ifr); unsigned char* u = (unsigned char*)&ifr.ifr_addr.sa_data; for (int k = 0; k < 6; k++) hw_address[k] = u[k]; fprintf(stderr, "readThread: listening on %s (%02x:%02x:%02x:%02x:%02x:%02x)\n", inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr), hw_address[0], hw_address[1], hw_address[2], hw_address[3], hw_address[4], hw_address[5]); unsigned char discoverBuffer[MAX_BUFFER_LEN] = { 0xef, 0xfe, 0x02, 0, 0, 0, 0, 0, 0, HERMES_FW_VERSION, hpsdr_id }; while(1) { if ((rc=recvfrom(hermesSocket, readBuffer, sizeof(readBuffer), 0, (struct sockaddr*)&readAddr, &readLength)) < 0) { fprintf(stderr, "readThread: Bad recvfrom %d", rc); exit(EXIT_FAILURE); } //fprintf(stderr,"recvfrom: %d bytes\n", rc); if ((readBuffer[0] == 0xef) && (readBuffer[1] == 0xfe)) { switch(readBuffer[2]) { case 1: // data if(state) { // check if from expected client if(memcmp(&clientAddr,&readAddr, readLength)==0) { processClientData(readBuffer); } else { // ignore } } else { processClientData(readBuffer); } break; case 2: fprintf(stderr, "readThread: received discovery from %s %d\n", inet_ntoa(readAddr.sin_addr), htons(readAddr.sin_port)); for (int i = 0; i < 6; i++) { discoverBuffer[3 + i] = hw_address[i]; } discoverBuffer[2] |= state; discoverBuffer[10]=hpsdr_id; for (int i = 11; i < 60; i++) discoverBuffer[i] = 0; if ((rc=sendto(hermesSocket, discoverBuffer, 60, 0, (struct sockaddr*)&readAddr, sizeof(readAddr))) < 0) { fprintf(stderr, "readThread: Bad sendto %d",rc); exit(EXIT_FAILURE); } break; case 4: // start/stop command switch(readBuffer[3]) { case 0: if(state==0) { fprintf(stderr,"readThread: ignoring stop command from %s\n", inet_ntoa(readAddr.sin_addr)); } else 
if(memcmp(&clientAddr,&readAddr, readLength)==0) { state=0; sendIQ=0; sendRaw=0; tx_sequence=0; raw_sequence=0; } else { fprintf(stderr,"readThread: ignoring stop command from %s\n", inet_ntoa(readAddr.sin_addr)); } break; case 1: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=1; sendRaw=0; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=1; sendRaw=0; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; case 2: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=0; sendRaw=1; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=0; sendRaw=1; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; case 3: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=1; sendRaw=1; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=1; sendRaw=1; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; } fprintf(stderr,"readThread: received start/stop command: state=%d sendIQ=%d sendRaw=%d\n", state, sendIQ, sendRaw); break; default: break; } } else { fprintf(stderr, "readThread: unexpected packet from %s (0x%02x 0x%02x 0x%02x)\n", inet_ntoa(readAddr.sin_addr), readBuffer[0], readBuffer[1], readBuffer[2]); } } } void processClientData(unsigned char* buffer) { int ep=buffer[3]&0xFF; if(ep==2) { processClientFrame(&buffer[8]); processClientFrame(&buffer[520]); } else { fprintf(stderr,"processClientData: unexpected endpoint %d\n", ep); } } void processClientFrame(unsigned char* buffer) { int id; int rate; int rcvrs; int rx; long f; int rot; hipError_t error; hipfftResult cufftError; if(buffer[0]==SYN && buffer[1]==SYN && buffer[2]==SYN) { mox=buffer[3]&0x01; id=(buffer[3]&0xFF)>>1; switch(id) { case 0: rate=buffer[4]&0x03; if(rate!=outputrate) { 
outputrate=rate; switch(rate) { case 0: outputsamplerate=48000; break; case 1: outputsamplerate=96000; break; case 2: outputsamplerate=192000; break; case 3: outputsamplerate=384000; break; } fprintf(stderr,"outputsamplerate=%d\n",outputsamplerate); filter=getFilter(rate); deviceFilter=getDeviceFilter(rate); slicesamples=(int)((float)outputsamplerate/hzperbin); //if(source=SOURCE_PCIE) { // d_size=256; // d_size_2=(samplingrate/256)/outputsamplerate; //} else { d_size=(samplingrate/10)/outputsamplerate; //} fprintf(stderr,"d_size=%d\n",d_size); ifft_decimate_factor = (samplingrate / d_size / 2 / outputsamplerate); fprintf(stderr,"ifft_decimate_factor=%d\n",ifft_decimate_factor); outrot = (int)(round((outputsamplerate/2) * NFACTOR) * V_SIZE)+9; fprintf(stderr,"outrot=%d\n",outrot); fprintf(stderr,"planC2C=%d\n",COMPLEX_SIGNAL_SIZE/d_size); cufftError = hipfftPlan1d(&planC2C, COMPLEX_SIGNAL_SIZE/d_size, HIPFFT_C2C, 1); if(cufftError!=HIPFFT_SUCCESS) { fprintf(stderr,"processClientFrame: Error creating hipfftPlan1d for Inverse FFT: %s\n", _cudaGetErrorEnum(cufftError)); exit(EXIT_FAILURE); } fprintf(stderr,"P_SIZE:%d V_SIZE:%d L_SIZE:%d RX_TD_SIZE=%d\n", P_SIZE,V_SIZE,L_SIZE,RX_TD_SIZE); fprintf(stderr,"DFT_BLOCK_SIZE:%d COMPLEX_SIGNAL_SIZE:%d\n", DFT_BLOCK_SIZE,COMPLEX_SIGNAL_SIZE); for(int i=0;i<MAX_RECEIVER;i++) { RECEIVER* r=&receiver[i]; r->outputrate=outputsamplerate; r->filter=filter; r->deviceFilter=deviceFilter; r->slicesamples=slicesamples; r->d_size=d_size; r->ifft_decimate_factor=ifft_decimate_factor; r->rx_td_size=RX_TD_SIZE; r->planC2C=planC2C; r->scale=1.0F; r->outrot=outrot; initReceiver(i); } } rcvrs=((buffer[7]>>3)&0x07)+1; if(receivers!=rcvrs) { receivers=rcvrs; fprintf(stderr,"processClientFrame: setting receivers to %d\n", receivers); } commonfrequency=(buffer[7]>>7)&0x01; break; case 1: // tx frequency break; case 2: case 3: case 4: case 5: case 6: case 7: case 8: rx=id-2; f = (buffer[4]&0xFF) << 24 | (buffer[5]&0xFF) << 16 | (buffer[6]&0xFF) << 
8 | (buffer[7]&0xFF); rot=(int)((((float)f-((float)outputsamplerate/2.0f))/hzperbin)+0.5f); //rot=(int)((((float)f-((float)outputsamplerate/2.0f))/hzperbin)); //rot=(int)(((float)f/hzperbin)+0.5f); if(commonfrequency) { for(rx=0;rx<receivers;rx++) { receiver[rx].frequency=f; receiver[rx].rotate=rot; } } else { receiver[rx].frequency=f; receiver[rx].rotate=rot; } //fprintf(stderr,"set new frequency(%d) %ld rot=%d\n", rx, f, rot); break; case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: break; default: break; } if(audio) { audio_write(buffer); } } else { fprintf(stderr,"processClientFrame: syn error 0x%02x 0x%02x 0x%02x\n", buffer[0], buffer[1],buffer[2]); } } void* processThread(void* arg) { int result; hipError_t error; #ifdef TIMING long long starttime; long long endtime; #endif fprintf(stderr,"hermes processThread: running on cpu %d\n", sched_getcpu()); // get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } while(1) { result=sem_wait(&frequencyBufferFull); if(result!=0) { fprintf(stderr, "processThread: sem_wait failed for frequencyBufferFull: %d\n", result); exit(EXIT_FAILURE); } if(state && sendIQ) { #ifdef TIMING starttime=current_timestamp(); #endif // process the buffer for each receiver // TODO handle commonfrequency for(int i=0;i<receivers;i++) { result=sem_post(&receiver[i].inputReady); if(result!=0) { fprintf(stderr, "processRawThread: sem_post failed for inputReady %d: %d\n", i, result); exit(EXIT_FAILURE); } } for(int i=0;i<receivers;i++) { result=sem_wait(&receiver[i].outputReady); if(result!=0) { fprintf(stderr, "processRawThread: sem_wait failed for inputReady %d: %d\n", i, result); exit(EXIT_FAILURE); } } // can get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); 
} // copy the IQ samples //fprintf(stderr,"copying %d IQ samples\n", RX_TD_SIZE); for(int i=0;i<RX_TD_SIZE;i++) { // I/Q samples for each receiver for(int r=0;r<receivers;r++) { for(int j=0;j<6;j++) { frame[frameoffset++]=receiver[r].output[(i*6)+j]; } } // mic samples frame[frameoffset++]=0x00; frame[frameoffset++]=0x00; if(frameoffset<=520) { if(frameoffset+(receivers*6)+2>520) { //fprintf(stderr,"frameoffset=%d setting to 528\n",frameoffset); frameoffset=528; } } else if(frameoffset<=1032) { if(frameoffset+(receivers*6)+2>1032) { //fprintf(stderr,"frameoffset=%d sendign and setting to 16\n",frameoffset); // send the frame frame[4] = (tx_sequence >> 24) & 0xff; frame[5] = (tx_sequence >> 16) & 0xff; frame[6] = (tx_sequence >> 8) & 0xff; frame[7] = tx_sequence & 0xff; //fprintf(stderr,"send frame offset=%d seq=%ld\n",frameoffset,tx_sequence); if ((result=sendto(hermesSocket, frame, 1032, 0, (struct sockaddr*)&clientAddr, sizeof(clientAddr))) < 0) { fprintf(stderr, "Error sending data to client %d\n", result); exit(EXIT_FAILURE); } tx_sequence++; frameoffset=16; } } } //fprintf(stderr,"copied samples: frameoffset=%d\n",frameoffset); #ifdef TIMING endtime=current_timestamp(); fprintf(stderr,"process took %lld ms to process %d receivers\n", endtime-starttime, receivers); #endif } else { // can get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } } } } void* processRawThread(void* arg) { int result; fprintf(stderr,"hermes processRawThread: running on cpu %d\n", sched_getcpu()); while(1) { // get the next buffer result=sem_post(&rawBufferEmpty); if(result!=0) { fprintf(stderr, "processRawThread: sem_post failed for rawBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } result=sem_wait(&rawBufferFull); if(result!=0) { fprintf(stderr, "processRawThread: sem_wait failed for rawBufferFull: %d\n", result); exit(EXIT_FAILURE); } if(state && 
sendRaw) { for(int i=0;i<RAW_BUFFER_SIZE;i++) { rawframe[rawframeoffset++]=rawBuffer[i]&0xFF; rawframe[rawframeoffset++]=(rawBuffer[i]>>8)&0xFF; if(rawframeoffset>=1032) { rawframe[4] = (raw_sequence >> 24) & 0xff; rawframe[5] = (raw_sequence >> 16) & 0xff; rawframe[6] = (raw_sequence >> 8) & 0xff; rawframe[7] = raw_sequence & 0xff; if ((result=sendto(hermesSocket, rawframe, 1032, 0, (struct sockaddr*)&clientAddr, sizeof(clientAddr))) < 0) { fprintf(stderr, "Error sending raw data to client %d\n", result); exit(EXIT_FAILURE); } raw_sequence++; rawframeoffset=8; } } } } }
ead44c05d5b29fc9660ac419dc7af0724ed54cf9.cu
/** * @file hermes.cu * @brief Hermes emulation * @author John Melton, G0ORX/N6LYT */ /* Copyright (C) * 2015 - John Melton, G0ORX/N6LYT * * Based on code by Steven Passe AD0ES and Vasiliy Gokoyev K3IT * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <errno.h> #include <pthread.h> #include <sched.h> #include <semaphore.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <arpa/inet.h> #include <net/if.h> #include <netinet/if_ether.h> #include <netpacket/packet.h> #include <net/if_packet.h> #include <cufft.h> #include <helper_cuda.h> #include "common.cuh" #include "dfc.cuh" #include "inputbuffer.cuh" #include "rawbuffer.cuh" #include "filters.cuh" #include "receiver.cuh" #include "hermes.cuh" #include "time.cuh" #include "audio.cuh" #define PORT 1024 #define MAX_BUFFER_LEN 1032 #define HERMES_FW_VERSION 26 #define HERMES_ID 0x01 #define HERMES_LITE_ID 0x06 #define SYN 0x7f //#define SCALE_FACTOR 0x7fffffffL #define SCALE_FACTOR 8388607.0 // 2^24-1 int hpsdr_id=HERMES_ID; static int slicesamples; static pthread_t readThreadId; static pthread_t processThreadId; static pthread_t processRawThreadId; static int hermesSocket; static unsigned char hw_address[6]; static int state=0; // 0 = idle, 1 = running struct sockaddr_in clientAddr; static int 
sendIQ=0; static int sendRaw=0; static int outputrate=-1; // nothing static int outputsamplerate=0; static int receivers=1; static int mox=0; static int commonfrequency=0; static long tx_sequence=0; static long raw_sequence=0; #define MAX_RECEIVERS 7 /* static long frequency[MAX_RECEIVERS] = {14150000,14150000,14150000,14150000,14150000,14150000,14150000}; static int rotate[MAX_RECEIVERS] = {0,0,0,0,0,0,0}; static cufftComplex* receiverdata[MAX_RECEIVERS]; static cufftComplex* deviceReceiverdata[MAX_RECEIVERS]; static cufftComplex* slicedata[MAX_RECEIVERS]; static cufftComplex* deviceSlicedata[MAX_RECEIVERS]; static cufftComplex* slice[MAX_RECEIVERS]; static cufftComplex* deviceSlice[MAX_RECEIVERS]; static cufftComplex* decimate[MAX_RECEIVERS]; static cufftComplex* deviceDecimate[MAX_RECEIVERS]; */ static cufftComplex* filter; static cufftComplex* deviceFilter; static char* output[MAX_RECEIVERS]; static char* deviceOutput[MAX_RECEIVERS]; static float scale; static int d_size; static int d_size_2; static int ifft_decimate_factor; static int outrot; static cufftHandle planC2C; #define FRAME_LENGTH 1032 static unsigned char frame[FRAME_LENGTH]; static int frameoffset; static unsigned char rawframe[FRAME_LENGTH]; static int rawframeoffset; void* readThread(void* arg); void* processThread(void* arg); void* processRawThread(void* arg); void processClientData(unsigned char* buffer); void processClientFrame(unsigned char* buffer); void initHermes() { int result; cudaError_t error; fprintf(stderr,"initHermes\n"); scale=1.0; for(int i=0;i<FRAME_LENGTH;i++) { frame[i]='\0'; } frame[0]=0xef; frame[1]=0xfe; frame[2]=0x01; frame[3]=0x06; frame[4]=0x00; frame[5]=0x00; frame[6]=0x00; frame[7]=0x00; frame[8]=0x7f; frame[9]=0x7f; frame[10]=0x7f; frame[11]=0x00; frame[12]=0x1e; frame[13]=0x00; frame[14]=0x00; frame[15]=HERMES_FW_VERSION; frame[520]=0x7f; frame[521]=0x7f; frame[522]=0x7f; frame[523]=0x00; frame[524]=0x1e; frame[525]=0x00; frame[526]=0x00; frame[527]=HERMES_FW_VERSION; 
frameoffset=16; rawframe[0]=0xef; rawframe[1]=0xfe; rawframe[2]=0x01; rawframe[3]=0x04; rawframe[4]=0x00; rawframe[5]=0x00; rawframe[6]=0x00; rawframe[7]=0x00; rawframeoffset=8; if((result=pthread_create(&readThreadId, NULL, readThread, NULL)) < 0) { fprintf(stderr, "readThread create failed %d\n",result); exit(EXIT_FAILURE); } if((result=pthread_create(&processThreadId, NULL, processThread, NULL)) < 0) { fprintf(stderr, "processThread create failed %d\n",result); exit(EXIT_FAILURE); } if((result=pthread_create(&processRawThreadId, NULL, processRawThread, NULL)) < 0) { fprintf(stderr, "processRawThread create failed %d\n",result); exit(EXIT_FAILURE); } } void* readThread(void* arg) { struct sockaddr_in readAddr; uint8_t readBuffer[MAX_BUFFER_LEN]; socklen_t readLength; struct ifreq ifr; readLength = sizeof(readAddr); fprintf(stderr,"hermes readThread: running on cpu %d\n", sched_getcpu()); hermesSocket = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP); if (hermesSocket < 0) { perror("readThread: create socket failed for hermesSocket\n"); exit(EXIT_FAILURE); } int on=1; int rc = setsockopt(hermesSocket, SOL_SOCKET, SO_REUSEADDR, (const void*)&on, sizeof(on)); if (rc != 0) { fprintf(stderr, "readThread: cannot set SO_REUSEADDR: rc=%d\n", rc); exit(EXIT_FAILURE); } // Bind to this interface. readAddr.sin_family = AF_INET; readAddr.sin_port = htons(PORT); readAddr.sin_addr.s_addr = htonl(INADDR_ANY); if (bind(hermesSocket, (struct sockaddr*) &readAddr, sizeof(readAddr)) < 0) { perror("readThread: bind socket failed for hermesSocket\n"); exit(EXIT_FAILURE); } // Allow broadcast on the socket. 
rc = setsockopt(hermesSocket, SOL_SOCKET, SO_BROADCAST, (const void*)&on, sizeof(on)); if (rc != 0) { fprintf(stderr, "readThread: cannot set SO_BROADCAST: rc=%d\n", rc); exit(EXIT_FAILURE); } ifr.ifr_addr.sa_family = AF_INET; strncpy(ifr.ifr_name, interface, IFNAMSIZ-1); ioctl(hermesSocket, SIOCGIFADDR, &ifr); unsigned char* u = (unsigned char*)&ifr.ifr_addr.sa_data; for (int k = 0; k < 6; k++) hw_address[k] = u[k]; fprintf(stderr, "readThread: listening on %s (%02x:%02x:%02x:%02x:%02x:%02x)\n", inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr), hw_address[0], hw_address[1], hw_address[2], hw_address[3], hw_address[4], hw_address[5]); unsigned char discoverBuffer[MAX_BUFFER_LEN] = { 0xef, 0xfe, 0x02, 0, 0, 0, 0, 0, 0, HERMES_FW_VERSION, hpsdr_id }; while(1) { if ((rc=recvfrom(hermesSocket, readBuffer, sizeof(readBuffer), 0, (struct sockaddr*)&readAddr, &readLength)) < 0) { fprintf(stderr, "readThread: Bad recvfrom %d", rc); exit(EXIT_FAILURE); } //fprintf(stderr,"recvfrom: %d bytes\n", rc); if ((readBuffer[0] == 0xef) && (readBuffer[1] == 0xfe)) { switch(readBuffer[2]) { case 1: // data if(state) { // check if from expected client if(memcmp(&clientAddr,&readAddr, readLength)==0) { processClientData(readBuffer); } else { // ignore } } else { processClientData(readBuffer); } break; case 2: fprintf(stderr, "readThread: received discovery from %s %d\n", inet_ntoa(readAddr.sin_addr), htons(readAddr.sin_port)); for (int i = 0; i < 6; i++) { discoverBuffer[3 + i] = hw_address[i]; } discoverBuffer[2] |= state; discoverBuffer[10]=hpsdr_id; for (int i = 11; i < 60; i++) discoverBuffer[i] = 0; if ((rc=sendto(hermesSocket, discoverBuffer, 60, 0, (struct sockaddr*)&readAddr, sizeof(readAddr))) < 0) { fprintf(stderr, "readThread: Bad sendto %d",rc); exit(EXIT_FAILURE); } break; case 4: // start/stop command switch(readBuffer[3]) { case 0: if(state==0) { fprintf(stderr,"readThread: ignoring stop command from %s\n", inet_ntoa(readAddr.sin_addr)); } else 
if(memcmp(&clientAddr,&readAddr, readLength)==0) { state=0; sendIQ=0; sendRaw=0; tx_sequence=0; raw_sequence=0; } else { fprintf(stderr,"readThread: ignoring stop command from %s\n", inet_ntoa(readAddr.sin_addr)); } break; case 1: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=1; sendRaw=0; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=1; sendRaw=0; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; case 2: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=0; sendRaw=1; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=0; sendRaw=1; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; case 3: if(state==0) { memcpy(&clientAddr,&readAddr, readLength); state=1; sendIQ=1; sendRaw=1; } else if(memcmp(&clientAddr,&readAddr, readLength)==0) { sendIQ=1; sendRaw=1; } else { fprintf(stderr,"readThread: ignoring start command %d from %s\n", readBuffer[3], inet_ntoa(readAddr.sin_addr)); } break; } fprintf(stderr,"readThread: received start/stop command: state=%d sendIQ=%d sendRaw=%d\n", state, sendIQ, sendRaw); break; default: break; } } else { fprintf(stderr, "readThread: unexpected packet from %s (0x%02x 0x%02x 0x%02x)\n", inet_ntoa(readAddr.sin_addr), readBuffer[0], readBuffer[1], readBuffer[2]); } } } void processClientData(unsigned char* buffer) { int ep=buffer[3]&0xFF; if(ep==2) { processClientFrame(&buffer[8]); processClientFrame(&buffer[520]); } else { fprintf(stderr,"processClientData: unexpected endpoint %d\n", ep); } } void processClientFrame(unsigned char* buffer) { int id; int rate; int rcvrs; int rx; long f; int rot; cudaError_t error; cufftResult cufftError; if(buffer[0]==SYN && buffer[1]==SYN && buffer[2]==SYN) { mox=buffer[3]&0x01; id=(buffer[3]&0xFF)>>1; switch(id) { case 0: rate=buffer[4]&0x03; if(rate!=outputrate) { 
outputrate=rate; switch(rate) { case 0: outputsamplerate=48000; break; case 1: outputsamplerate=96000; break; case 2: outputsamplerate=192000; break; case 3: outputsamplerate=384000; break; } fprintf(stderr,"outputsamplerate=%d\n",outputsamplerate); filter=getFilter(rate); deviceFilter=getDeviceFilter(rate); slicesamples=(int)((float)outputsamplerate/hzperbin); //if(source=SOURCE_PCIE) { // d_size=256; // d_size_2=(samplingrate/256)/outputsamplerate; //} else { d_size=(samplingrate/10)/outputsamplerate; //} fprintf(stderr,"d_size=%d\n",d_size); ifft_decimate_factor = (samplingrate / d_size / 2 / outputsamplerate); fprintf(stderr,"ifft_decimate_factor=%d\n",ifft_decimate_factor); outrot = (int)(round((outputsamplerate/2) * NFACTOR) * V_SIZE)+9; fprintf(stderr,"outrot=%d\n",outrot); fprintf(stderr,"planC2C=%d\n",COMPLEX_SIGNAL_SIZE/d_size); cufftError = cufftPlan1d(&planC2C, COMPLEX_SIGNAL_SIZE/d_size, CUFFT_C2C, 1); if(cufftError!=CUFFT_SUCCESS) { fprintf(stderr,"processClientFrame: Error creating cufftPlan1d for Inverse FFT: %s\n", _cudaGetErrorEnum(cufftError)); exit(EXIT_FAILURE); } fprintf(stderr,"P_SIZE:%d V_SIZE:%d L_SIZE:%d RX_TD_SIZE=%d\n", P_SIZE,V_SIZE,L_SIZE,RX_TD_SIZE); fprintf(stderr,"DFT_BLOCK_SIZE:%d COMPLEX_SIGNAL_SIZE:%d\n", DFT_BLOCK_SIZE,COMPLEX_SIGNAL_SIZE); for(int i=0;i<MAX_RECEIVER;i++) { RECEIVER* r=&receiver[i]; r->outputrate=outputsamplerate; r->filter=filter; r->deviceFilter=deviceFilter; r->slicesamples=slicesamples; r->d_size=d_size; r->ifft_decimate_factor=ifft_decimate_factor; r->rx_td_size=RX_TD_SIZE; r->planC2C=planC2C; r->scale=1.0F; r->outrot=outrot; initReceiver(i); } } rcvrs=((buffer[7]>>3)&0x07)+1; if(receivers!=rcvrs) { receivers=rcvrs; fprintf(stderr,"processClientFrame: setting receivers to %d\n", receivers); } commonfrequency=(buffer[7]>>7)&0x01; break; case 1: // tx frequency break; case 2: case 3: case 4: case 5: case 6: case 7: case 8: rx=id-2; f = (buffer[4]&0xFF) << 24 | (buffer[5]&0xFF) << 16 | (buffer[6]&0xFF) << 8 | 
(buffer[7]&0xFF); rot=(int)((((float)f-((float)outputsamplerate/2.0f))/hzperbin)+0.5f); //rot=(int)((((float)f-((float)outputsamplerate/2.0f))/hzperbin)); //rot=(int)(((float)f/hzperbin)+0.5f); if(commonfrequency) { for(rx=0;rx<receivers;rx++) { receiver[rx].frequency=f; receiver[rx].rotate=rot; } } else { receiver[rx].frequency=f; receiver[rx].rotate=rot; } //fprintf(stderr,"set new frequency(%d) %ld rot=%d\n", rx, f, rot); break; case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: break; default: break; } if(audio) { audio_write(buffer); } } else { fprintf(stderr,"processClientFrame: syn error 0x%02x 0x%02x 0x%02x\n", buffer[0], buffer[1],buffer[2]); } } void* processThread(void* arg) { int result; cudaError_t error; #ifdef TIMING long long starttime; long long endtime; #endif fprintf(stderr,"hermes processThread: running on cpu %d\n", sched_getcpu()); // get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } while(1) { result=sem_wait(&frequencyBufferFull); if(result!=0) { fprintf(stderr, "processThread: sem_wait failed for frequencyBufferFull: %d\n", result); exit(EXIT_FAILURE); } if(state && sendIQ) { #ifdef TIMING starttime=current_timestamp(); #endif // process the buffer for each receiver // TODO handle commonfrequency for(int i=0;i<receivers;i++) { result=sem_post(&receiver[i].inputReady); if(result!=0) { fprintf(stderr, "processRawThread: sem_post failed for inputReady %d: %d\n", i, result); exit(EXIT_FAILURE); } } for(int i=0;i<receivers;i++) { result=sem_wait(&receiver[i].outputReady); if(result!=0) { fprintf(stderr, "processRawThread: sem_wait failed for inputReady %d: %d\n", i, result); exit(EXIT_FAILURE); } } // can get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } 
// copy the IQ samples //fprintf(stderr,"copying %d IQ samples\n", RX_TD_SIZE); for(int i=0;i<RX_TD_SIZE;i++) { // I/Q samples for each receiver for(int r=0;r<receivers;r++) { for(int j=0;j<6;j++) { frame[frameoffset++]=receiver[r].output[(i*6)+j]; } } // mic samples frame[frameoffset++]=0x00; frame[frameoffset++]=0x00; if(frameoffset<=520) { if(frameoffset+(receivers*6)+2>520) { //fprintf(stderr,"frameoffset=%d setting to 528\n",frameoffset); frameoffset=528; } } else if(frameoffset<=1032) { if(frameoffset+(receivers*6)+2>1032) { //fprintf(stderr,"frameoffset=%d sendign and setting to 16\n",frameoffset); // send the frame frame[4] = (tx_sequence >> 24) & 0xff; frame[5] = (tx_sequence >> 16) & 0xff; frame[6] = (tx_sequence >> 8) & 0xff; frame[7] = tx_sequence & 0xff; //fprintf(stderr,"send frame offset=%d seq=%ld\n",frameoffset,tx_sequence); if ((result=sendto(hermesSocket, frame, 1032, 0, (struct sockaddr*)&clientAddr, sizeof(clientAddr))) < 0) { fprintf(stderr, "Error sending data to client %d\n", result); exit(EXIT_FAILURE); } tx_sequence++; frameoffset=16; } } } //fprintf(stderr,"copied samples: frameoffset=%d\n",frameoffset); #ifdef TIMING endtime=current_timestamp(); fprintf(stderr,"process took %lld ms to process %d receivers\n", endtime-starttime, receivers); #endif } else { // can get the next buffer result=sem_post(&frequencyBufferEmpty); if(result!=0) { fprintf(stderr, "processThread: sem_post failed for frequencyBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } } } } void* processRawThread(void* arg) { int result; fprintf(stderr,"hermes processRawThread: running on cpu %d\n", sched_getcpu()); while(1) { // get the next buffer result=sem_post(&rawBufferEmpty); if(result!=0) { fprintf(stderr, "processRawThread: sem_post failed for rawBufferEmpty: %d\n", result); exit(EXIT_FAILURE); } result=sem_wait(&rawBufferFull); if(result!=0) { fprintf(stderr, "processRawThread: sem_wait failed for rawBufferFull: %d\n", result); exit(EXIT_FAILURE); } if(state && 
sendRaw) { for(int i=0;i<RAW_BUFFER_SIZE;i++) { rawframe[rawframeoffset++]=rawBuffer[i]&0xFF; rawframe[rawframeoffset++]=(rawBuffer[i]>>8)&0xFF; if(rawframeoffset>=1032) { rawframe[4] = (raw_sequence >> 24) & 0xff; rawframe[5] = (raw_sequence >> 16) & 0xff; rawframe[6] = (raw_sequence >> 8) & 0xff; rawframe[7] = raw_sequence & 0xff; if ((result=sendto(hermesSocket, rawframe, 1032, 0, (struct sockaddr*)&clientAddr, sizeof(clientAddr))) < 0) { fprintf(stderr, "Error sending raw data to client %d\n", result); exit(EXIT_FAILURE); } raw_sequence++; rawframeoffset=8; } } } } }
fb4fdc1f4c65a9fc54d19040a7292b076e5b2f5a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <math_constants.h> extern "C" { __global__ void rtruncnorm_kernel(float *vals, int n, float *mu, float *sigma, float *lo, float *hi, int mu_len, int sigma_len, int lo_len, int hi_len, int maxtries, int rng_a, int rng_b, int rng_c) { // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; // Setup the RNG: // Sample: return; } } // END extern "C"
fb4fdc1f4c65a9fc54d19040a7292b076e5b2f5a.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #include <math_constants.h> extern "C" { __global__ void rtruncnorm_kernel(float *vals, int n, float *mu, float *sigma, float *lo, float *hi, int mu_len, int sigma_len, int lo_len, int hi_len, int maxtries, int rng_a, int rng_b, int rng_c) { // Usual block/thread indexing... int myblock = blockIdx.x + blockIdx.y * gridDim.x; int blocksize = blockDim.x * blockDim.y * blockDim.z; int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int idx = myblock * blocksize + subthread; // Setup the RNG: // Sample: return; } } // END extern "C"
d0dc6cdc9b72d0b93cd02dce160bb2013362ae32.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cublas_helpers.h" #include "trmm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor TrmmOperationProfiler::TrmmOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kTrmm, { {ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"}, {ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"}, }, { library::Provider::kCUBLAS} ) { description_ = " Triangular Matrix-Multiplication. 
D = alpha * A * B or alpha * B * A"; } /// Destructor TrmmOperationProfiler::~TrmmOperationProfiler() { } /// Prints usage statement for the math function void TrmmOperationProfiler::print_usage(std::ostream &out) const { out << "TRMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void TrmmOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n" << " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=Trmm \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } 
///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status TrmmOperationProfiler::TrmmProblem::parse( library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } if (operation_desc.side_mode == SideMode::kLeft) { this->lda = 
DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->m)}).front(); } else if (operation_desc.side_mode == SideMode::kRight) { this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->n)}).front(); } this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->m), int(this->n)}).front(); this->ldd = DeviceAllocation::get_packed_layout( operation_desc.D.layout, {int(this->m), int(this->n)}).front(); return Status::kSuccess; } /// Initializes a performance result void TrmmOperationProfiler::TrmmProblem::initialize_result( PerformanceResult &result, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "D", problem_space, std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", 
problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status TrmmOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } trmm_workspace_.configuration.problem_size.m() = int(problem_.m); trmm_workspace_.configuration.problem_size.n() = int(problem_.n); trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft) ? 
int(problem_.m) : int(problem_.n); trmm_workspace_.configuration.lda = problem_.lda; trmm_workspace_.configuration.ldb = problem_.ldb; trmm_workspace_.configuration.ldd = problem_.ldd; //trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices); trmm_workspace_.arguments.A = nullptr; trmm_workspace_.arguments.B = nullptr; trmm_workspace_.arguments.D = nullptr; trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments); } /// Initializes the performance result void TrmmOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); if (operation_desc.side_mode == SideMode::kLeft) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the diagonal will have (M*(M+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } else if (operation_desc.side_mode == SideMode::kRight) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the 
diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } // FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero result.flops = problem_.m * (problem_.m + 1) * problem_.n; result.runtime = 0; // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: result.flops *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: result.flops *= 4; break; default: break; } } /// Initializes workspace Status TrmmOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; if (operation_desc.side_mode == SideMode::kLeft) { trmm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.m)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } else if (operation_desc.side_mode == SideMode::kRight) { trmm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } trmm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldb)}, 1, // batch_count seed_shift++ ); trmm_workspace_.Computed = device_context.allocate_tensor( 
"D", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); trmm_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &trmm_workspace_.configuration, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kTrmm; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = 
trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported cases auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (cublas_satisfies(trmm_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == 
Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::TrmmDescription const &trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; hipblasStatus_t status = handle.get_cublas_create_status(); if (status != HIPBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Trmm() // // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Reference->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasTrmmDispatcher trmm_op( trmm_desc, trmm_workspace_.configuration, trmm_workspace_.arguments ); if (trmm_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = trmm_op(handle); // Handle errors if (status != HIPBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // 
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *trmm_workspace_.Computed, *trmm_workspace_.Reference ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, trmm_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool TrmmOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
d0dc6cdc9b72d0b93cd02dce160bb2013362ae32.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cublas_helpers.h" #include "trmm_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor TrmmOperationProfiler::TrmmOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kTrmm, { {ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"}, {ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"}, }, { library::Provider::kCUBLAS} ) { description_ = " Triangular Matrix-Multiplication. 
D = alpha * A * B or alpha * B * A"; } /// Destructor TrmmOperationProfiler::~TrmmOperationProfiler() { } /// Prints usage statement for the math function void TrmmOperationProfiler::print_usage(std::ostream &out) const { out << "TRMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void TrmmOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n" << " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=Trmm \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } 
///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status TrmmOperationProfiler::TrmmProblem::parse( library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } if (operation_desc.side_mode == SideMode::kLeft) { this->lda = 
DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->m)}).front(); } else if (operation_desc.side_mode == SideMode::kRight) { this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->n)}).front(); } this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->m), int(this->n)}).front(); this->ldd = DeviceAllocation::get_packed_layout( operation_desc.D.layout, {int(this->m), int(this->n)}).front(); return Status::kSuccess; } /// Initializes a performance result void TrmmOperationProfiler::TrmmProblem::initialize_result( PerformanceResult &result, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "D", problem_space, std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", 
problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status TrmmOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } trmm_workspace_.configuration.problem_size.m() = int(problem_.m); trmm_workspace_.configuration.problem_size.n() = int(problem_.n); trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft) ? 
int(problem_.m) : int(problem_.n); trmm_workspace_.configuration.lda = problem_.lda; trmm_workspace_.configuration.ldb = problem_.ldb; trmm_workspace_.configuration.ldd = problem_.ldd; //trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices); trmm_workspace_.arguments.A = nullptr; trmm_workspace_.arguments.B = nullptr; trmm_workspace_.arguments.D = nullptr; trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments); } /// Initializes the performance result void TrmmOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); if (operation_desc.side_mode == SideMode::kLeft) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the diagonal will have (M*(M+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } else if (operation_desc.side_mode == SideMode::kRight) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the 
diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } // FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero result.flops = problem_.m * (problem_.m + 1) * problem_.n; result.runtime = 0; // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: result.flops *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: result.flops *= 4; break; default: break; } } /// Initializes workspace Status TrmmOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; if (operation_desc.side_mode == SideMode::kLeft) { trmm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.m)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } else if (operation_desc.side_mode == SideMode::kRight) { trmm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } trmm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldb)}, 1, // batch_count seed_shift++ ); trmm_workspace_.Computed = device_context.allocate_tensor( 
"D", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); trmm_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &trmm_workspace_.configuration, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kTrmm; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = 
trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported cases auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (cublas_satisfies(trmm_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == 
Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::TrmmDescription const &trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; cublasStatus_t status = handle.get_cublas_create_status(); if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Trmm() // // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Reference->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasTrmmDispatcher trmm_op( trmm_desc, trmm_workspace_.configuration, trmm_workspace_.arguments ); if (trmm_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = trmm_op(handle); // Handle errors if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // 
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *trmm_workspace_.Computed, *trmm_workspace_.Reference ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, trmm_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool TrmmOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
ebebb9913bbbcd9ef436ddc83091feb0fee0ff87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Host/Numeric.hpp" #include "Device/Util/DeviceProperties.cuh" #include "Device/Util/PrintExt.cuh" #include "Device/Util/Algorithm.cuh" #include "Device/Primitives/BinarySearchLB.cuh" #include "Device/Primitives/impl/BinarySearchLB2.i.cuh" #include "Device/Primitives/MergePathLB.cuh" #include "Device/Util/Timer.cuh" //#include <Graph/GraphBase.hpp> #include <Graph/GraphStd.hpp> #include <Graph/GraphWeight.hpp> #include <Graph/BellmanFord.hpp> #include <Graph/Dijkstra.hpp> #include <iostream> #include "Device/Util/Timer.cuh" #include "Device/DataMovement/impl/Block.i.cuh" #include <hip/hip_cooperative_groups.h> //#define ENABLE_MGPU #include <random> #include <chrono> #include "StandardAPI.hpp" #if defined(ENABLE_MGPU) #include <moderngpu/kernel_load_balance.hxx> #endif using namespace graph; using namespace timer; using namespace hornets_nest; template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void MergePathTest2(const int* __restrict__ d_partitions, int num_partitions, const int* __restrict__ d_prefixsum, int prefixsum_size, int* __restrict__ d_pos, int* __restrict__ d_offset) { __shared__ int smem[ITEMS_PER_BLOCK]; const auto& lambda = [&](int pos, int, int index) { d_pos[index] = pos; //d_offset[index] = offset; }; //xlib::binarySearchLB2<BLOCK_SIZE, ITEMS_PER_BLOCK / BLOCK_SIZE, true> // (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda); xlib::mergePathLB<BLOCK_SIZE, ITEMS_PER_BLOCK> (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda); } #if 0//used by (commented out) unrechable code, may better be deleted unless this code will be reused. 
const bool PRINT = false; const int BLOCK_SIZE = 128; #endif __device__ int d_value; template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void copyKernel(const int* __restrict__ input, int num_blocks, int smem_size) { __shared__ int smem[ITEMS_PER_BLOCK]; for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) { xlib::block::StrideOp<0, ITEMS_PER_BLOCK, BLOCK_SIZE> ::copy(input + i * ITEMS_PER_BLOCK, smem_size, smem); /*auto smem_tmp = smem + threadIdx.x; auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x; #pragma unroll for (int i = 0; i < ITEMS_PER_BLOCK; i += BLOCK_SIZE) smem_tmp[i] = (i + threadIdx.x < smem_size) ? d_tmp[i] : 0;*/ if (threadIdx.x > 1023) d_value = smem[threadIdx.x]; } } template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void copyKernel2(const int* __restrict__ input, int num_blocks, int smem_size) { //__shared__ int smem[ITEMS_PER_BLOCK]; for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) { auto smem_tmp = xlib::dyn_smem + threadIdx.x; auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x; for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) { *smem_tmp = *d_tmp; smem_tmp += BLOCK_SIZE; d_tmp += BLOCK_SIZE; } if (threadIdx.x > 1023) d_value = xlib::dyn_smem[threadIdx.x]; } } __global__ void noLambdaKernel(const int* __restrict__ ptr2, int* __restrict__ ptr1, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) { ptr1[i] = ptr2[i]; ptr1[i + 10] = ptr2[i + 10]; ptr1[i + 20] = ptr2[i + 20]; } } template<typename Lambda> __global__ void lambdaKernel(Lambda lambda, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) lambda(i); } template<typename Lambda, typename... TArgs> __global__ void lambdaKernel2(Lambda lambda, int size, TArgs* __restrict__ ... 
args) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) lambda(i, args...); } struct LL { int* __restrict__ ptr1; const int* __restrict__ ptr2; __device__ __forceinline__ void operator()(int i) { const int* __restrict__ vv2 = ptr2; int* __restrict__ vv1 = ptr1; vv1[i] = vv2[i]; vv1[i + 10] = vv2[i + 10]; vv1[i + 20] = vv2[i + 20]; } }; int exec(int argc, char* argv[]) { using namespace graph; GraphStd<int, int> graph1; graph1.read(argv[1]); graph1.print_degree_distrib(); graph1.print_analysis(); auto weights = new int[graph1.nV()]; auto seed = std::chrono::high_resolution_clock::now().time_since_epoch() .count(); std::mt19937 engine(seed); std::uniform_int_distribution<int> distrib(0, 100); std::generate(weights, weights + graph1.nV(), [&](){ return distrib(engine); } ); GraphWeight<int, int, int> graph_weight(graph1.csr_out_edges(), graph1.nV(), graph1.csr_out_edges(), graph1.nE(), weights); Timer<HOST> TM1; Dijkstra<int, int, int> dijkstra(graph_weight); TM1.start(); for (int i = 0; i < graph1.nV(); i++) { dijkstra.run(i); dijkstra.reset(); } TM1.stop(); TM1.print("Dijkstra"); return 1; #if 0//unrechable code, may better be deleted unless this code will be reused. 
const int THREAD_ITEMS = 11; const int ITEMS_PER_BLOCK = BLOCK_SIZE * THREAD_ITEMS; int num_blocks_copy = 100000; int* d_input; gpu::allocate(d_input, ITEMS_PER_BLOCK * num_blocks_copy); Timer<DEVICE, micro> TM; TM.start(); hipLaunchKernelGGL(( copyKernel<ITEMS_PER_BLOCK, BLOCK_SIZE>) , dim3(num_blocks_copy), dim3(BLOCK_SIZE) , 0, 0, d_input, num_blocks_copy, 9 * BLOCK_SIZE); TM.stop(); TM.print("copy1"); TM.start(); hipLaunchKernelGGL(( copyKernel2<ITEMS_PER_BLOCK, BLOCK_SIZE>) , dim3(num_blocks_copy), dim3(BLOCK_SIZE) , 0, 0, d_input, num_blocks_copy, 9 * BLOCK_SIZE); TM.stop(); TM.print("copy2"); gpu::free(d_input); return 1; GraphStd<> graph; graph.read(argv[1], parsing_prop::PRINT_INFO | parsing_prop::RM_SINGLETON); int size = graph.nV(); auto prefixsum = graph.csr_out_offsets(); int ceil_total = xlib::upper_approx(graph.nE(), ITEMS_PER_BLOCK); //-------------------------------------------------------------------------- // HOST auto h_pos = new int[ceil_total]; auto h_offset = new int[ceil_total]; for (int i = 0, k = 0; i < size; i++) { for (int j = prefixsum[i]; j < prefixsum[i + 1]; j++) { h_pos[k] = i; h_offset[k++] = j - prefixsum[i]; } } for (int i = prefixsum[size]; i < ceil_total; i++) h_pos[i] = -1; //-------------------------------------------------------------------------- int num_merge = graph.nE() + graph.nV(); if (PRINT) { graph.print_raw(); std::cout << "Experted results:\n\n"; host::printArray(prefixsum, size + 1); host::printArray(h_pos, prefixsum[size]); host::printArray(h_offset, prefixsum[size]); } int* d_prefixsum, *d_pos, *d_offset, *d_partitions; int merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merge); int merge_block_partitions = xlib::ceil_div<BLOCK_SIZE>(merge_blocks); int num_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(graph.nE()); int num_block_partitions = xlib::ceil_div<BLOCK_SIZE>(num_blocks); std::cout << " THREAD_ITEMS: " << THREAD_ITEMS << "\n ITEMS_PER_BLOCK: " << ITEMS_PER_BLOCK << "\n Total items: " << graph.nE() << "\n 
Num blocks: " << num_blocks << "\n Num Merges Part.: " << merge_blocks << "\n" << std::endl; gpu::allocate(d_prefixsum, size + 1); gpu::allocate(d_pos, ceil_total); gpu::allocate(d_offset, ceil_total); gpu::allocate(d_partitions, merge_blocks + 1); host::copyToDevice(prefixsum, size + 1, d_prefixsum); gpu::memsetZero(d_pos, ceil_total); gpu::memsetZero(d_offset, ceil_total); gpu::memsetZero(d_partitions, num_blocks + 1); //-------------------------------------------------------------------------- TM.start(); hipLaunchKernelGGL(( xlib::mergePathLBPartition <ITEMS_PER_BLOCK>) , dim3(merge_block_partitions), dim3(BLOCK_SIZE) , 0, 0, d_prefixsum, size, graph.nE(), num_merge, d_partitions, merge_blocks); TM.stop(); TM.print("Partition: "); TM.start(); hipLaunchKernelGGL(( MergePathTest2<ITEMS_PER_BLOCK, BLOCK_SIZE>) , dim3(merge_blocks), dim3(BLOCK_SIZE) , 0, 0, d_partitions, merge_blocks, d_prefixsum, size + 1, d_pos, d_offset); TM.stop(); TM.print("BinarySearch: "); CHECK_CUDA_ERROR //-------------------------------------------------------------------------- if (PRINT) { std::cout << "Results:\n\n"; gpu::printArray(d_pos, graph.nE()); gpu::printArray(d_offset, graph.nE()); } std::cout << "\n Check Positions: " << gpu::equal(h_pos, h_pos + graph.nE(), d_pos) //<< "\n Check Offsets: " //<< gpu::equal(h_offset, h_offset + graph.nE(), d_offset) << "\n" << std::endl; //L1: #if defined(ENABLE_MGPU) using namespace mgpu; standard_context_t context; int num_segments = graph.nV(); int count = graph.nE(); const auto& vector = std::vector<int>(prefixsum, prefixsum + num_segments); mem_t<int> segments = to_mem(vector, context); mem_t<int> lbs(count, context); TM.start(); load_balance_search(count, segments.data(), num_segments, lbs.data(), context); TM.stop(); TM.print("ModernGPU: "); auto lbs_host = from_mem(lbs); std::cout << "\n Check Offsets: " << std::equal(h_pos, h_pos + graph.nE(), lbs_host.data()) << "\n" << std::endl; #endif gpu::free(d_partitions, d_offset, d_pos, 
d_prefixsum); return 0; #endif } int main(int argc, char* argv[]) { int ret = 0; #if defined(RMM_WRAPPER) gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. #endif ret = exec(argc, argv); #if defined(RMM_WRAPPER) }//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. gpu::finalizeRMMPoolAllocation(); #endif return ret; }
ebebb9913bbbcd9ef436ddc83091feb0fee0ff87.cu
#include "Host/Numeric.hpp" #include "Device/Util/DeviceProperties.cuh" #include "Device/Util/PrintExt.cuh" #include "Device/Util/Algorithm.cuh" #include "Device/Primitives/BinarySearchLB.cuh" #include "Device/Primitives/impl/BinarySearchLB2.i.cuh" #include "Device/Primitives/MergePathLB.cuh" #include "Device/Util/Timer.cuh" //#include <Graph/GraphBase.hpp> #include <Graph/GraphStd.hpp> #include <Graph/GraphWeight.hpp> #include <Graph/BellmanFord.hpp> #include <Graph/Dijkstra.hpp> #include <iostream> #include "Device/Util/Timer.cuh" #include "Device/DataMovement/impl/Block.i.cuh" #include <cooperative_groups.h> //#define ENABLE_MGPU #include <random> #include <chrono> #include "StandardAPI.hpp" #if defined(ENABLE_MGPU) #include <moderngpu/kernel_load_balance.hxx> #endif using namespace graph; using namespace timer; using namespace hornets_nest; template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void MergePathTest2(const int* __restrict__ d_partitions, int num_partitions, const int* __restrict__ d_prefixsum, int prefixsum_size, int* __restrict__ d_pos, int* __restrict__ d_offset) { __shared__ int smem[ITEMS_PER_BLOCK]; const auto& lambda = [&](int pos, int, int index) { d_pos[index] = pos; //d_offset[index] = offset; }; //xlib::binarySearchLB2<BLOCK_SIZE, ITEMS_PER_BLOCK / BLOCK_SIZE, true> // (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda); xlib::mergePathLB<BLOCK_SIZE, ITEMS_PER_BLOCK> (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda); } #if 0//used by (commented out) unrechable code, may better be deleted unless this code will be reused. 
const bool PRINT = false; const int BLOCK_SIZE = 128; #endif __device__ int d_value; template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void copyKernel(const int* __restrict__ input, int num_blocks, int smem_size) { __shared__ int smem[ITEMS_PER_BLOCK]; for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) { xlib::block::StrideOp<0, ITEMS_PER_BLOCK, BLOCK_SIZE> ::copy(input + i * ITEMS_PER_BLOCK, smem_size, smem); /*auto smem_tmp = smem + threadIdx.x; auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x; #pragma unroll for (int i = 0; i < ITEMS_PER_BLOCK; i += BLOCK_SIZE) smem_tmp[i] = (i + threadIdx.x < smem_size) ? d_tmp[i] : 0;*/ if (threadIdx.x > 1023) d_value = smem[threadIdx.x]; } } template<int ITEMS_PER_BLOCK, int BLOCK_SIZE> __global__ void copyKernel2(const int* __restrict__ input, int num_blocks, int smem_size) { //__shared__ int smem[ITEMS_PER_BLOCK]; for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) { auto smem_tmp = xlib::dyn_smem + threadIdx.x; auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x; for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) { *smem_tmp = *d_tmp; smem_tmp += BLOCK_SIZE; d_tmp += BLOCK_SIZE; } if (threadIdx.x > 1023) d_value = xlib::dyn_smem[threadIdx.x]; } } __global__ void noLambdaKernel(const int* __restrict__ ptr2, int* __restrict__ ptr1, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) { ptr1[i] = ptr2[i]; ptr1[i + 10] = ptr2[i + 10]; ptr1[i + 20] = ptr2[i + 20]; } } template<typename Lambda> __global__ void lambdaKernel(Lambda lambda, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) lambda(i); } template<typename Lambda, typename... TArgs> __global__ void lambdaKernel2(Lambda lambda, int size, TArgs* __restrict__ ... 
args) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < size; i += stride) lambda(i, args...); } struct LL { int* __restrict__ ptr1; const int* __restrict__ ptr2; __device__ __forceinline__ void operator()(int i) { const int* __restrict__ vv2 = ptr2; int* __restrict__ vv1 = ptr1; vv1[i] = vv2[i]; vv1[i + 10] = vv2[i + 10]; vv1[i + 20] = vv2[i + 20]; } }; int exec(int argc, char* argv[]) { using namespace graph; GraphStd<int, int> graph1; graph1.read(argv[1]); graph1.print_degree_distrib(); graph1.print_analysis(); auto weights = new int[graph1.nV()]; auto seed = std::chrono::high_resolution_clock::now().time_since_epoch() .count(); std::mt19937 engine(seed); std::uniform_int_distribution<int> distrib(0, 100); std::generate(weights, weights + graph1.nV(), [&](){ return distrib(engine); } ); GraphWeight<int, int, int> graph_weight(graph1.csr_out_edges(), graph1.nV(), graph1.csr_out_edges(), graph1.nE(), weights); Timer<HOST> TM1; Dijkstra<int, int, int> dijkstra(graph_weight); TM1.start(); for (int i = 0; i < graph1.nV(); i++) { dijkstra.run(i); dijkstra.reset(); } TM1.stop(); TM1.print("Dijkstra"); return 1; #if 0//unrechable code, may better be deleted unless this code will be reused. 
const int THREAD_ITEMS = 11; const int ITEMS_PER_BLOCK = BLOCK_SIZE * THREAD_ITEMS; int num_blocks_copy = 100000; int* d_input; gpu::allocate(d_input, ITEMS_PER_BLOCK * num_blocks_copy); Timer<DEVICE, micro> TM; TM.start(); copyKernel<ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_blocks_copy, BLOCK_SIZE >>> (d_input, num_blocks_copy, 9 * BLOCK_SIZE); TM.stop(); TM.print("copy1"); TM.start(); copyKernel2<ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_blocks_copy, BLOCK_SIZE >>> (d_input, num_blocks_copy, 9 * BLOCK_SIZE); TM.stop(); TM.print("copy2"); gpu::free(d_input); return 1; GraphStd<> graph; graph.read(argv[1], parsing_prop::PRINT_INFO | parsing_prop::RM_SINGLETON); int size = graph.nV(); auto prefixsum = graph.csr_out_offsets(); int ceil_total = xlib::upper_approx(graph.nE(), ITEMS_PER_BLOCK); //-------------------------------------------------------------------------- // HOST auto h_pos = new int[ceil_total]; auto h_offset = new int[ceil_total]; for (int i = 0, k = 0; i < size; i++) { for (int j = prefixsum[i]; j < prefixsum[i + 1]; j++) { h_pos[k] = i; h_offset[k++] = j - prefixsum[i]; } } for (int i = prefixsum[size]; i < ceil_total; i++) h_pos[i] = -1; //-------------------------------------------------------------------------- int num_merge = graph.nE() + graph.nV(); if (PRINT) { graph.print_raw(); std::cout << "Experted results:\n\n"; host::printArray(prefixsum, size + 1); host::printArray(h_pos, prefixsum[size]); host::printArray(h_offset, prefixsum[size]); } int* d_prefixsum, *d_pos, *d_offset, *d_partitions; int merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merge); int merge_block_partitions = xlib::ceil_div<BLOCK_SIZE>(merge_blocks); int num_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(graph.nE()); int num_block_partitions = xlib::ceil_div<BLOCK_SIZE>(num_blocks); std::cout << " THREAD_ITEMS: " << THREAD_ITEMS << "\n ITEMS_PER_BLOCK: " << ITEMS_PER_BLOCK << "\n Total items: " << graph.nE() << "\n Num blocks: " << num_blocks << "\n Num Merges Part.: " << merge_blocks 
<< "\n" << std::endl; gpu::allocate(d_prefixsum, size + 1); gpu::allocate(d_pos, ceil_total); gpu::allocate(d_offset, ceil_total); gpu::allocate(d_partitions, merge_blocks + 1); host::copyToDevice(prefixsum, size + 1, d_prefixsum); gpu::memsetZero(d_pos, ceil_total); gpu::memsetZero(d_offset, ceil_total); gpu::memsetZero(d_partitions, num_blocks + 1); //-------------------------------------------------------------------------- TM.start(); xlib::mergePathLBPartition <ITEMS_PER_BLOCK> <<< merge_block_partitions, BLOCK_SIZE >>> (d_prefixsum, size, graph.nE(), num_merge, d_partitions, merge_blocks); TM.stop(); TM.print("Partition: "); TM.start(); MergePathTest2<ITEMS_PER_BLOCK, BLOCK_SIZE> <<< merge_blocks, BLOCK_SIZE >>> (d_partitions, merge_blocks, d_prefixsum, size + 1, d_pos, d_offset); TM.stop(); TM.print("BinarySearch: "); CHECK_CUDA_ERROR //-------------------------------------------------------------------------- if (PRINT) { std::cout << "Results:\n\n"; gpu::printArray(d_pos, graph.nE()); gpu::printArray(d_offset, graph.nE()); } std::cout << "\n Check Positions: " << gpu::equal(h_pos, h_pos + graph.nE(), d_pos) //<< "\n Check Offsets: " //<< gpu::equal(h_offset, h_offset + graph.nE(), d_offset) << "\n" << std::endl; //L1: #if defined(ENABLE_MGPU) using namespace mgpu; standard_context_t context; int num_segments = graph.nV(); int count = graph.nE(); const auto& vector = std::vector<int>(prefixsum, prefixsum + num_segments); mem_t<int> segments = to_mem(vector, context); mem_t<int> lbs(count, context); TM.start(); load_balance_search(count, segments.data(), num_segments, lbs.data(), context); TM.stop(); TM.print("ModernGPU: "); auto lbs_host = from_mem(lbs); std::cout << "\n Check Offsets: " << std::equal(h_pos, h_pos + graph.nE(), lbs_host.data()) << "\n" << std::endl; #endif gpu::free(d_partitions, d_offset, d_pos, d_prefixsum); return 0; #endif } int main(int argc, char* argv[]) { int ret = 0; #if defined(RMM_WRAPPER) 
gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. #endif ret = exec(argc, argv); #if defined(RMM_WRAPPER) }//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. gpu::finalizeRMMPoolAllocation(); #endif return ret; }
3a27de02f8b9eb347cbced644e8855f778a02fa1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Program: addVector This is a modification of the addVectorCUDA.cu from the class folder. The modification was done to complete number 3 on Homework #4. Changes made to the original program inclues function calculations, location of code statments, and adding/removing comments. This was done so to complete the assignment as well as to understand the logic behing parallel coding using CUDA GPU. Author: Inanc Senocak Editor: Dustin (Ting-Hsuan) Ma Compile: nvcc -O2 addVectorCUDA.cu -o run.exe Execute: ./run.exe */ #include "timer.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/resource.h> #define NX 1000000000 #define RADIUS 5 #define BLOCK_SIZE 256 #define SIZE BLOCK_SIZE + 2 * RADIUS typedef float REAL; __global__ void GPU_stencil(REAL *in, REAL *out) { __shared__ REAL tmp[SIZE]; // This is the correct way to dynamically allocate memory for each thread // defining the index used by global and local array int gindex = blockIdx.x * blockDim.x + threadIdx.x; int lindex = threadIdx.x + RADIUS; // setting array elemtns into tmp array tmp[lindex] = in[gindex]; __syncthreads(); // Applying the stencil REAL sum = 0.0f; for (int j = -RADIUS; j <= RADIUS; j++) { sum += tmp[lindex + j]; } // Store the result out[gindex] = sum; } void CPU_stencil(REAL *in, REAL *out) { // CPU stencil done in class for (int i = RADIUS; i < NX; i++) { REAL sum = 0.0f; for (int j = -RADIUS; j <= RADIUS; j++) { sum += in[i + j]; } out[i] = sum; } } int main(void) { // Allocating memory for CPU REAL *a = (REAL *) malloc(NX * sizeof(*a)); REAL *b = (REAL *) malloc(NX * sizeof(*b)); // Allocating memory for GPU REAL *d_a, *d_b; hipMallocManaged(&d_a, NX * sizeof(REAL)); hipMallocManaged(&d_b, NX * sizeof(REAL)); REAL *c = (REAL *) malloc(NX * sizeof(*c)); // created to store values from Device to Host // Let's fill the arrays with some numbers for (int i = 0; i < NX; i++) { a[i] = 0.0f; 
b[i] = 2.0f; c[i] = 0.0f; } // *********************CPU************************ double start, finish; // time for CPU REAL elapsedTime; // in float because it is recorded in ms GET_TIME(start); CPU_stencil(b, a); // calling CPU function GET_TIME(finish); // Outputting answer for CPU calculation printf("|============================CPU============================|\n"); printf("a[%d] = %4f, elapsed wall time (host) = %.6f seconds \n", RADIUS, a[RADIUS], finish - start); printf("\n"); // *********************GPU************************ int nBlocks = (NX + BLOCK_SIZE - 1) / BLOCK_SIZE; // allows n to round up // Copying array memory from host to device hipMemcpy(d_b, b, NX * sizeof(REAL), hipMemcpyHostToDevice); hipEvent_t timeStart, timeStop; // hipEvent_t initializes variable used in event time hipEventCreate(&timeStart); hipEventCreate(&timeStop); hipEventRecord(timeStart, 0); hipLaunchKernelGGL(( GPU_stencil), dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, d_b, d_a); // replaced <<<1,1>>> with current hipEventRecord(timeStop, 0); hipEventSynchronize(timeStop); hipEventElapsedTime(&elapsedTime, timeStart, timeStop); // Copying result array from device back to memory hipMemcpy(c, d_a, NX * sizeof(REAL), hipMemcpyDeviceToHost); // Outputting answer for GPU calculation printf("|============================GPU============================|\n"); printf("c[%d] = %4f, elapsed wall time (device) = %3.1f ms\n", RADIUS, c[RADIUS], elapsedTime); // Removing event created for timing the calculation hipEventDestroy(timeStart); hipEventDestroy(timeStop); // Deallocating memory used for host and device free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); return EXIT_SUCCESS; }
3a27de02f8b9eb347cbced644e8855f778a02fa1.cu
/* Program: addVector This is a modification of the addVectorCUDA.cu from the class folder. The modification was done to complete number 3 on Homework #4. Changes made to the original program inclues function calculations, location of code statments, and adding/removing comments. This was done so to complete the assignment as well as to understand the logic behing parallel coding using CUDA GPU. Author: Inanc Senocak Editor: Dustin (Ting-Hsuan) Ma Compile: nvcc -O2 addVectorCUDA.cu -o run.exe Execute: ./run.exe */ #include "timer.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/resource.h> #define NX 1000000000 #define RADIUS 5 #define BLOCK_SIZE 256 #define SIZE BLOCK_SIZE + 2 * RADIUS typedef float REAL; __global__ void GPU_stencil(REAL *in, REAL *out) { __shared__ REAL tmp[SIZE]; // This is the correct way to dynamically allocate memory for each thread // defining the index used by global and local array int gindex = blockIdx.x * blockDim.x + threadIdx.x; int lindex = threadIdx.x + RADIUS; // setting array elemtns into tmp array tmp[lindex] = in[gindex]; __syncthreads(); // Applying the stencil REAL sum = 0.0f; for (int j = -RADIUS; j <= RADIUS; j++) { sum += tmp[lindex + j]; } // Store the result out[gindex] = sum; } void CPU_stencil(REAL *in, REAL *out) { // CPU stencil done in class for (int i = RADIUS; i < NX; i++) { REAL sum = 0.0f; for (int j = -RADIUS; j <= RADIUS; j++) { sum += in[i + j]; } out[i] = sum; } } int main(void) { // Allocating memory for CPU REAL *a = (REAL *) malloc(NX * sizeof(*a)); REAL *b = (REAL *) malloc(NX * sizeof(*b)); // Allocating memory for GPU REAL *d_a, *d_b; cudaMallocManaged(&d_a, NX * sizeof(REAL)); cudaMallocManaged(&d_b, NX * sizeof(REAL)); REAL *c = (REAL *) malloc(NX * sizeof(*c)); // created to store values from Device to Host // Let's fill the arrays with some numbers for (int i = 0; i < NX; i++) { a[i] = 0.0f; b[i] = 2.0f; c[i] = 0.0f; } // *********************CPU************************ 
double start, finish; // time for CPU REAL elapsedTime; // in float because it is recorded in ms GET_TIME(start); CPU_stencil(b, a); // calling CPU function GET_TIME(finish); // Outputting answer for CPU calculation printf("|============================CPU============================|\n"); printf("a[%d] = %4f, elapsed wall time (host) = %.6f seconds \n", RADIUS, a[RADIUS], finish - start); printf("\n"); // *********************GPU************************ int nBlocks = (NX + BLOCK_SIZE - 1) / BLOCK_SIZE; // allows n to round up // Copying array memory from host to device cudaMemcpy(d_b, b, NX * sizeof(REAL), cudaMemcpyHostToDevice); cudaEvent_t timeStart, timeStop; // cudaEvent_t initializes variable used in event time cudaEventCreate(&timeStart); cudaEventCreate(&timeStop); cudaEventRecord(timeStart, 0); GPU_stencil<<<nBlocks, BLOCK_SIZE>>>(d_b, d_a); // replaced <<<1,1>>> with current cudaEventRecord(timeStop, 0); cudaEventSynchronize(timeStop); cudaEventElapsedTime(&elapsedTime, timeStart, timeStop); // Copying result array from device back to memory cudaMemcpy(c, d_a, NX * sizeof(REAL), cudaMemcpyDeviceToHost); // Outputting answer for GPU calculation printf("|============================GPU============================|\n"); printf("c[%d] = %4f, elapsed wall time (device) = %3.1f ms\n", RADIUS, c[RADIUS], elapsedTime); // Removing event created for timing the calculation cudaEventDestroy(timeStart); cudaEventDestroy(timeStop); // Deallocating memory used for host and device free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); return EXIT_SUCCESS; }
cf4e37567b9f2df7eab17e7e777ee974f73ba378.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA 
will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. // TODO: change it to DeviceGuard CUDAContext context(-1); // take current device CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. 
template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. 
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. 
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? 
CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { 
DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. 
We will need to find some // way of resolving this problem. hip::HIPStreamGuardMasqueradingAsCUDA g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = hip::HIPCachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { hip::HIPCachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
cf4e37567b9f2df7eab17e7e777ee974f73ba378.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <c10/cuda/CUDACachingAllocator.h> #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void 
CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. // TODO: change it to DeviceGuard CUDAContext context(-1); // take current device CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. 
template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. 
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. 
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? 
CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { 
DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. 
We will need to find some // way of resolving this problem. cuda::CUDAStreamGuard g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = cuda::CUDACachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { cuda::CUDACachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
6c2015d7b79d9dcac98f3ca67187b1c458513dc0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include"_reg_resampling.h" #include"_reg_maths.h" #include "_reg_common_cuda.h" #include"_reg_tools.h" #include"_reg_ReadWriteImage.h" #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/gather.h> #include "affineDeformationKernel.h" //CUDA affine kernel /* *************************************************************** */ __device__ __inline__ void getPosition(float* position, float* matrix, double* voxel, const unsigned int idx) { position[idx] = (float) ((double) matrix[idx * 4 + 0] * voxel[0] + (double) matrix[idx * 4 + 1] * voxel[1] + (double) matrix[idx * 4 + 2] * voxel[2] + (double) matrix[idx * 4 + 3]); } /* *************************************************************** */ __device__ __inline__ double getPosition(float* matrix, double* voxel, const unsigned int idx) { unsigned long index = idx * 4; return (double)matrix[index++] * voxel[0] + (double)matrix[index++] * voxel[1] + (double)matrix[index++] * voxel[2] + (double)matrix[index]; } /* *************************************************************** */ __global__ void affineKernel(float* transformationMatrix, float* defField, int* mask, const uint3 dims, const unsigned long voxelNumber, const bool composition) { // Get the current coordinate const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const unsigned int z = blockIdx.z * blockDim.z + threadIdx.z; const unsigned long index = x + dims.x * (y + z * dims.y); if (z<dims.z && y<dims.y && x<dims.x && mask[index] >= 0) { double voxel[3]; float *deformationFieldPtrX = &defField[index]; float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber]; float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber]; voxel[0] = composition ? 
*deformationFieldPtrX : x; voxel[1] = composition ? *deformationFieldPtrY : y; voxel[2] = composition ? *deformationFieldPtrZ : z; /* the deformation field (real coordinates) is stored */ *deformationFieldPtrX = (float)getPosition(transformationMatrix, voxel, 0); *deformationFieldPtrY = (float)getPosition(transformationMatrix, voxel, 1); *deformationFieldPtrZ = (float)getPosition(transformationMatrix, voxel, 2); } } /* *************************************************************** */ void launchAffine(mat44 *affineTransformation, nifti_image *deformationField, float **def_d, int **mask_d, float **trans_d, bool compose) { const unsigned int xThreads = 8; const unsigned int yThreads = 8; const unsigned int zThreads = 8; const unsigned int xBlocks = ((deformationField->nx % xThreads) == 0) ? (deformationField->nx / xThreads) : (deformationField->nx / xThreads) + 1; const unsigned int yBlocks = ((deformationField->ny % yThreads) == 0) ? (deformationField->ny / yThreads) : (deformationField->ny / yThreads) + 1; const unsigned int zBlocks = ((deformationField->nz % zThreads) == 0) ? (deformationField->nz / zThreads) : (deformationField->nz / zThreads) + 1; dim3 G1_b(xBlocks, yBlocks, zBlocks); dim3 B1_b(xThreads, yThreads, zThreads); float* trans = (float *)malloc(16 * sizeof(float)); const mat44 *targetMatrix = (deformationField->sform_code > 0) ? &(deformationField->sto_xyz) : &(deformationField->qto_xyz); mat44 transformationMatrix = (compose == true) ? 
*affineTransformation : reg_mat44_mul(affineTransformation, targetMatrix); mat44ToCptr(transformationMatrix, trans); NR_CUDA_SAFE_CALL(hipMemcpy(*trans_d, trans, 16 * sizeof(float), hipMemcpyHostToDevice)); free(trans); uint3 dims_d = make_uint3(deformationField->nx, deformationField->ny, deformationField->nz); affineKernel << <G1_b, B1_b >> >(*trans_d, *def_d, *mask_d, dims_d, deformationField->nx* deformationField->ny* deformationField->nz, compose); #ifndef NDEBUG NR_CUDA_CHECK_KERNEL(G1_b, B1_b) #else NR_CUDA_SAFE_CALL(hipDeviceSynchronize()); #endif }
6c2015d7b79d9dcac98f3ca67187b1c458513dc0.cu
#include <stdio.h> #include <assert.h> #include "cuda_runtime.h" #include "cuda.h" #include"_reg_resampling.h" #include"_reg_maths.h" #include "_reg_common_cuda.h" #include"_reg_tools.h" #include"_reg_ReadWriteImage.h" #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/gather.h> #include "affineDeformationKernel.h" //CUDA affine kernel /* *************************************************************** */ __device__ __inline__ void getPosition(float* position, float* matrix, double* voxel, const unsigned int idx) { position[idx] = (float) ((double) matrix[idx * 4 + 0] * voxel[0] + (double) matrix[idx * 4 + 1] * voxel[1] + (double) matrix[idx * 4 + 2] * voxel[2] + (double) matrix[idx * 4 + 3]); } /* *************************************************************** */ __device__ __inline__ double getPosition(float* matrix, double* voxel, const unsigned int idx) { unsigned long index = idx * 4; return (double)matrix[index++] * voxel[0] + (double)matrix[index++] * voxel[1] + (double)matrix[index++] * voxel[2] + (double)matrix[index]; } /* *************************************************************** */ __global__ void affineKernel(float* transformationMatrix, float* defField, int* mask, const uint3 dims, const unsigned long voxelNumber, const bool composition) { // Get the current coordinate const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const unsigned int z = blockIdx.z * blockDim.z + threadIdx.z; const unsigned long index = x + dims.x * (y + z * dims.y); if (z<dims.z && y<dims.y && x<dims.x && mask[index] >= 0) { double voxel[3]; float *deformationFieldPtrX = &defField[index]; float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber]; float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber]; voxel[0] = composition ? *deformationFieldPtrX : x; voxel[1] = composition ? *deformationFieldPtrY : y; voxel[2] = composition ? 
*deformationFieldPtrZ : z; /* the deformation field (real coordinates) is stored */ *deformationFieldPtrX = (float)getPosition(transformationMatrix, voxel, 0); *deformationFieldPtrY = (float)getPosition(transformationMatrix, voxel, 1); *deformationFieldPtrZ = (float)getPosition(transformationMatrix, voxel, 2); } } /* *************************************************************** */ void launchAffine(mat44 *affineTransformation, nifti_image *deformationField, float **def_d, int **mask_d, float **trans_d, bool compose) { const unsigned int xThreads = 8; const unsigned int yThreads = 8; const unsigned int zThreads = 8; const unsigned int xBlocks = ((deformationField->nx % xThreads) == 0) ? (deformationField->nx / xThreads) : (deformationField->nx / xThreads) + 1; const unsigned int yBlocks = ((deformationField->ny % yThreads) == 0) ? (deformationField->ny / yThreads) : (deformationField->ny / yThreads) + 1; const unsigned int zBlocks = ((deformationField->nz % zThreads) == 0) ? (deformationField->nz / zThreads) : (deformationField->nz / zThreads) + 1; dim3 G1_b(xBlocks, yBlocks, zBlocks); dim3 B1_b(xThreads, yThreads, zThreads); float* trans = (float *)malloc(16 * sizeof(float)); const mat44 *targetMatrix = (deformationField->sform_code > 0) ? &(deformationField->sto_xyz) : &(deformationField->qto_xyz); mat44 transformationMatrix = (compose == true) ? *affineTransformation : reg_mat44_mul(affineTransformation, targetMatrix); mat44ToCptr(transformationMatrix, trans); NR_CUDA_SAFE_CALL(cudaMemcpy(*trans_d, trans, 16 * sizeof(float), cudaMemcpyHostToDevice)); free(trans); uint3 dims_d = make_uint3(deformationField->nx, deformationField->ny, deformationField->nz); affineKernel << <G1_b, B1_b >> >(*trans_d, *def_d, *mask_d, dims_d, deformationField->nx* deformationField->ny* deformationField->nz, compose); #ifndef NDEBUG NR_CUDA_CHECK_KERNEL(G1_b, B1_b) #else NR_CUDA_SAFE_CALL(cudaThreadSynchronize()); #endif }
9a74a07c7e87256792508556d7fbbff5c40ebead.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // TODO: This code is currently unused. Update the implementation to work with ZNCC-based cost? // To have the residuals conform to what works well with Gauss-Newton, could use an affine brightness mapping (with optimized factor & bias parameters) // instead of the ZNCC computation, which should achieve the same affine invariance. // // (Mostly) auto-generated function. // typedef float Scalar; // // // opcount = 243 // __forceinline__ __device__ void ComputeResidualAndJacobian( // Scalar cx, Scalar cy, Scalar fx, Scalar fy, // Scalar inv_depth, Scalar n_x, Scalar n_y, // Scalar nx, Scalar ny, // Scalar other_nx, Scalar other_ny, // Scalar ref_intensity, // Scalar str_0_0, Scalar str_0_1, Scalar str_0_2, Scalar str_0_3, // Scalar str_1_0, Scalar str_1_1, Scalar str_1_2, Scalar str_1_3, // Scalar str_2_0, Scalar str_2_1, Scalar str_2_2, Scalar str_2_3, // hipTextureObject_t stereo_texture, // Scalar* residuals, Scalar* jacobian) { // const Scalar term0 = sqrt(-n_x*n_x - n_y*n_y + 1); // const Scalar term1 = n_x*other_nx + n_y*other_ny - term0; // const Scalar term2 = 1.0f/term1; // const Scalar term3 = str_1_2*term2; // const Scalar term4 = 1.0f/inv_depth; // const Scalar term5 = n_x*nx; // const Scalar term6 = n_y*ny; // const Scalar term7 = -term0*term4 + term4*term5 + term4*term6; // const Scalar term8 = other_nx*str_1_0*term2; // const Scalar term9 = other_ny*str_1_1*term2; // const Scalar term10 = str_1_3 + term3*term7 + term7*term8 + term7*term9; // const Scalar term11 = str_2_2*term2; // const Scalar term12 = other_nx*str_2_0*term2; // const Scalar term13 = other_ny*str_2_1*term2; // const Scalar term14 = str_2_3 + term11*term7 + term12*term7 + term13*term7; // const Scalar term15 = 1.0f/term14; // const Scalar term16 = fy*term15; // // float py = cy + term10*term16; // int iy = static_cast<int>(py); // const Scalar term17 = py - iy; // // const Scalar term18 = 
str_0_2*term2; // const Scalar term19 = other_nx*str_0_0*term2; // const Scalar term20 = other_ny*str_0_1*term2; // const Scalar term21 = str_0_3 + term18*term7 + term19*term7 + term20*term7; // const Scalar term22 = fx*term15; // // float px = cx + term21*term22; // int ix = static_cast<int>(px); // const Scalar term23 = px - ix; // // Scalar top_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 0.5f); // Scalar top_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 0.5f); // Scalar bottom_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 1.5f); // Scalar bottom_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 1.5f); // // const Scalar term24 = -term23 + 1; // const Scalar term25 = bottom_left*term24 + bottom_right*term23; // const Scalar term26 = -term17 + 1; // const Scalar term27 = term23*top_right; // const Scalar term28 = term24*top_left; // const Scalar term29 = -term17*(bottom_left - bottom_right) - term26*(top_left - top_right); // const Scalar term30 = term4 * term4; // const Scalar term31 = term0 - term5 - term6; // const Scalar term32 = term30*term31; // const Scalar term33 = term15 * term15; // const Scalar term34 = term30*term31*term33*(term11 + term12 + term13); // const Scalar term35 = term25 - term27 - term28; // const Scalar term36 = 1.0f/term0; // const Scalar term37 = n_x*term36; // const Scalar term38 = nx*term4 + term37*term4; // const Scalar term39 = -other_nx - term37; // const Scalar term40 = term2 * term2; // // const Scalar term40Xterm7 = term40*term7; // // const Scalar term41 = str_0_2*term40Xterm7; // const Scalar term42 = other_nx*str_0_0*term40Xterm7; // const Scalar term43 = other_ny*str_0_1*term40Xterm7; // const Scalar term44 = fx*term21*term33; // const Scalar term45 = str_2_2*term40Xterm7; // const Scalar term46 = other_nx*str_2_0*term40Xterm7; // const Scalar term47 = other_ny*str_2_1*term40Xterm7; // const Scalar term48 = -term11*term38 - term12*term38 - term13*term38 - 
term39*term45 - term39*term46 - term39*term47; // const Scalar term49 = str_1_2*term40Xterm7; // const Scalar term50 = other_nx*str_1_0*term40Xterm7; // const Scalar term51 = other_ny*str_1_1*term40Xterm7; // const Scalar term52 = fy*term10*term33; // const Scalar term53 = n_y*term36; // const Scalar term54 = ny*term4 + term4*term53; // const Scalar term55 = -other_ny - term53; // const Scalar term56 = -term11*term54 - term12*term54 - term13*term54 - term45*term55 - term46*term55 - term47*term55; // // *residuals = -ref_intensity + term17*term25 + term26*(term27 + term28); // jacobian[0] = term29*(-fx*term21*term34 + term22*(term18*term32 + term19*term32 + term20*term32)) + term35*(-fy*term10*term34 + term16*(term3*term32 + term32*term8 + term32*term9)); // jacobian[1] = term29*(term22*(term18*term38 + term19*term38 + term20*term38 + term39*term41 + term39*term42 + term39*term43) + term44*term48) + term35*(term16*(term3*term38 + term38*term8 + term38*term9 + term39*term49 + term39*term50 + term39*term51) + term48*term52); // jacobian[2] = term29*(term22*(term18*term54 + term19*term54 + term20*term54 + term41*term55 + term42*term55 + term43*term55) + term44*term56) + term35*(term16*(term3*term54 + term49*term55 + term50*term55 + term51*term55 + term54*term8 + term54*term9) + term52*term56); // } // // template <int kContextRadius> // __global__ void PatchMatchOptimizationStepCUDAKernel( // int match_metric, // float max_normal_2d_length, // CUDAUnprojectionLookup2D_ unprojector, // CUDABuffer_<u8> reference_image, // hipTextureObject_t reference_texture, // CUDAMatrix3x4 stereo_tr_reference, // PixelCornerProjector projector, // hipTextureObject_t stereo_image, // CUDABuffer_<float> inv_depth_map, // CUDABuffer_<char2> normals, // CUDABuffer_<float> costs, // CUDABuffer_<hiprandState_t> random_states, // CUDABuffer_<float> lambda) { // unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // // if (x >= 
kContextRadius && y >= kContextRadius && // x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { // float inv_depth = inv_depth_map(y, x); // char2 normal_xy_char = normals(y, x); // float2 normal_xy = make_float2( // normal_xy_char.x * (1 / 127.f), normal_xy_char.y * (1 / 127.f)); // float2 nxy = unprojector.UnprojectPoint(x, y); // // // Gauss-Newton update equation coefficients. // float H[3 + 2 + 1] = {0, 0, 0, 0, 0, 0}; // float b[3] = {0, 0, 0}; // // #pragma unroll // for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { // #pragma unroll // for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { // float raw_residual; // float jacobian[3]; // // float2 other_nxy = unprojector.UnprojectPoint(x + dx, y + dy); // // ComputeResidualAndJacobian( // projector.cx - 0.5f, projector.cy - 0.5f, projector.fx, projector.fy, // inv_depth, normal_xy.x, normal_xy.y, // nxy.x, nxy.y, // other_nxy.x, other_nxy.y, // reference_image(y + dy, x + dx), // stereo_tr_reference.row0.x, stereo_tr_reference.row0.y, stereo_tr_reference.row0.z, stereo_tr_reference.row0.w, // stereo_tr_reference.row1.x, stereo_tr_reference.row1.y, stereo_tr_reference.row1.z, stereo_tr_reference.row1.w, // stereo_tr_reference.row2.x, stereo_tr_reference.row2.y, stereo_tr_reference.row2.z, stereo_tr_reference.row2.w, // stereo_image, // &raw_residual, jacobian); // // // Accumulate // b[0] += raw_residual * jacobian[0]; // b[1] += raw_residual * jacobian[1]; // b[2] += raw_residual * jacobian[2]; // // H[0] += jacobian[0] * jacobian[0]; // H[1] += jacobian[0] * jacobian[1]; // H[2] += jacobian[0] * jacobian[2]; // // H[3] += jacobian[1] * jacobian[1]; // H[4] += jacobian[1] * jacobian[2]; // // H[5] += jacobian[2] * jacobian[2]; // } // } // // /*// TEST: Optimize inv_depth only // b[0] = b[0] / H[0]; // inv_depth -= b[0];*/ // // // Levenberg-Marquardt // const float kDiagLambda = lambda(y, x); // H[0] *= kDiagLambda; // H[3] *= kDiagLambda; // H[5] 
*= kDiagLambda; // // // Solve for the update using Cholesky decomposition // // (H[0] ) (H[0] H[1] H[2]) (x[0]) (b[0]) // // (H[1] H[3] ) * ( H[3] H[4]) * (x[1]) = (b[1]) // // (H[2] H[4] H[5]) ( H[5]) (x[2]) (b[2]) // H[0] = sqrtf(H[0]); // // H[1] = 1.f / H[0] * H[1]; // H[3] = sqrtf(H[3] - H[1] * H[1]); // // H[2] = 1.f / H[0] * H[2]; // H[4] = 1.f / H[3] * (H[4] - H[1] * H[2]); // H[5] = sqrtf(H[5] - H[2] * H[2] - H[4] * H[4]); // // // Re-use b for the intermediate vector // b[0] = (b[0] / H[0]); // b[1] = (b[1] - H[1] * b[0]) / H[3]; // b[2] = (b[2] - H[2] * b[0] - H[4] * b[1]) / H[5]; // // // Re-use b for the delta vector // b[2] = (b[2] / H[5]); // b[1] = (b[1] - H[4] * b[2]) / H[3]; // b[0] = (b[0] - H[1] * b[1] - H[2] * b[2]) / H[0]; // // // Apply the update, sanitize normal if necessary // inv_depth -= b[0]; // normal_xy.x -= b[1]; // normal_xy.y -= b[2]; // // float length = sqrtf(normal_xy.x * normal_xy.x + normal_xy.y * normal_xy.y); // if (length > max_normal_2d_length) { // normal_xy.x *= max_normal_2d_length / length; // normal_xy.y *= max_normal_2d_length / length; // } // // // Test whether the update lowers the cost // float proposal_costs = ComputeCosts<kContextRadius>( // x, y, // normal_xy, // inv_depth, // unprojector, // reference_image, // reference_texture, // stereo_tr_reference, // projector, // stereo_image, // match_metric, // 0, // TODO: Update if using this function again // CUDABuffer_<float>()); // TODO: Update if using this function again // // if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { // costs(y, x) = proposal_costs; // normals(y, x) = make_char2(normal_xy.x * 127.f, normal_xy.y * 127.f); // TODO: in this and similar places: rounding? 
// inv_depth_map(y, x) = inv_depth; // // lambda(y, x) *= 0.5f; // } else { // lambda(y, x) *= 2.f; // } // } // } // // void PatchMatchOptimizationStepCUDA( // hipStream_t stream, // int match_metric, // int context_radius, // float max_normal_2d_length, // hipTextureObject_t reference_unprojection_lookup, // const CUDABuffer_<u8>& reference_image, // hipTextureObject_t reference_texture, // const CUDAMatrix3x4& stereo_tr_reference, // const PixelCornerProjector_& stereo_camera, // const hipTextureObject_t stereo_image, // CUDABuffer_<float>* inv_depth_map, // CUDABuffer_<char2>* normals, // CUDABuffer_<float>* costs, // CUDABuffer_<hiprandState_t>* random_states, // CUDABuffer_<float>* lambda) { // CHECK_CUDA_NO_ERROR(); // COMPILE_INT_4_OPTIONS(context_radius, 5, 8, 10, 15, CUDA_AUTO_TUNE_2D( // PatchMatchOptimizationStepCUDAKernel<_context_radius>, // 16, 16, // inv_depth_map->width(), inv_depth_map->height(), // 0, stream, // /* kernel parameters */ // match_metric, // max_normal_2d_length, // CUDAUnprojectionLookup2D_(reference_unprojection_lookup), // reference_image, // reference_texture, // stereo_tr_reference, // stereo_camera, // stereo_image, // stereo_camera.width(), // stereo_camera.height(), // *inv_depth_map, // *normals, // *costs, // *random_states, // *lambda)); // hipDeviceSynchronize(); // CHECK_CUDA_NO_ERROR(); // }
9a74a07c7e87256792508556d7fbbff5c40ebead.cu
// TODO: This code is currently unused. Update the implementation to work with ZNCC-based cost? // To have the residuals conform to what works well with Gauss-Newton, could use an affine brightness mapping (with optimized factor & bias parameters) // instead of the ZNCC computation, which should achieve the same affine invariance. // // (Mostly) auto-generated function. // typedef float Scalar; // // // opcount = 243 // __forceinline__ __device__ void ComputeResidualAndJacobian( // Scalar cx, Scalar cy, Scalar fx, Scalar fy, // Scalar inv_depth, Scalar n_x, Scalar n_y, // Scalar nx, Scalar ny, // Scalar other_nx, Scalar other_ny, // Scalar ref_intensity, // Scalar str_0_0, Scalar str_0_1, Scalar str_0_2, Scalar str_0_3, // Scalar str_1_0, Scalar str_1_1, Scalar str_1_2, Scalar str_1_3, // Scalar str_2_0, Scalar str_2_1, Scalar str_2_2, Scalar str_2_3, // cudaTextureObject_t stereo_texture, // Scalar* residuals, Scalar* jacobian) { // const Scalar term0 = sqrt(-n_x*n_x - n_y*n_y + 1); // const Scalar term1 = n_x*other_nx + n_y*other_ny - term0; // const Scalar term2 = 1.0f/term1; // const Scalar term3 = str_1_2*term2; // const Scalar term4 = 1.0f/inv_depth; // const Scalar term5 = n_x*nx; // const Scalar term6 = n_y*ny; // const Scalar term7 = -term0*term4 + term4*term5 + term4*term6; // const Scalar term8 = other_nx*str_1_0*term2; // const Scalar term9 = other_ny*str_1_1*term2; // const Scalar term10 = str_1_3 + term3*term7 + term7*term8 + term7*term9; // const Scalar term11 = str_2_2*term2; // const Scalar term12 = other_nx*str_2_0*term2; // const Scalar term13 = other_ny*str_2_1*term2; // const Scalar term14 = str_2_3 + term11*term7 + term12*term7 + term13*term7; // const Scalar term15 = 1.0f/term14; // const Scalar term16 = fy*term15; // // float py = cy + term10*term16; // int iy = static_cast<int>(py); // const Scalar term17 = py - iy; // // const Scalar term18 = str_0_2*term2; // const Scalar term19 = other_nx*str_0_0*term2; // const Scalar term20 = 
other_ny*str_0_1*term2; // const Scalar term21 = str_0_3 + term18*term7 + term19*term7 + term20*term7; // const Scalar term22 = fx*term15; // // float px = cx + term21*term22; // int ix = static_cast<int>(px); // const Scalar term23 = px - ix; // // Scalar top_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 0.5f); // Scalar top_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 0.5f); // Scalar bottom_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 1.5f); // Scalar bottom_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 1.5f); // // const Scalar term24 = -term23 + 1; // const Scalar term25 = bottom_left*term24 + bottom_right*term23; // const Scalar term26 = -term17 + 1; // const Scalar term27 = term23*top_right; // const Scalar term28 = term24*top_left; // const Scalar term29 = -term17*(bottom_left - bottom_right) - term26*(top_left - top_right); // const Scalar term30 = term4 * term4; // const Scalar term31 = term0 - term5 - term6; // const Scalar term32 = term30*term31; // const Scalar term33 = term15 * term15; // const Scalar term34 = term30*term31*term33*(term11 + term12 + term13); // const Scalar term35 = term25 - term27 - term28; // const Scalar term36 = 1.0f/term0; // const Scalar term37 = n_x*term36; // const Scalar term38 = nx*term4 + term37*term4; // const Scalar term39 = -other_nx - term37; // const Scalar term40 = term2 * term2; // // const Scalar term40Xterm7 = term40*term7; // // const Scalar term41 = str_0_2*term40Xterm7; // const Scalar term42 = other_nx*str_0_0*term40Xterm7; // const Scalar term43 = other_ny*str_0_1*term40Xterm7; // const Scalar term44 = fx*term21*term33; // const Scalar term45 = str_2_2*term40Xterm7; // const Scalar term46 = other_nx*str_2_0*term40Xterm7; // const Scalar term47 = other_ny*str_2_1*term40Xterm7; // const Scalar term48 = -term11*term38 - term12*term38 - term13*term38 - term39*term45 - term39*term46 - term39*term47; // const Scalar term49 = str_1_2*term40Xterm7; // 
const Scalar term50 = other_nx*str_1_0*term40Xterm7; // const Scalar term51 = other_ny*str_1_1*term40Xterm7; // const Scalar term52 = fy*term10*term33; // const Scalar term53 = n_y*term36; // const Scalar term54 = ny*term4 + term4*term53; // const Scalar term55 = -other_ny - term53; // const Scalar term56 = -term11*term54 - term12*term54 - term13*term54 - term45*term55 - term46*term55 - term47*term55; // // *residuals = -ref_intensity + term17*term25 + term26*(term27 + term28); // jacobian[0] = term29*(-fx*term21*term34 + term22*(term18*term32 + term19*term32 + term20*term32)) + term35*(-fy*term10*term34 + term16*(term3*term32 + term32*term8 + term32*term9)); // jacobian[1] = term29*(term22*(term18*term38 + term19*term38 + term20*term38 + term39*term41 + term39*term42 + term39*term43) + term44*term48) + term35*(term16*(term3*term38 + term38*term8 + term38*term9 + term39*term49 + term39*term50 + term39*term51) + term48*term52); // jacobian[2] = term29*(term22*(term18*term54 + term19*term54 + term20*term54 + term41*term55 + term42*term55 + term43*term55) + term44*term56) + term35*(term16*(term3*term54 + term49*term55 + term50*term55 + term51*term55 + term54*term8 + term54*term9) + term52*term56); // } // // template <int kContextRadius> // __global__ void PatchMatchOptimizationStepCUDAKernel( // int match_metric, // float max_normal_2d_length, // CUDAUnprojectionLookup2D_ unprojector, // CUDABuffer_<u8> reference_image, // cudaTextureObject_t reference_texture, // CUDAMatrix3x4 stereo_tr_reference, // PixelCornerProjector projector, // cudaTextureObject_t stereo_image, // CUDABuffer_<float> inv_depth_map, // CUDABuffer_<char2> normals, // CUDABuffer_<float> costs, // CUDABuffer_<curandState> random_states, // CUDABuffer_<float> lambda) { // unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // // if (x >= kContextRadius && y >= kContextRadius && // x < inv_depth_map.width() - kContextRadius && y < 
inv_depth_map.height() - kContextRadius) { // float inv_depth = inv_depth_map(y, x); // char2 normal_xy_char = normals(y, x); // float2 normal_xy = make_float2( // normal_xy_char.x * (1 / 127.f), normal_xy_char.y * (1 / 127.f)); // float2 nxy = unprojector.UnprojectPoint(x, y); // // // Gauss-Newton update equation coefficients. // float H[3 + 2 + 1] = {0, 0, 0, 0, 0, 0}; // float b[3] = {0, 0, 0}; // // #pragma unroll // for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { // #pragma unroll // for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { // float raw_residual; // float jacobian[3]; // // float2 other_nxy = unprojector.UnprojectPoint(x + dx, y + dy); // // ComputeResidualAndJacobian( // projector.cx - 0.5f, projector.cy - 0.5f, projector.fx, projector.fy, // inv_depth, normal_xy.x, normal_xy.y, // nxy.x, nxy.y, // other_nxy.x, other_nxy.y, // reference_image(y + dy, x + dx), // stereo_tr_reference.row0.x, stereo_tr_reference.row0.y, stereo_tr_reference.row0.z, stereo_tr_reference.row0.w, // stereo_tr_reference.row1.x, stereo_tr_reference.row1.y, stereo_tr_reference.row1.z, stereo_tr_reference.row1.w, // stereo_tr_reference.row2.x, stereo_tr_reference.row2.y, stereo_tr_reference.row2.z, stereo_tr_reference.row2.w, // stereo_image, // &raw_residual, jacobian); // // // Accumulate // b[0] += raw_residual * jacobian[0]; // b[1] += raw_residual * jacobian[1]; // b[2] += raw_residual * jacobian[2]; // // H[0] += jacobian[0] * jacobian[0]; // H[1] += jacobian[0] * jacobian[1]; // H[2] += jacobian[0] * jacobian[2]; // // H[3] += jacobian[1] * jacobian[1]; // H[4] += jacobian[1] * jacobian[2]; // // H[5] += jacobian[2] * jacobian[2]; // } // } // // /*// TEST: Optimize inv_depth only // b[0] = b[0] / H[0]; // inv_depth -= b[0];*/ // // // Levenberg-Marquardt // const float kDiagLambda = lambda(y, x); // H[0] *= kDiagLambda; // H[3] *= kDiagLambda; // H[5] *= kDiagLambda; // // // Solve for the update using Cholesky decomposition // // (H[0] ) (H[0] 
H[1] H[2]) (x[0]) (b[0]) // // (H[1] H[3] ) * ( H[3] H[4]) * (x[1]) = (b[1]) // // (H[2] H[4] H[5]) ( H[5]) (x[2]) (b[2]) // H[0] = sqrtf(H[0]); // // H[1] = 1.f / H[0] * H[1]; // H[3] = sqrtf(H[3] - H[1] * H[1]); // // H[2] = 1.f / H[0] * H[2]; // H[4] = 1.f / H[3] * (H[4] - H[1] * H[2]); // H[5] = sqrtf(H[5] - H[2] * H[2] - H[4] * H[4]); // // // Re-use b for the intermediate vector // b[0] = (b[0] / H[0]); // b[1] = (b[1] - H[1] * b[0]) / H[3]; // b[2] = (b[2] - H[2] * b[0] - H[4] * b[1]) / H[5]; // // // Re-use b for the delta vector // b[2] = (b[2] / H[5]); // b[1] = (b[1] - H[4] * b[2]) / H[3]; // b[0] = (b[0] - H[1] * b[1] - H[2] * b[2]) / H[0]; // // // Apply the update, sanitize normal if necessary // inv_depth -= b[0]; // normal_xy.x -= b[1]; // normal_xy.y -= b[2]; // // float length = sqrtf(normal_xy.x * normal_xy.x + normal_xy.y * normal_xy.y); // if (length > max_normal_2d_length) { // normal_xy.x *= max_normal_2d_length / length; // normal_xy.y *= max_normal_2d_length / length; // } // // // Test whether the update lowers the cost // float proposal_costs = ComputeCosts<kContextRadius>( // x, y, // normal_xy, // inv_depth, // unprojector, // reference_image, // reference_texture, // stereo_tr_reference, // projector, // stereo_image, // match_metric, // 0, // TODO: Update if using this function again // CUDABuffer_<float>()); // TODO: Update if using this function again // // if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { // costs(y, x) = proposal_costs; // normals(y, x) = make_char2(normal_xy.x * 127.f, normal_xy.y * 127.f); // TODO: in this and similar places: rounding? 
// inv_depth_map(y, x) = inv_depth; // // lambda(y, x) *= 0.5f; // } else { // lambda(y, x) *= 2.f; // } // } // } // // void PatchMatchOptimizationStepCUDA( // cudaStream_t stream, // int match_metric, // int context_radius, // float max_normal_2d_length, // cudaTextureObject_t reference_unprojection_lookup, // const CUDABuffer_<u8>& reference_image, // cudaTextureObject_t reference_texture, // const CUDAMatrix3x4& stereo_tr_reference, // const PixelCornerProjector_& stereo_camera, // const cudaTextureObject_t stereo_image, // CUDABuffer_<float>* inv_depth_map, // CUDABuffer_<char2>* normals, // CUDABuffer_<float>* costs, // CUDABuffer_<curandState>* random_states, // CUDABuffer_<float>* lambda) { // CHECK_CUDA_NO_ERROR(); // COMPILE_INT_4_OPTIONS(context_radius, 5, 8, 10, 15, CUDA_AUTO_TUNE_2D( // PatchMatchOptimizationStepCUDAKernel<_context_radius>, // 16, 16, // inv_depth_map->width(), inv_depth_map->height(), // 0, stream, // /* kernel parameters */ // match_metric, // max_normal_2d_length, // CUDAUnprojectionLookup2D_(reference_unprojection_lookup), // reference_image, // reference_texture, // stereo_tr_reference, // stereo_camera, // stereo_image, // stereo_camera.width(), // stereo_camera.height(), // *inv_depth_map, // *normals, // *costs, // *random_states, // *lambda)); // cudaDeviceSynchronize(); // CHECK_CUDA_NO_ERROR(); // }
d530f2e1e922cf0fe3dee5a3320a66624ce59dba.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"

// Hashes k-mers of `sequence` and stores them into `hash_table`.
// Each thread (work item, in the original OpenCL terminology) processes
// params->kmers_per_work_item k-mers, strided by the total number of work
// items so that consecutive threads read consecutive sequence positions
// (coalesced global loads).
__global__ void kernel_index(Hash_item * hash_table, Parameters_index * params, const char * sequence) {
    // Get the index of the current element to be processed.
    // BUG FIX: this was `blockDim.x`, which is the same value for every
    // thread, so all threads hashed the same k-mers. The OpenCL
    // get_global_id(0) this port derives from maps to the global thread
    // index below.
    unsigned long global_id = (unsigned long) blockIdx.x * blockDim.x + threadIdx.x;
    //ulong local_id = get_local_id(0);

    unsigned long kmers_in_work_item = params->kmers_per_work_item;
    unsigned long kmer_size = params->kmer_size;
    unsigned long z_value = params->z_value;
    unsigned long t_work_items = params->global_item_size;
    unsigned long offset = params->offset;
    unsigned long j, k;

    // Until reaching end of sequence
    for(j=0; j<kmers_in_work_item; j++){
        // Coalescent: thread g handles positions g, g + t_work_items, ...
        unsigned long pos = global_id + (j * t_work_items);

        unsigned long hash12 = 0, hash_full = 0;
        unsigned char checker = 0, multiplier = 0, val;

        // First FIXED_K characters: bits 1-2 of the ASCII code give a
        // 2-bit value per nucleotide (A->0, C->1, T->2, G->3); bit 3 is
        // set for non-ACGT characters such as 'N' and accumulates into
        // `checker`.
        for(k=0; k<FIXED_K; k++){
            val = (unsigned char) sequence[pos+k];
            multiplier = (val & (unsigned char) 6) >> 1;
            checker = checker | (val & (unsigned char) 8); // Verified
            hash12 += (((unsigned long) 1) << (2*k)) * (unsigned long) multiplier;
        }
        hash_full = hash12;

        // Remaining positions are sampled every z_value characters and
        // folded into the full hash.
        for(k=FIXED_K; k<kmer_size; k+=z_value){
            val = (unsigned char) sequence[pos+k];
            multiplier = (val & (unsigned char) 6) >> 1;
            checker = checker | (val & (unsigned char) 8); // Verified
            hash_full += (((unsigned long) 1) << (2*k)) * (unsigned long) multiplier;
        }

        // Only store the hash when the whole k-mer consisted of A/C/G/T.
        // NOTE(review): writing to hash_table[0] (instead of the commented
        // hash_table[hash12] lines) appears to be a deliberate debugging
        // state of the original code; kept as-is.
        if(checker == (unsigned char) 0){ // Verified
            hash_table[0].key = hash_full;
            //hash_table[hash12].key = hash_full;
            //hash_table[hash12].pos_in_x = pos + offset;
            //atom_inc(&hash_table[hash12].repeat);
        }
    }
}
d530f2e1e922cf0fe3dee5a3320a66624ce59dba.cu
#include "kernels.cuh"

// Hashes k-mers of `sequence` and stores them into `hash_table`.
// Each thread (work item, in the original OpenCL terminology) processes
// params->kmers_per_work_item k-mers, strided by the total number of work
// items so that consecutive threads read consecutive sequence positions
// (coalesced global loads).
__global__ void kernel_index(Hash_item * hash_table, Parameters_index * params, const char * sequence) {
    // Get the index of the current element to be processed.
    // BUG FIX: this was `blockDim.x`, which is the same value for every
    // thread, so all threads hashed the same k-mers. The OpenCL
    // get_global_id(0) this port derives from maps to the global thread
    // index below.
    unsigned long global_id = (unsigned long) blockIdx.x * blockDim.x + threadIdx.x;
    //ulong local_id = get_local_id(0);

    unsigned long kmers_in_work_item = params->kmers_per_work_item;
    unsigned long kmer_size = params->kmer_size;
    unsigned long z_value = params->z_value;
    unsigned long t_work_items = params->global_item_size;
    unsigned long offset = params->offset;
    unsigned long j, k;

    // Until reaching end of sequence
    for(j=0; j<kmers_in_work_item; j++){
        // Coalescent: thread g handles positions g, g + t_work_items, ...
        unsigned long pos = global_id + (j * t_work_items);

        unsigned long hash12 = 0, hash_full = 0;
        unsigned char checker = 0, multiplier = 0, val;

        // First FIXED_K characters: bits 1-2 of the ASCII code give a
        // 2-bit value per nucleotide (A->0, C->1, T->2, G->3); bit 3 is
        // set for non-ACGT characters such as 'N' and accumulates into
        // `checker`.
        for(k=0; k<FIXED_K; k++){
            val = (unsigned char) sequence[pos+k];
            multiplier = (val & (unsigned char) 6) >> 1;
            checker = checker | (val & (unsigned char) 8); // Verified
            hash12 += (((unsigned long) 1) << (2*k)) * (unsigned long) multiplier;
        }
        hash_full = hash12;

        // Remaining positions are sampled every z_value characters and
        // folded into the full hash.
        for(k=FIXED_K; k<kmer_size; k+=z_value){
            val = (unsigned char) sequence[pos+k];
            multiplier = (val & (unsigned char) 6) >> 1;
            checker = checker | (val & (unsigned char) 8); // Verified
            hash_full += (((unsigned long) 1) << (2*k)) * (unsigned long) multiplier;
        }

        // Only store the hash when the whole k-mer consisted of A/C/G/T.
        // NOTE(review): writing to hash_table[0] (instead of the commented
        // hash_table[hash12] lines) appears to be a deliberate debugging
        // state of the original code; kept as-is.
        if(checker == (unsigned char) 0){ // Verified
            hash_table[0].key = hash_full;
            //hash_table[hash12].key = hash_full;
            //hash_table[hash12].pos_in_x = pos + offset;
            //atom_inc(&hash_table[hash12].repeat);
        }
    }
}
5f235fc5ecb92fbdd5f42a0243cbea08ca936c49.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "blur.h"
#include <vector>

// Builds the (2*halfWidth+1)-tap binomial filter on the host and uploads it
// to a freshly allocated device array. The caller owns the returned device
// pointer. NOTE(review): hipMalloc/hipMemcpy return codes are not checked
// anywhere in this file — consider adding a HIP_CHECK macro.
template <typename T>
T* createBinomialKernel(int halfWidth) {
    // Construct the binomial coefficients by repeated pairwise summation,
    // starting from the half-width-1 kernel {1, 2, 1}.
    std::vector<T> hBinomial{ 1,2,1 };
    for (int cHalfWidth = 1; cHalfWidth < halfWidth; ++cHalfWidth) {
        for (int step = 0; step < 2; ++step) {
            hBinomial.insert(hBinomial.begin(), 0);
            // size_t index avoids the original signed/unsigned comparison.
            for (size_t pos = 0; pos + 1 < hBinomial.size(); ++pos) {
                hBinomial[pos] = hBinomial[pos] + hBinomial[pos + 1];
            }
        }
    }
    // Allocate GPU array and copy data (size_t byte count; was int).
    size_t length = (halfWidth * 2 + 1) * sizeof(T);
    T* dBinomial = 0;
    hipMalloc(&dBinomial, length);
    hipMemcpy(dBinomial, hBinomial.data(), length, hipMemcpyHostToDevice);
    return dBinomial;
}

// Same as createBinomialKernel, but uploads the inclusive prefix sum of the
// filter weights (used by the adjoint kernels for border renormalization).
template <typename T>
T* createBinomialKernelAccum(int halfWidth) {
    // Construct the binomial coefficients.
    std::vector<T> hBinomial{ 1,2,1 };
    for (int cHalfWidth = 1; cHalfWidth < halfWidth; ++cHalfWidth) {
        for (int step = 0; step < 2; ++step) {
            hBinomial.insert(hBinomial.begin(), 0);
            for (size_t pos = 0; pos + 1 < hBinomial.size(); ++pos) {
                hBinomial[pos] = hBinomial[pos] + hBinomial[pos + 1];
            }
        }
    }
    // In-place inclusive prefix sum.
    for (size_t i = 1; i < hBinomial.size(); ++i) {
        hBinomial[i] = hBinomial[i] + hBinomial[i - 1];
    }
    // Allocate GPU array and copy data.
    size_t length = (halfWidth * 2 + 1) * sizeof(T);
    T* dBinomial = 0;
    hipMalloc(&dBinomial, length);
    hipMemcpy(dBinomial, hBinomial.data(), length, hipMemcpyHostToDevice);
    return dBinomial;
}

// Horizontal binomial blur pass. One thread per output pixel; storage is
// column-major (index = y + x * height). Taps outside the image are dropped
// and the remaining weights renormalized.
template <typename T>
__global__ void op::binomialX(T* vecOut, T* vecIn, T* filter, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T weightSum = 0;
    for (int dx = -halfWidth; dx <= halfWidth; ++dx) {
        size_t cx = x + dx;
        // cx is unsigned: a negative x+dx wraps to a huge value, so this
        // single bound check also rejects out-of-range-left taps (the
        // original "cx >= 0" test was tautological for size_t).
        if (cx < width) {
            T weight = filter[dx + halfWidth];
            accum += weight * vecIn[y + cx * height];
            weightSum += weight;
        }
    }
    vecOut[y + x * height] = accum / weightSum;
}

// Adjoint of binomialX: divides each contribution by the weight sum that the
// forward pass used for the source column cx (full sum minus the prefix
// chunks lost at the left/right image borders).
template <typename T>
__global__ void op::binomialXAdjoint(T* vecOut, T* vecIn, T* filter, T* filterAccum, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    // Last prefix-sum entry is the total filter weight.
    T maxWeight = filterAccum[2 * halfWidth];
    for (int dx = -halfWidth; dx <= halfWidth; ++dx) {
        size_t cx = x + dx;
        if (cx < width) {  // unsigned wrap also rejects "negative" cx
            T weight = filter[dx + halfWidth];
            T normalize = maxWeight;
            if (cx > width - halfWidth - 1) {
                normalize -= filterAccum[halfWidth - (width - cx - 1) - 1];
            }
            if (cx < halfWidth) {
                normalize -= filterAccum[halfWidth - cx - 1];
            }
            accum += vecIn[y + cx * height] * weight / normalize;
        }
    }
    vecOut[y + x * height] = accum;
}

// Vertical binomial blur pass (see binomialX for the layout conventions).
template <typename T>
__global__ void op::binomialY(T* vecOut, T* vecIn, T* filter, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T weightSum = 0;
    for (int dy = -halfWidth; dy <= halfWidth; ++dy) {
        size_t cy = y + dy;
        if (cy < height) {  // unsigned wrap also rejects "negative" cy
            T weight = filter[dy + halfWidth];
            accum += weight * vecIn[cy + x * height];
            weightSum += weight;
        }
    }
    vecOut[y + x * height] = accum / weightSum;
}

// Adjoint of binomialY (see binomialXAdjoint for the normalization logic).
template <typename T>
__global__ void op::binomialYAdjoint(T* vecOut, T* vecIn, T* filter, T* filterAccum, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T maxWeight = filterAccum[2 * halfWidth];
    for (int dy = -halfWidth; dy <= halfWidth; ++dy) {
        size_t cy = y + dy;
        if (cy < height) {  // unsigned wrap also rejects "negative" cy
            T weight = filter[dy + halfWidth];
            T normalize = maxWeight;
            if (cy > height - halfWidth - 1) {
                normalize -= filterAccum[halfWidth - (height - cy - 1) - 1];
            }
            if (cy < halfWidth) {
                normalize -= filterAccum[halfWidth - cy - 1];
            }
            accum += vecIn[cy + x * height] * weight / normalize;
        }
    }
    vecOut[y + x * height] = accum;
}

// NOTE(fix): constructors below were written as e.g.
// "op::SeparableFilter<T>::SeparableFilter<T>(...)" — a simple-template-id
// is not a valid constructor name; GCC/Clang reject it. The trailing <T>
// on the constructor name has been removed.
template<typename T>
op::SeparableFilter<T>::SeparableFilter(size_t width, size_t height)
    : LinearImage(width, height) {
    // Scratch buffer holding the result of the first separable pass.
    hipMalloc(&dIntermediate, width * height * sizeof(T));
}

template<typename T>
op::Binomial<T>::Binomial(unsigned int halfWidth, size_t width, size_t height)
    : SeparableFilter<T>(width, height)
    , halfWidth(halfWidth) {
    dFilter = createBinomialKernel<T>(halfWidth);
}

// Forward blur: X pass into the intermediate buffer, then Y pass into out.
template<typename T>
void op::Binomial<T>::apply(const void* dVectorOut, const void* dVectorIn) {
    op::binomialX KERNEL_ARGS2(blocks, threads) (dIntermediate, (T*) dVectorIn, dFilter, halfWidth, width, height);
    op::binomialY KERNEL_ARGS2(blocks, threads) ((T*) dVectorOut, dIntermediate, dFilter, halfWidth, width, height);
}

template<typename T>
op::BinomialAdjoint<T>::BinomialAdjoint(unsigned int halfWidth, size_t width, size_t height)
    : SeparableFilter<T>(width, height)
    , halfWidth(halfWidth) {
    dFilter = createBinomialKernel<T>(halfWidth);
    dAccum = createBinomialKernelAccum<T>(halfWidth);
}

// Adjoint blur: passes applied in reverse order (Y then X).
template<typename T>
void op::BinomialAdjoint<T>::apply(const void* dVectorOut, const void* dVectorIn) {
    op::binomialYAdjoint KERNEL_ARGS2(blocks, threads) (dIntermediate, (T*)dVectorIn, dFilter, dAccum, halfWidth, width, height);
    op::binomialXAdjoint KERNEL_ARGS2(blocks, threads) ((T*)dVectorOut, dIntermediate, dFilter, dAccum, halfWidth, width, height);
}

// Explicit instantiations for float and double.
template __global__ void op::binomialX(float* vecOut, float* vecIn, float* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialXAdjoint(float* vecOut, float* vecIn, float* filter, float* filterAccum, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialY(float* vecOut, float* vecIn, float* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialYAdjoint(float* vecOut, float* vecIn, float* filter, float* filterAccum, int halfWidth, size_t width, size_t height);
template class op::Binomial<float>;
template class op::BinomialAdjoint<float>;

template __global__ void op::binomialX(double* vecOut, double* vecIn, double* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialXAdjoint(double* vecOut, double* vecIn, double* filter, double* filterAccum, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialY(double* vecOut, double* vecIn, double* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialYAdjoint(double* vecOut, double* vecIn, double* filter, double* filterAccum, int halfWidth, size_t width, size_t height);
template class op::Binomial<double>;
template class op::BinomialAdjoint<double>;
5f235fc5ecb92fbdd5f42a0243cbea08ca936c49.cu
#include "blur.h"
#include <vector>

// Builds the (2*halfWidth+1)-tap binomial filter on the host and uploads it
// to a freshly allocated device array. The caller owns the returned device
// pointer. NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked
// anywhere in this file — consider adding a CUDA_CHECK macro.
template <typename T>
T* createBinomialKernel(int halfWidth) {
    // Construct the binomial coefficients by repeated pairwise summation,
    // starting from the half-width-1 kernel {1, 2, 1}.
    std::vector<T> hBinomial{ 1,2,1 };
    for (int cHalfWidth = 1; cHalfWidth < halfWidth; ++cHalfWidth) {
        for (int step = 0; step < 2; ++step) {
            hBinomial.insert(hBinomial.begin(), 0);
            // size_t index avoids the original signed/unsigned comparison.
            for (size_t pos = 0; pos + 1 < hBinomial.size(); ++pos) {
                hBinomial[pos] = hBinomial[pos] + hBinomial[pos + 1];
            }
        }
    }
    // Allocate GPU array and copy data (size_t byte count; was int).
    size_t length = (halfWidth * 2 + 1) * sizeof(T);
    T* dBinomial = 0;
    cudaMalloc(&dBinomial, length);
    cudaMemcpy(dBinomial, hBinomial.data(), length, cudaMemcpyHostToDevice);
    return dBinomial;
}

// Same as createBinomialKernel, but uploads the inclusive prefix sum of the
// filter weights (used by the adjoint kernels for border renormalization).
template <typename T>
T* createBinomialKernelAccum(int halfWidth) {
    // Construct the binomial coefficients.
    std::vector<T> hBinomial{ 1,2,1 };
    for (int cHalfWidth = 1; cHalfWidth < halfWidth; ++cHalfWidth) {
        for (int step = 0; step < 2; ++step) {
            hBinomial.insert(hBinomial.begin(), 0);
            for (size_t pos = 0; pos + 1 < hBinomial.size(); ++pos) {
                hBinomial[pos] = hBinomial[pos] + hBinomial[pos + 1];
            }
        }
    }
    // In-place inclusive prefix sum.
    for (size_t i = 1; i < hBinomial.size(); ++i) {
        hBinomial[i] = hBinomial[i] + hBinomial[i - 1];
    }
    // Allocate GPU array and copy data.
    size_t length = (halfWidth * 2 + 1) * sizeof(T);
    T* dBinomial = 0;
    cudaMalloc(&dBinomial, length);
    cudaMemcpy(dBinomial, hBinomial.data(), length, cudaMemcpyHostToDevice);
    return dBinomial;
}

// Horizontal binomial blur pass. One thread per output pixel; storage is
// column-major (index = y + x * height). Taps outside the image are dropped
// and the remaining weights renormalized.
template <typename T>
__global__ void op::binomialX(T* vecOut, T* vecIn, T* filter, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T weightSum = 0;
    for (int dx = -halfWidth; dx <= halfWidth; ++dx) {
        size_t cx = x + dx;
        // cx is unsigned: a negative x+dx wraps to a huge value, so this
        // single bound check also rejects out-of-range-left taps (the
        // original "cx >= 0" test was tautological for size_t).
        if (cx < width) {
            T weight = filter[dx + halfWidth];
            accum += weight * vecIn[y + cx * height];
            weightSum += weight;
        }
    }
    vecOut[y + x * height] = accum / weightSum;
}

// Adjoint of binomialX: divides each contribution by the weight sum that the
// forward pass used for the source column cx (full sum minus the prefix
// chunks lost at the left/right image borders).
template <typename T>
__global__ void op::binomialXAdjoint(T* vecOut, T* vecIn, T* filter, T* filterAccum, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    // Last prefix-sum entry is the total filter weight.
    T maxWeight = filterAccum[2 * halfWidth];
    for (int dx = -halfWidth; dx <= halfWidth; ++dx) {
        size_t cx = x + dx;
        if (cx < width) {  // unsigned wrap also rejects "negative" cx
            T weight = filter[dx + halfWidth];
            T normalize = maxWeight;
            if (cx > width - halfWidth - 1) {
                normalize -= filterAccum[halfWidth - (width - cx - 1) - 1];
            }
            if (cx < halfWidth) {
                normalize -= filterAccum[halfWidth - cx - 1];
            }
            accum += vecIn[y + cx * height] * weight / normalize;
        }
    }
    vecOut[y + x * height] = accum;
}

// Vertical binomial blur pass (see binomialX for the layout conventions).
template <typename T>
__global__ void op::binomialY(T* vecOut, T* vecIn, T* filter, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T weightSum = 0;
    for (int dy = -halfWidth; dy <= halfWidth; ++dy) {
        size_t cy = y + dy;
        if (cy < height) {  // unsigned wrap also rejects "negative" cy
            T weight = filter[dy + halfWidth];
            accum += weight * vecIn[cy + x * height];
            weightSum += weight;
        }
    }
    vecOut[y + x * height] = accum / weightSum;
}

// Adjoint of binomialY (see binomialXAdjoint for the normalization logic).
template <typename T>
__global__ void op::binomialYAdjoint(T* vecOut, T* vecIn, T* filter, T* filterAccum, int halfWidth, size_t width, size_t height) {
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) { return; }
    T accum = 0;
    T maxWeight = filterAccum[2 * halfWidth];
    for (int dy = -halfWidth; dy <= halfWidth; ++dy) {
        size_t cy = y + dy;
        if (cy < height) {  // unsigned wrap also rejects "negative" cy
            T weight = filter[dy + halfWidth];
            T normalize = maxWeight;
            if (cy > height - halfWidth - 1) {
                normalize -= filterAccum[halfWidth - (height - cy - 1) - 1];
            }
            if (cy < halfWidth) {
                normalize -= filterAccum[halfWidth - cy - 1];
            }
            accum += vecIn[cy + x * height] * weight / normalize;
        }
    }
    vecOut[y + x * height] = accum;
}

// NOTE(fix): constructors below were written as e.g.
// "op::SeparableFilter<T>::SeparableFilter<T>(...)" — a simple-template-id
// is not a valid constructor name; GCC/Clang reject it. The trailing <T>
// on the constructor name has been removed.
template<typename T>
op::SeparableFilter<T>::SeparableFilter(size_t width, size_t height)
    : LinearImage(width, height) {
    // Scratch buffer holding the result of the first separable pass.
    cudaMalloc(&dIntermediate, width * height * sizeof(T));
}

template<typename T>
op::Binomial<T>::Binomial(unsigned int halfWidth, size_t width, size_t height)
    : SeparableFilter<T>(width, height)
    , halfWidth(halfWidth) {
    dFilter = createBinomialKernel<T>(halfWidth);
}

// Forward blur: X pass into the intermediate buffer, then Y pass into out.
template<typename T>
void op::Binomial<T>::apply(const void* dVectorOut, const void* dVectorIn) {
    op::binomialX KERNEL_ARGS2(blocks, threads) (dIntermediate, (T*) dVectorIn, dFilter, halfWidth, width, height);
    op::binomialY KERNEL_ARGS2(blocks, threads) ((T*) dVectorOut, dIntermediate, dFilter, halfWidth, width, height);
}

template<typename T>
op::BinomialAdjoint<T>::BinomialAdjoint(unsigned int halfWidth, size_t width, size_t height)
    : SeparableFilter<T>(width, height)
    , halfWidth(halfWidth) {
    dFilter = createBinomialKernel<T>(halfWidth);
    dAccum = createBinomialKernelAccum<T>(halfWidth);
}

// Adjoint blur: passes applied in reverse order (Y then X).
template<typename T>
void op::BinomialAdjoint<T>::apply(const void* dVectorOut, const void* dVectorIn) {
    op::binomialYAdjoint KERNEL_ARGS2(blocks, threads) (dIntermediate, (T*)dVectorIn, dFilter, dAccum, halfWidth, width, height);
    op::binomialXAdjoint KERNEL_ARGS2(blocks, threads) ((T*)dVectorOut, dIntermediate, dFilter, dAccum, halfWidth, width, height);
}

// Explicit instantiations for float and double.
template __global__ void op::binomialX(float* vecOut, float* vecIn, float* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialXAdjoint(float* vecOut, float* vecIn, float* filter, float* filterAccum, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialY(float* vecOut, float* vecIn, float* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialYAdjoint(float* vecOut, float* vecIn, float* filter, float* filterAccum, int halfWidth, size_t width, size_t height);
template class op::Binomial<float>;
template class op::BinomialAdjoint<float>;

template __global__ void op::binomialX(double* vecOut, double* vecIn, double* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialXAdjoint(double* vecOut, double* vecIn, double* filter, double* filterAccum, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialY(double* vecOut, double* vecIn, double* filter, int halfWidth, size_t width, size_t height);
template __global__ void op::binomialYAdjoint(double* vecOut, double* vecIn, double* filter, double* filterAccum, int halfWidth, size_t width, size_t height);
template class op::Binomial<double>;
template class op::BinomialAdjoint<double>;
716a12fadbf8fe611330add0aeab0f53cd68b1d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////// template<typename T> __device__ void tearKernel(void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong tadLength; __shared__ int tadEWS; __shared__ int zEWS; // __shared__ int tadRank; __shared__ Nd4jLong numTads; // __shared__ int zRank; // __shared__ Nd4jLong *tadShape; // __shared__ Nd4jLong *tadStride; // __shared__ Nd4jLong *zShape; // __shared__ Nd4jLong *zStride; __shared__ T* x; if (threadIdx.x == 0) { tadLength = shape::length(tadShapeInfo); tadEWS = shape::elementWiseStride(tadShapeInfo); zEWS = shape::elementWiseStride(zShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; x = static_cast<T *>(vx); } __syncthreads(); for (Nd4jLong r = blockIdx.x; r < numTads; r += gridDim.x) { T *z = (T *) targets[r]; T *s = x + tadOffsets[r]; if (zEWS > 0 && tadEWS > 0) { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) z[i * zEWS] = s[i * 
tadEWS]; } else { for (Nd4jLong j = threadIdx.x; j < tadLength; j += blockDim.x) { auto xOffset = shape::getIndexOffset(j, tadShapeInfo); auto zOffset = shape::getIndexOffset(j, zShapeInfo); z[zOffset] = s[xOffset]; } } } } //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execTearKernel(void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { tearKernel<T>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void tearKernelGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( execTearKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "tear(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT tearKernelGeneric, (dim3 & launchDims, hipStream_t * stream, void * vx, Nd4jLong * xShapeInfo, Nd4jPointer *targets, Nd4jLong * zShapeInfo, Nd4jLong * tadShapeInfo, Nd4jLong * tadOffsets), LIBND4J_TYPES); }
716a12fadbf8fe611330add0aeab0f53cd68b1d1.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////// template<typename T> __device__ void tearKernel(void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong tadLength; __shared__ int tadEWS; __shared__ int zEWS; // __shared__ int tadRank; __shared__ Nd4jLong numTads; // __shared__ int zRank; // __shared__ Nd4jLong *tadShape; // __shared__ Nd4jLong *tadStride; // __shared__ Nd4jLong *zShape; // __shared__ Nd4jLong *zStride; __shared__ T* x; if (threadIdx.x == 0) { tadLength = shape::length(tadShapeInfo); tadEWS = shape::elementWiseStride(tadShapeInfo); zEWS = shape::elementWiseStride(zShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; x = static_cast<T *>(vx); } __syncthreads(); for (Nd4jLong r = blockIdx.x; r < numTads; r += gridDim.x) { T *z = (T *) targets[r]; T *s = x + tadOffsets[r]; if (zEWS > 0 && tadEWS > 0) { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) z[i * zEWS] = s[i * tadEWS]; } else { for (Nd4jLong j = threadIdx.x; j < tadLength; j += blockDim.x) { auto 
xOffset = shape::getIndexOffset(j, tadShapeInfo); auto zOffset = shape::getIndexOffset(j, zShapeInfo); z[zOffset] = s[xOffset]; } } } } //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execTearKernel(void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { tearKernel<T>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void tearKernelGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { execTearKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "tear(...) failed"); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT tearKernelGeneric, (dim3 & launchDims, cudaStream_t * stream, void * vx, Nd4jLong * xShapeInfo, Nd4jPointer *targets, Nd4jLong * zShapeInfo, Nd4jLong * tadShapeInfo, Nd4jLong * tadOffsets), LIBND4J_TYPES); }
757387a41d36fac987825721a543ab6a8d49c6fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <sstream> #include <stdio.h> #include <type_traits> #include <cmath> #include <time.h> #include <fstream> #include <opencv2\core\core.hpp> #include <opencv2\highgui\highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <omp.h> typedef double T_DOUBLE; typedef char T_CHAR; typedef long T_LONG; typedef float T_FLOAT; typedef int T_INT; typedef unsigned char T_BYTE; const T_LONG BLOQUELINEA = 1024; using namespace cv; using namespace std; #define norm(x, y) (fabs(x) + fabs(y)) //Variables globales clock_t h_tIni, h_tFin, h_tTotal; // Para calculo de tiempo en CPU hipEvent_t d_tIni, d_tFin; float d_tTotal; // Para calculo de tiempo en GPU /********************************************* * PARA VERIFICAR ERRORES DE CUDA QUE SE DESENCADENA DESDE EL HOST *********************************************/ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /********************************************** * FUNCION PARA OBTENER EL TIEMPO EN CPU **********************************************/ double getMilisegundos(clock_t c) { double tiempo = 0; tiempo = ((c / (double)CLOCKS_PER_SEC) * 1000); return tiempo; } /************************************************************* * PARTE HOST *************************************************************/ template<class T> class h_Matriz { public: T *ptr; size_t row, col; h_Matriz(){} h_Matriz(size_t n) { inicializar(n,n); } h_Matriz(size_t m, size_t n) { inicializar(m, n); } h_Matriz(cv::Mat img) { inicializar(img.rows, img.cols); for (int i = 0; i < row; i++) memcpy(&(ptr[i*col]), img.ptr<T>(i, 0), col * 
sizeof(T)); } void inicializar(size_t m, size_t n) { row = m; col = n; ptr = new T[row * col]; } inline T Get(size_t r, size_t c) { return *(ptr + r*(col)+c); } inline void Set(size_t r, size_t c, T val) { *(ptr + r*(col)+c) = val; } void Set_Matriz(h_Matriz<T> mat) { delete ptr; inicializar(mat.row, mat.col); memcpy(&(ptr[0]), &(mat.ptr[0]), row*col * sizeof(T)); } void Get_Matriz(h_Matriz<T> *mat) { if (mat->row == row && mat->col == col) memcpy(&mat->ptr[0], &(ptr[0]), row*col * sizeof(T)); } void h_Matriz2Mat(cv::Mat *img) { if (img->rows == row && img->cols == col) for (size_t i = 0; i < row; i++) memcpy(img->ptr<T>(i, 0), &(ptr[i*col]), col * sizeof(T)); } void Imprimir() { for (size_t i = 0; i < row; i++) { for (size_t j = 0; j < col; j++) cout << ptr[i*col + j] << "\t"; cout << endl; } } ~h_Matriz() { } }; // modificar el kernel para convolucion template<class T> void convolucion(h_Matriz<T> *kernel) { int r = kernel->row ; int c = kernel->col; h_Matriz<T> temp(r,c); for (int k = 0; k < r; k++) for (int l = 0; l < c; l++) temp.Set(k, l, kernel->Get(r - k-1, c - l-1)); kernel->Set_Matriz(temp); } // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> void correlacion(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T> kernel) { T suma1; int mitad = kernel.row / 2; for (int i = mitad; i < img->row-mitad;i++) for (int j = mitad; j < img->col - mitad; j++) { suma1 = 0; for (int k = 0; k < kernel.row; k++) for (int l = 0; l < kernel.col; l++) suma1 += img->Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); imgout->Set(i, j, (T)(suma1)); } } // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> void correlacion2(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T1> kernel, h_Matriz<T1> kernel2) { T suma1; T suma2; T tmp; int mitad = kernel.row / 2; for (int i = mitad; i < img->row - mitad; i++) for (int j = mitad; j < img->col - mitad; j++){ suma1 = 0; suma2 = 0; 
for (int k = 0; k < kernel.row; k++) for (int l = 0; l < kernel.col; l++){ tmp = img->Get(i - mitad+k, j - mitad+l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } T val = norm((T)suma1, (T)suma2); imgout->Set(i, j, (T)val); } } /************************************************************* * PARTE HOST - paralelo con OpenMP *************************************************************/ // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> void p_correlacion(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T> kernel) { T suma1; int mitad = kernel.row / 2; int i, j, k, l; int ir = img->row - mitad; int ic = img->col - mitad; int kr = kernel.row; int kc = kernel.row; //kernel.Imprimir(); #pragma omp parallel for shared(kernel, img,imgout, ir, ic,kr,kc, mitad) private( i, j, k, l, suma1) for (int i = mitad; i < ir; i++) for (int j = mitad; j < ic; j++) { suma1 = 0; for (int k = 0; k < kr; k++) for (int l = 0; l < kc; l++) suma1 += img->Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); //std::cout << suma1<<std::endl; imgout->Set(i, j, (T)(suma1)); } } // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> void p_correlacion2(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T1> kernel, h_Matriz<T1> kernel2) { T suma1; T suma2; T tmp; T_INT mitad = kernel.row / 2; T val; T_INT ir = img->row - mitad; T_INT ic = img->col - mitad; T_INT kr = kernel.row; T_INT kc = kernel.row; T_INT i, j, k, l; omp_set_num_threads(8); #pragma omp parallel for shared(kernel, kernel2, img,imgout, ir, ic,kr,kc, mitad) private( i, j, k, l, suma1, suma2,tmp) for (i = mitad; i < ir; i++){ for (j = mitad; j < ic; j++){ suma1 = 0; suma2 = 0; for (k = 0; k < kr; k++){ for (l = 0; l < kc; l++){ tmp = img->Get(i - mitad + k, j - mitad + l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } } imgout->Set(i, j, (T)norm((T)suma1, (T)suma2)); } } } 
/***************************************************************** * PARTE DEVICE *****************************************************************/ template<class T> class d_Matriz { public: T *d_ptr; size_t row, col; d_Matriz(h_Matriz<T> mat) { inicializar(mat); } void inicializar(h_Matriz<T> mat) { row = mat.row; col = mat.col; checkCudaErrors(hipMalloc((void**)&d_ptr, row*col * sizeof(T))); checkCudaErrors(hipMemcpy(d_ptr, mat.ptr, col* row*sizeof(T), hipMemcpyHostToDevice)); } __device__ inline T Get(size_t r, size_t c) { return *(d_ptr + r*(col)+c); } __device__ inline void Set(size_t r, size_t c, T val) { *(d_ptr + r*(col)+c) = val; } void Set_Matriz(h_Matriz<T> mat) { checkCudaErrors(hipFree(d_ptr)); inicializar(mat); } void Get_Matriz(h_Matriz<T> mat) { if (mat.row == row && mat.col == col) checkCudaErrors(hipMemcpy(mat.ptr, d_ptr, col* row*sizeof(T), hipMemcpyDeviceToHost)); } ~d_Matriz() { } }; // modificar el kernel para convolucion template<class T> __device__ void d_conv3(d_Matriz<T> *kernel) { int r = kernel->row - 1; int c = kernel->col - 1; d_Matriz<T> temp(3); for (int k = 0; k <= r; k++) for (int l = 0; l <= c; l++) temp.Set(k, l, kernel->Get(r-k, c-l)); kernel->Set_Matriz(temp); } // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> __global__ void d_correlacion(d_Matriz<T> img, d_Matriz<T> imgout, d_Matriz<T> kernel) { T suma1; size_t i = threadIdx.x; size_t j = blockIdx.x; T_INT mitad = kernel.row / 2; while (j<(img.row - mitad)*img.col) { suma1 = 0; for (T_INT k = 0; k < kernel.row; k++) for (T_INT l = 0; l < kernel.col; l++) suma1 += img.Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); imgout.Set(i, j, (T)(suma1)); j += blockDim.x*gridDim.x; } } //__device__ void d_correlacion(d_Matriz<T> &img, d_Matriz<T> &imgout, d_Matriz<T> kernel) // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> __global__ void d_correlacion2(d_Matriz<T> img, 
d_Matriz<T> imgout, d_Matriz<T1> kernel, d_Matriz<T1> kernel2) { T suma1; T suma2; T tmp; T_INT mitad = kernel.row / 2; size_t i = threadIdx.x; size_t j = blockIdx.x; while (j<(img.row - mitad)*img.col) { suma1 = 0; suma2 = 0; for (T_INT k = 0; k < kernel.row; k++) for (T_INT l = 0; l < kernel.col; l++){ tmp = img.Get(i - mitad+k, j-mitad+l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } imgout.Set(i, j, (T)norm((T)suma1, (T)suma2)); j += blockDim.x*gridDim.x; } } /***************************************************************** * OBTENER FILTRO DE ARCHIVO.KER *****************************************************************/ template<class T> void AbrirKernel(T_CHAR *FileOrigen, h_Matriz<T> *kernel, h_Matriz<T> *kernel2, T_INT &nroKernel) { std::ifstream origen(FileOrigen); if (origen.fail()) std::cerr << "Error al abrir el kernel: " << FileOrigen << std::endl; else { T_CHAR *bloque; bloque = new T_CHAR[BLOQUELINEA + 1]; T_INT m, n; T_FLOAT val; // Leer el numero de Kernel origen.getline(bloque, BLOQUELINEA, '\n'); nroKernel = atoi(bloque); if (nroKernel == 1)// 1 solo kernel { origen.getline(bloque, BLOQUELINEA, '\n'); m = atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); kernel->inicializar(m, n); kernel2->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel->Set(i, j, val); kernel2->Set(i, j, val); } } else // se supone que son dos filtros para una misma convolucion ejemplo sobel { // para el primer kernel origen.getline(bloque, BLOQUELINEA, '\n'); m = atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); kernel->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel->Set(i, j, val); } // para el segundo kernel origen.getline(bloque, BLOQUELINEA, '\n'); m = 
atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); kernel2->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel2->Set(i, j, val); } } } } int main() // main en video { //namedWindow("ventana", CV_WINDOW_AUTOSIZE); //cargar el archivo de video especificado cv::VideoCapture cvideo("video2.mp4"); //verificar si se ha podio cargar el video if (!cvideo.isOpened()) return -1; // obtener los cuadros por segundo T_DOUBLE fps = cvideo.get(CV_CAP_PROP_FPS); T_DOUBLE nf = cvideo.get(CV_CAP_PROP_FRAME_COUNT); cout << "Nro de frames: " << nf<< endl; cout<<"Nro frames por segundos: " <<fps << endl; // calcular el tiempo de espera entre cada imagen a mostrar //int delay = 1000 / fps; T_INT delay = 1; h_Matriz<float> kernelx; h_Matriz<float> kernely; /********************************************** * M O D O **********************************************/ int modo = 3;// CPU:1, PAR:2, GPU:3 int filtros = 1; //------------------------------------------------ // OBTENER KERNEL PARA FILTRO //------------------------------------------------ char *modoNombre; modoNombre = new char[250]; //strcpy(modoNombre, "sobel.ker"); // 3x3 dos filtros //strcpy(modoNombre, "repujado.ker");// 3x3 un filtro //strcpy(modoNombre, "media3.ker"); //strcpy(modoNombre, "media5.ker"); //strcpy(modoNombre, "media11.ker"); //strcpy(modoNombre, "media15.ker"); strcpy(modoNombre, "media25.ker"); AbrirKernel<T_FLOAT>(modoNombre,&kernelx,&kernely,filtros); std::cout << "Kernel: "<<modoNombre <<std::endl; kernelx.Imprimir(); kernely.Imprimir(); std::cout << "presione g para correr en gpu," << std::endl << " p para correr en cpu paralelo y " << std::endl << " c para correr en cpu secuencial...." << std::endl<<" esc para salir... 
o esperar que termine el video..."; getchar(); // para cambiar el kernel para obtener kernel para convolution convolucion(&kernelx); convolucion(&kernely); //std:: cout << "Kernel con covolucion"<<std::endl; //kernelx.Imprimir(); //kernely.Imprimir(); // Para el DEVICE d_Matriz<float> d_kernelx(kernelx); d_Matriz<float> d_kernely(kernely); //d_correlacion2<float> << <imagen.col, 1024 >> >(d_imagen, d_imagenout, d_kernelx, d_kernely); //d_imagenout.Get_Matriz(imagenout); Mat cvimagen, cvimageng ; double contf = 0; while (contf <nf) { cvideo >> cvimagen; cv::cvtColor(cvimagen, cvimageng, CV_BGR2GRAY); cvimageng.convertTo(cvimageng, CV_32FC1); // en HOST h_Matriz<float> imagen(cvimageng); h_Matriz<float> imagenout(imagen.row, imagen.col); //cargar el primer cuadro o imagen del video en frame switch (modo) { case 1: { h_tIni = clock(); if (filtros==2) correlacion2(&imagen, &imagenout, kernelx, kernely); else correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); cout << "CPU : " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; break; } case 2: { h_tIni = clock(); if (filtros == 2) p_correlacion2(&imagen, &imagenout, kernelx, kernely); else p_correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); cout << "CPU Paralela: " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; break; } case 3: { float d_ttemp; hipEventCreate(&d_tIni); hipEventCreate(&d_tFin); hipEventRecord(d_tIni, 0); d_Matriz<float> d_imagen(imagen); d_Matriz<float> d_imagenout(imagenout); if (filtros == 2) hipLaunchKernelGGL(( d_correlacion2<float>) , dim3(imagen.col), dim3(imagen.row / 2) , 0, 0, d_imagen, d_imagenout, d_kernelx, d_kernely); else hipLaunchKernelGGL(( d_correlacion<float>) , dim3(imagen.col), dim3(imagen.row / 2) , 0, 0, d_imagen, d_imagenout, d_kernelx); d_imagenout.Get_Matriz(imagenout); checkCudaErrors(hipFree(d_imagen.d_ptr)); checkCudaErrors(hipFree(d_imagenout.d_ptr)); 
hipEventRecord(d_tFin, 0); hipEventSynchronize(d_tFin); hipEventElapsedTime(&d_ttemp, d_tIni, d_tFin); strcpy(modoNombre, "Salida"); cout << "GPU: " << d_ttemp << "ms por frame" << endl; break; } default: break; } imagenout.h_Matriz2Mat(&cvimageng); cvimageng.convertTo(cvimageng, CV_8UC1); cv::imshow(modoNombre, cvimageng); //esperar un periodo de tiempo especificado por delay //si se presiona la tecla 27 (ESC) salir del loop uchar tec=cv::waitKey(delay); //cout << tec<<endl; if (tec == 99) modo = 1; if (tec == 112) modo = 2; if (tec == 103) modo = 3; if (tec == 27 ) break; contf++; delete imagen.ptr; delete imagenout.ptr; } cout << "Ups, se termino el video" << endl; cv::waitKey(); cv::destroyWindow("ventana"); } int mainimagen() // main en imagen { // calcular el tiempo de espera entre cada imagen a mostrar //int delay = 1000 / fps; T_INT delay = 1; h_Matriz<float> kernelx; h_Matriz<float> kernely; /********************************************** * M O D O **********************************************/ int modo = 3;// CPU:1, PAR:2, GPU:3 int filtros = 1; //------------------------------------------------ // OBTENER KERNEL PARA FILTRO //------------------------------------------------ char *modoNombre; modoNombre = new char[250]; //strcpy(modoNombre, "sobel.ker"); // 3x3 dos filtros //strcpy(modoNombre, "repujado.ker");// 3x3 un filtro //strcpy(modoNombre, "media3.ker"); //strcpy(modoNombre, "media5.ker"); //strcpy(modoNombre, "media11.ker"); //strcpy(modoNombre, "media15.ker"); strcpy(modoNombre, "media25.ker"); AbrirKernel<T_FLOAT>(modoNombre, &kernelx, &kernely, filtros); std::cout << "Kernel: " << modoNombre << std::endl; kernelx.Imprimir(); kernely.Imprimir(); // para cambiar el kernel para obtener kernel para convolution convolucion(&kernelx); convolucion(&kernely); //std::cout << "Kernel con covolucion" << std::endl; //kernelx.Imprimir(); //kernely.Imprimir(); // Para el DEVICE d_Matriz<float> d_kernelx(kernelx); d_Matriz<float> d_kernely(kernely); Mat 
cvimagen, cvimageng; double contf = 0; cvimagen = imread("alpaca1000.jpg", 1); cv::cvtColor(cvimagen, cvimageng, CV_BGR2GRAY); cvimageng.convertTo(cvimageng, CV_32FC1); // en HOST h_Matriz<float> imagen(cvimageng); h_Matriz<float> imagenout(imagen.row, imagen.col); //cargar el primer cuadro o imagen del video en frame //--------------------------------------- // Corrida en CPU un solo nucleo //--------------------------------------- h_tIni = clock(); if (filtros == 2) correlacion2(&imagen, &imagenout, kernelx, kernely); else correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); std::cout << "CPU : " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; //--------------------------------------- // Corrida en CPU Paralela Utilizando OpenMP //--------------------------------------- h_tIni = clock(); if (filtros == 2) p_correlacion2(&imagen, &imagenout, kernelx, kernely); else p_correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); std::cout << "CPU Paralela: " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; //--------------------------------------- // Corrida en GPU , el tiempo incluye copia de archivo a memorua GPU y viceversa //--------------------------------------- float d_ttemp; hipEventCreate(&d_tIni); hipEventCreate(&d_tFin); hipEventRecord(d_tIni, 0); d_Matriz<float> d_imagen(imagen); d_Matriz<float> d_imagenout(imagenout); if (filtros == 2) d_correlacion2<float> << <imagen.col, imagen.row / 2 >> >(d_imagen, d_imagenout, d_kernelx, d_kernely); else d_correlacion<float> << <imagen.col, imagen.row / 2 >> >(d_imagen, d_imagenout, d_kernelx); d_imagenout.Get_Matriz(imagenout); checkCudaErrors(hipFree(d_imagen.d_ptr)); checkCudaErrors(hipFree(d_imagenout.d_ptr)); hipEventRecord(d_tFin, 0); hipEventSynchronize(d_tFin); hipEventElapsedTime(&d_ttemp, d_tIni, d_tFin); strcpy(modoNombre, "Salida"); std::cout << "GPU: " << d_ttemp << "ms por frame" << endl; 
imagenout.h_Matriz2Mat(&cvimageng); cvimageng.convertTo(cvimageng, CV_8UC1); cv::imshow(modoNombre, cvimageng); cv::waitKey(); delete imagen.ptr; delete imagenout.ptr; std::cout << "Ups, se termino....." << endl; cv::destroyWindow("ventana"); return 0; }
757387a41d36fac987825721a543ab6a8d49c6fa.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <sstream> #include <stdio.h> #include <type_traits> #include <cmath> #include <time.h> #include <fstream> #include <opencv2\core\core.hpp> #include <opencv2\highgui\highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <omp.h> typedef double T_DOUBLE; typedef char T_CHAR; typedef long T_LONG; typedef float T_FLOAT; typedef int T_INT; typedef unsigned char T_BYTE; const T_LONG BLOQUELINEA = 1024; using namespace cv; using namespace std; #define norm(x, y) (fabs(x) + fabs(y)) //Variables globales clock_t h_tIni, h_tFin, h_tTotal; // Para calculo de tiempo en CPU cudaEvent_t d_tIni, d_tFin; float d_tTotal; // Para calculo de tiempo en GPU /********************************************* * PARA VERIFICAR ERRORES DE CUDA QUE SE DESENCADENA DESDE EL HOST *********************************************/ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /********************************************** * FUNCION PARA OBTENER EL TIEMPO EN CPU **********************************************/ double getMilisegundos(clock_t c) { double tiempo = 0; tiempo = ((c / (double)CLOCKS_PER_SEC) * 1000); return tiempo; } /************************************************************* * PARTE HOST *************************************************************/ template<class T> class h_Matriz { public: T *ptr; size_t row, col; h_Matriz(){} h_Matriz(size_t n) { inicializar(n,n); } h_Matriz(size_t m, size_t n) { inicializar(m, n); } h_Matriz(cv::Mat img) { inicializar(img.rows, img.cols); for (int i = 0; i < row; i++) memcpy(&(ptr[i*col]), img.ptr<T>(i, 0), col * sizeof(T)); } void inicializar(size_t m, size_t n) { row = m; 
col = n; ptr = new T[row * col]; } inline T Get(size_t r, size_t c) { return *(ptr + r*(col)+c); } inline void Set(size_t r, size_t c, T val) { *(ptr + r*(col)+c) = val; } void Set_Matriz(h_Matriz<T> mat) { delete ptr; inicializar(mat.row, mat.col); memcpy(&(ptr[0]), &(mat.ptr[0]), row*col * sizeof(T)); } void Get_Matriz(h_Matriz<T> *mat) { if (mat->row == row && mat->col == col) memcpy(&mat->ptr[0], &(ptr[0]), row*col * sizeof(T)); } void h_Matriz2Mat(cv::Mat *img) { if (img->rows == row && img->cols == col) for (size_t i = 0; i < row; i++) memcpy(img->ptr<T>(i, 0), &(ptr[i*col]), col * sizeof(T)); } void Imprimir() { for (size_t i = 0; i < row; i++) { for (size_t j = 0; j < col; j++) cout << ptr[i*col + j] << "\t"; cout << endl; } } ~h_Matriz() { } }; // modificar el kernel para convolucion template<class T> void convolucion(h_Matriz<T> *kernel) { int r = kernel->row ; int c = kernel->col; h_Matriz<T> temp(r,c); for (int k = 0; k < r; k++) for (int l = 0; l < c; l++) temp.Set(k, l, kernel->Get(r - k-1, c - l-1)); kernel->Set_Matriz(temp); } // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> void correlacion(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T> kernel) { T suma1; int mitad = kernel.row / 2; for (int i = mitad; i < img->row-mitad;i++) for (int j = mitad; j < img->col - mitad; j++) { suma1 = 0; for (int k = 0; k < kernel.row; k++) for (int l = 0; l < kernel.col; l++) suma1 += img->Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); imgout->Set(i, j, (T)(suma1)); } } // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> void correlacion2(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T1> kernel, h_Matriz<T1> kernel2) { T suma1; T suma2; T tmp; int mitad = kernel.row / 2; for (int i = mitad; i < img->row - mitad; i++) for (int j = mitad; j < img->col - mitad; j++){ suma1 = 0; suma2 = 0; for (int k = 0; k < kernel.row; k++) for (int l = 0; l < 
kernel.col; l++){ tmp = img->Get(i - mitad+k, j - mitad+l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } T val = norm((T)suma1, (T)suma2); imgout->Set(i, j, (T)val); } } /************************************************************* * PARTE HOST - paralelo con OpenMP *************************************************************/ // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> void p_correlacion(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T> kernel) { T suma1; int mitad = kernel.row / 2; int i, j, k, l; int ir = img->row - mitad; int ic = img->col - mitad; int kr = kernel.row; int kc = kernel.row; //kernel.Imprimir(); #pragma omp parallel for shared(kernel, img,imgout, ir, ic,kr,kc, mitad) private( i, j, k, l, suma1) for (int i = mitad; i < ir; i++) for (int j = mitad; j < ic; j++) { suma1 = 0; for (int k = 0; k < kr; k++) for (int l = 0; l < kc; l++) suma1 += img->Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); //std::cout << suma1<<std::endl; imgout->Set(i, j, (T)(suma1)); } } // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> void p_correlacion2(h_Matriz<T> *img, h_Matriz<T> *imgout, h_Matriz<T1> kernel, h_Matriz<T1> kernel2) { T suma1; T suma2; T tmp; T_INT mitad = kernel.row / 2; T val; T_INT ir = img->row - mitad; T_INT ic = img->col - mitad; T_INT kr = kernel.row; T_INT kc = kernel.row; T_INT i, j, k, l; omp_set_num_threads(8); #pragma omp parallel for shared(kernel, kernel2, img,imgout, ir, ic,kr,kc, mitad) private( i, j, k, l, suma1, suma2,tmp) for (i = mitad; i < ir; i++){ for (j = mitad; j < ic; j++){ suma1 = 0; suma2 = 0; for (k = 0; k < kr; k++){ for (l = 0; l < kc; l++){ tmp = img->Get(i - mitad + k, j - mitad + l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } } imgout->Set(i, j, (T)norm((T)suma1, (T)suma2)); } } } /***************************************************************** * PARTE 
DEVICE *****************************************************************/ template<class T> class d_Matriz { public: T *d_ptr; size_t row, col; d_Matriz(h_Matriz<T> mat) { inicializar(mat); } void inicializar(h_Matriz<T> mat) { row = mat.row; col = mat.col; checkCudaErrors(cudaMalloc((void**)&d_ptr, row*col * sizeof(T))); checkCudaErrors(cudaMemcpy(d_ptr, mat.ptr, col* row*sizeof(T), cudaMemcpyHostToDevice)); } __device__ inline T Get(size_t r, size_t c) { return *(d_ptr + r*(col)+c); } __device__ inline void Set(size_t r, size_t c, T val) { *(d_ptr + r*(col)+c) = val; } void Set_Matriz(h_Matriz<T> mat) { checkCudaErrors(cudaFree(d_ptr)); inicializar(mat); } void Get_Matriz(h_Matriz<T> mat) { if (mat.row == row && mat.col == col) checkCudaErrors(cudaMemcpy(mat.ptr, d_ptr, col* row*sizeof(T), cudaMemcpyDeviceToHost)); } ~d_Matriz() { } }; // modificar el kernel para convolucion template<class T> __device__ void d_conv3(d_Matriz<T> *kernel) { int r = kernel->row - 1; int c = kernel->col - 1; d_Matriz<T> temp(3); for (int k = 0; k <= r; k++) for (int l = 0; l <= c; l++) temp.Set(k, l, kernel->Get(r-k, c-l)); kernel->Set_Matriz(temp); } // correlacion, es convolucion si el kernel es modificado para convolucion template<class T> __global__ void d_correlacion(d_Matriz<T> img, d_Matriz<T> imgout, d_Matriz<T> kernel) { T suma1; size_t i = threadIdx.x; size_t j = blockIdx.x; T_INT mitad = kernel.row / 2; while (j<(img.row - mitad)*img.col) { suma1 = 0; for (T_INT k = 0; k < kernel.row; k++) for (T_INT l = 0; l < kernel.col; l++) suma1 += img.Get(i - mitad + k, j - mitad + l)*kernel.Get(k, l); imgout.Set(i, j, (T)(suma1)); j += blockDim.x*gridDim.x; } } //__device__ void d_correlacion(d_Matriz<T> &img, d_Matriz<T> &imgout, d_Matriz<T> kernel) // correlacion2 con dos kernel, es convolucion si los kernel es modificado para convolucion template<class T, class T1> __global__ void d_correlacion2(d_Matriz<T> img, d_Matriz<T> imgout, d_Matriz<T1> kernel, d_Matriz<T1> kernel2) { T 
suma1; T suma2; T tmp; T_INT mitad = kernel.row / 2; size_t i = threadIdx.x; size_t j = blockIdx.x; while (j<(img.row - mitad)*img.col) { suma1 = 0; suma2 = 0; for (T_INT k = 0; k < kernel.row; k++) for (T_INT l = 0; l < kernel.col; l++){ tmp = img.Get(i - mitad+k, j-mitad+l); suma1 += tmp*kernel.Get(k, l); suma2 += tmp*kernel2.Get(k, l); } imgout.Set(i, j, (T)norm((T)suma1, (T)suma2)); j += blockDim.x*gridDim.x; } } /***************************************************************** * OBTENER FILTRO DE ARCHIVO.KER *****************************************************************/ template<class T> void AbrirKernel(T_CHAR *FileOrigen, h_Matriz<T> *kernel, h_Matriz<T> *kernel2, T_INT &nroKernel) { std::ifstream origen(FileOrigen); if (origen.fail()) std::cerr << "Error al abrir el kernel: " << FileOrigen << std::endl; else { T_CHAR *bloque; bloque = new T_CHAR[BLOQUELINEA + 1]; T_INT m, n; T_FLOAT val; // Leer el numero de Kernel origen.getline(bloque, BLOQUELINEA, '\n'); nroKernel = atoi(bloque); if (nroKernel == 1)// 1 solo kernel { origen.getline(bloque, BLOQUELINEA, '\n'); m = atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); kernel->inicializar(m, n); kernel2->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel->Set(i, j, val); kernel2->Set(i, j, val); } } else // se supone que son dos filtros para una misma convolucion ejemplo sobel { // para el primer kernel origen.getline(bloque, BLOQUELINEA, '\n'); m = atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); kernel->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel->Set(i, j, val); } // para el segundo kernel origen.getline(bloque, BLOQUELINEA, '\n'); m = atoi(bloque); origen.getline(bloque, BLOQUELINEA, '\n'); n = atoi(bloque); 
kernel2->inicializar(m, n); for (T_INT i = 0; i < m; i++) // llenar la matriz for (T_INT j = 0; j < m; j++) { origen.getline(bloque, BLOQUELINEA, '\n'); val = atof(bloque); kernel2->Set(i, j, val); } } } } int main() // main en video { //namedWindow("ventana", CV_WINDOW_AUTOSIZE); //cargar el archivo de video especificado cv::VideoCapture cvideo("video2.mp4"); //verificar si se ha podio cargar el video if (!cvideo.isOpened()) return -1; // obtener los cuadros por segundo T_DOUBLE fps = cvideo.get(CV_CAP_PROP_FPS); T_DOUBLE nf = cvideo.get(CV_CAP_PROP_FRAME_COUNT); cout << "Nro de frames: " << nf<< endl; cout<<"Nro frames por segundos: " <<fps << endl; // calcular el tiempo de espera entre cada imagen a mostrar //int delay = 1000 / fps; T_INT delay = 1; h_Matriz<float> kernelx; h_Matriz<float> kernely; /********************************************** * M O D O **********************************************/ int modo = 3;// CPU:1, PAR:2, GPU:3 int filtros = 1; //------------------------------------------------ // OBTENER KERNEL PARA FILTRO //------------------------------------------------ char *modoNombre; modoNombre = new char[250]; //strcpy(modoNombre, "sobel.ker"); // 3x3 dos filtros //strcpy(modoNombre, "repujado.ker");// 3x3 un filtro //strcpy(modoNombre, "media3.ker"); //strcpy(modoNombre, "media5.ker"); //strcpy(modoNombre, "media11.ker"); //strcpy(modoNombre, "media15.ker"); strcpy(modoNombre, "media25.ker"); AbrirKernel<T_FLOAT>(modoNombre,&kernelx,&kernely,filtros); std::cout << "Kernel: "<<modoNombre <<std::endl; kernelx.Imprimir(); kernely.Imprimir(); std::cout << "presione g para correr en gpu," << std::endl << " p para correr en cpu paralelo y " << std::endl << " c para correr en cpu secuencial...." << std::endl<<" esc para salir... 
o esperar que termine el video..."; getchar(); // para cambiar el kernel para obtener kernel para convolution convolucion(&kernelx); convolucion(&kernely); //std:: cout << "Kernel con covolucion"<<std::endl; //kernelx.Imprimir(); //kernely.Imprimir(); // Para el DEVICE d_Matriz<float> d_kernelx(kernelx); d_Matriz<float> d_kernely(kernely); //d_correlacion2<float> << <imagen.col, 1024 >> >(d_imagen, d_imagenout, d_kernelx, d_kernely); //d_imagenout.Get_Matriz(imagenout); Mat cvimagen, cvimageng ; double contf = 0; while (contf <nf) { cvideo >> cvimagen; cv::cvtColor(cvimagen, cvimageng, CV_BGR2GRAY); cvimageng.convertTo(cvimageng, CV_32FC1); // en HOST h_Matriz<float> imagen(cvimageng); h_Matriz<float> imagenout(imagen.row, imagen.col); //cargar el primer cuadro o imagen del video en frame switch (modo) { case 1: { h_tIni = clock(); if (filtros==2) correlacion2(&imagen, &imagenout, kernelx, kernely); else correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); cout << "CPU : " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; break; } case 2: { h_tIni = clock(); if (filtros == 2) p_correlacion2(&imagen, &imagenout, kernelx, kernely); else p_correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); cout << "CPU Paralela: " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; break; } case 3: { float d_ttemp; cudaEventCreate(&d_tIni); cudaEventCreate(&d_tFin); cudaEventRecord(d_tIni, 0); d_Matriz<float> d_imagen(imagen); d_Matriz<float> d_imagenout(imagenout); if (filtros == 2) d_correlacion2<float> <<<imagen.col, imagen.row / 2 >>>(d_imagen, d_imagenout, d_kernelx, d_kernely); else d_correlacion<float> <<<imagen.col, imagen.row / 2 >>>(d_imagen, d_imagenout, d_kernelx); d_imagenout.Get_Matriz(imagenout); checkCudaErrors(cudaFree(d_imagen.d_ptr)); checkCudaErrors(cudaFree(d_imagenout.d_ptr)); cudaEventRecord(d_tFin, 0); cudaEventSynchronize(d_tFin); 
cudaEventElapsedTime(&d_ttemp, d_tIni, d_tFin); strcpy(modoNombre, "Salida"); cout << "GPU: " << d_ttemp << "ms por frame" << endl; break; } default: break; } imagenout.h_Matriz2Mat(&cvimageng); cvimageng.convertTo(cvimageng, CV_8UC1); cv::imshow(modoNombre, cvimageng); //esperar un periodo de tiempo especificado por delay //si se presiona la tecla 27 (ESC) salir del loop uchar tec=cv::waitKey(delay); //cout << tec<<endl; if (tec == 99) modo = 1; if (tec == 112) modo = 2; if (tec == 103) modo = 3; if (tec == 27 ) break; contf++; delete imagen.ptr; delete imagenout.ptr; } cout << "Ups, se termino el video" << endl; cv::waitKey(); cv::destroyWindow("ventana"); } int mainimagen() // main en imagen { // calcular el tiempo de espera entre cada imagen a mostrar //int delay = 1000 / fps; T_INT delay = 1; h_Matriz<float> kernelx; h_Matriz<float> kernely; /********************************************** * M O D O **********************************************/ int modo = 3;// CPU:1, PAR:2, GPU:3 int filtros = 1; //------------------------------------------------ // OBTENER KERNEL PARA FILTRO //------------------------------------------------ char *modoNombre; modoNombre = new char[250]; //strcpy(modoNombre, "sobel.ker"); // 3x3 dos filtros //strcpy(modoNombre, "repujado.ker");// 3x3 un filtro //strcpy(modoNombre, "media3.ker"); //strcpy(modoNombre, "media5.ker"); //strcpy(modoNombre, "media11.ker"); //strcpy(modoNombre, "media15.ker"); strcpy(modoNombre, "media25.ker"); AbrirKernel<T_FLOAT>(modoNombre, &kernelx, &kernely, filtros); std::cout << "Kernel: " << modoNombre << std::endl; kernelx.Imprimir(); kernely.Imprimir(); // para cambiar el kernel para obtener kernel para convolution convolucion(&kernelx); convolucion(&kernely); //std::cout << "Kernel con covolucion" << std::endl; //kernelx.Imprimir(); //kernely.Imprimir(); // Para el DEVICE d_Matriz<float> d_kernelx(kernelx); d_Matriz<float> d_kernely(kernely); Mat cvimagen, cvimageng; double contf = 0; cvimagen = 
imread("alpaca1000.jpg", 1); cv::cvtColor(cvimagen, cvimageng, CV_BGR2GRAY); cvimageng.convertTo(cvimageng, CV_32FC1); // en HOST h_Matriz<float> imagen(cvimageng); h_Matriz<float> imagenout(imagen.row, imagen.col); //cargar el primer cuadro o imagen del video en frame //--------------------------------------- // Corrida en CPU un solo nucleo //--------------------------------------- h_tIni = clock(); if (filtros == 2) correlacion2(&imagen, &imagenout, kernelx, kernely); else correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); std::cout << "CPU : " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; //--------------------------------------- // Corrida en CPU Paralela Utilizando OpenMP //--------------------------------------- h_tIni = clock(); if (filtros == 2) p_correlacion2(&imagen, &imagenout, kernelx, kernely); else p_correlacion(&imagen, &imagenout, kernelx); h_tFin = clock(); strcpy(modoNombre, "Salida"); std::cout << "CPU Paralela: " << getMilisegundos(h_tFin - h_tIni) << "ms por frame" << endl; //--------------------------------------- // Corrida en GPU , el tiempo incluye copia de archivo a memorua GPU y viceversa //--------------------------------------- float d_ttemp; cudaEventCreate(&d_tIni); cudaEventCreate(&d_tFin); cudaEventRecord(d_tIni, 0); d_Matriz<float> d_imagen(imagen); d_Matriz<float> d_imagenout(imagenout); if (filtros == 2) d_correlacion2<float> << <imagen.col, imagen.row / 2 >> >(d_imagen, d_imagenout, d_kernelx, d_kernely); else d_correlacion<float> << <imagen.col, imagen.row / 2 >> >(d_imagen, d_imagenout, d_kernelx); d_imagenout.Get_Matriz(imagenout); checkCudaErrors(cudaFree(d_imagen.d_ptr)); checkCudaErrors(cudaFree(d_imagenout.d_ptr)); cudaEventRecord(d_tFin, 0); cudaEventSynchronize(d_tFin); cudaEventElapsedTime(&d_ttemp, d_tIni, d_tFin); strcpy(modoNombre, "Salida"); std::cout << "GPU: " << d_ttemp << "ms por frame" << endl; imagenout.h_Matriz2Mat(&cvimageng); 
cvimageng.convertTo(cvimageng, CV_8UC1); cv::imshow(modoNombre, cvimageng); cv::waitKey(); delete imagen.ptr; delete imagenout.ptr; std::cout << "Ups, se termino....." << endl; cv::destroyWindow("ventana"); return 0; }
208ca31fdd7481e01a1ccc446aef7b234c72938b.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaCommon.h> #include <CudaAssert.h> #include <CudaDevice.h> #include <CublasFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <hip/hip_runtime_api.h> namespace NeoML { void CCudaMathEngine::VectorDotProduct(const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, int vectorSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdot( cublasHandle, vectorSize, GetRaw( firstHandle ), 1, GetRaw( secondHandle ), 1, GetRaw( resultHandle ) ) ); } void CCudaMathEngine::VectorMultiplyAndAdd( const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int vectorSize, const CConstFloatHandle& multHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); SetCudaDevice( 
device->DeviceNumber ); const float* first = GetRaw( firstHandle ); const float* second = GetRaw( secondHandle ); float* result = GetRaw( resultHandle ); const float* mult = GetRaw( multHandle ); if( result != first ) { ASSERT_CUDA( hipMemcpy( result, first, vectorSize * sizeof( float ), hipMemcpyDeviceToDevice ) ); } ASSERT_CUBLAS( cublas->Saxpy( cublasHandle, vectorSize, mult, second, 1, result, 1 ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstZero, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondHeight, const CFloatHandle& resultHandle, int resultBufferSize ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), firstWidth, firstWidth * secondHeight, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondHeight, secondHeight * firstHeight, batchSize ) ); } void 
CCudaMathEngine::MultiplyTransposedMatrixByMatrixAndAdd( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondWidth, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw(secondHandle), secondWidth, firstHeight * secondWidth, GetRaw(firstHandle), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw(resultHandle), secondWidth, firstWidth * secondWidth, batchSize ) ); } void CCudaMathEngine::MultiplyMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); if( batchSize == 1 ) { ASSERT_CUBLAS( 
cublas->Sgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, GetRaw( firstHandle ), firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth ) ); } else { ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, firstWidth * secondWidth, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth, secondWidth * firstHeight, batchSize ) ); } } void CCudaMathEngine::multiplyMatrixByTransposedMatrixAndAdd(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize) { SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
208ca31fdd7481e01a1ccc446aef7b234c72938b.cu
/* Copyright © 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaCommon.h> #include <CudaAssert.h> #include <CudaDevice.h> #include <CublasFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <cuda_runtime_api.h> namespace NeoML { void CCudaMathEngine::VectorDotProduct(const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, int vectorSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdot( cublasHandle, vectorSize, GetRaw( firstHandle ), 1, GetRaw( secondHandle ), 1, GetRaw( resultHandle ) ) ); } void CCudaMathEngine::VectorMultiplyAndAdd( const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int vectorSize, const CConstFloatHandle& multHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const float* first = GetRaw( firstHandle ); const 
float* second = GetRaw( secondHandle ); float* result = GetRaw( resultHandle ); const float* mult = GetRaw( multHandle ); if( result != first ) { ASSERT_CUDA( cudaMemcpy( result, first, vectorSize * sizeof( float ), cudaMemcpyDeviceToDevice ) ); } ASSERT_CUBLAS( cublas->Saxpy( cublasHandle, vectorSize, mult, second, 1, result, 1 ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstZero, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondHeight, const CFloatHandle& resultHandle, int resultBufferSize ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), firstWidth, firstWidth * secondHeight, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondHeight, secondHeight * firstHeight, batchSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrixAndAdd( const CConstFloatHandle& firstHandle, int 
firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondWidth, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw(secondHandle), secondWidth, firstHeight * secondWidth, GetRaw(firstHandle), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw(resultHandle), secondWidth, firstWidth * secondWidth, batchSize ) ); } void CCudaMathEngine::MultiplyMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); if( batchSize == 1 ) { ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, 
GetRaw( secondHandle ), secondWidth, GetRaw( firstHandle ), firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth ) ); } else { ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, firstWidth * secondWidth, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth, secondWidth * firstHeight, batchSize ) ); } } void CCudaMathEngine::multiplyMatrixByTransposedMatrixAndAdd(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize) { SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
db9bcc0ed0e17f615c9c05cc637608f4308234d8.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include "../common/stopwatch.h" #define LOG 0 void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); } void initialData(float *ip, int size) { time_t t; srand((unsigned)time(&t)); for (int i = 0; i < size; i++) { ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void switchOnCPU(float *A, float *B, const int N) { for (int i = 0; i < N; i++) B[N - i-1] = A[i]; } __global__ void switchOnGPU(float *A, float *B, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) B[N-i-1] = A[i]; } void print(float* A,float*B, const int N) { printf("\n\n"); for (int i = 0; i < N; i++) printf("%f ", A[i]); printf("\n"); printf("\n"); for (int i = 0; i < N; i++) printf("%f ", B[i]); printf("\n"); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); int power = 18; int nthreads = 512; if (argc > 1) { nthreads = atoi(argv[1]); } if (argc > 2) { power = atoi(argv[2]); } int nElem = 1 << power; printf("Vector size %d\n", nElem); // alociranje host memorije size_t nBytes = nElem * sizeof(float); float *h_A, *h_B,*h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); /*for (int i = 0; i < nElem; i++) h_A[i] =(float) i + 1; */ dim3 block(nthreads); dim3 grid((nElem + block.x - 1) / block.x); Stopwatch s; switchOnCPU(h_A, h_B, nElem); printf("sumArraysOnCPU - Time elapsed %f: 
",s.elapsed()); // alociranje device globalne memorije //print(h_A, h_B, nElem); float *d_A, *d_B; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); // transfer podataka sa host-a na device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); s.reset(); hipLaunchKernelGGL(( switchOnGPU) , dim3(grid), dim3(block), 0, 0, d_A, d_B, nElem); hipDeviceSynchronize(); printf("\nsumArraysOnGPU <<<%d,%d>>> Time elapsed %f" \ "sec\n", grid.x, block.x, s.elapsed()); // kopiranje rezultata kernela nazad na host hipMemcpy(h_C, d_B, nBytes, hipMemcpyDeviceToHost); // provera device rezultata checkResult(h_B, h_C, nElem); // oslobadanje globalne memorije device-a hipFree(d_A); hipFree(d_B); // oslobadanje host memorije free(h_A); free(h_B); free(h_C); hipDeviceReset(); return(0); } /* GeForce GTX 1050 Vector size 262144 sumArraysOnCPU - Time elapsed 0.001133: sumArraysOnGPU <<<8192,32>>> Time elapsed 0.000464sec Arrays match. ==8520== Profiling application: a 32 ==8520== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 42.49% 157.92us 1 157.92us 157.92us 157.92us [CUDA memcpy HtoD] 42.16% 156.70us 1 156.70us 156.70us 156.70us [CUDA memcpy DtoH] 15.34% 57.023us 1 57.023us 57.023us 57.023us switchOnGPU(float*, float*, int) API calls: 72.82% 208.47ms 2 104.23ms 12.400us 208.46ms hipMalloc 25.98% 74.386ms 1 74.386ms 74.386ms 74.386ms hipDeviceReset 0.43% 1.2370ms 2 618.50us 244.70us 992.30us hipMemcpy 0.24% 687.90us 97 7.0910us 200ns 314.60us hipDeviceGetAttribute 0.21% 590.30us 1 590.30us 590.30us 590.30us hipGetDeviceProperties 0.14% 400.40us 1 400.40us 400.40us 400.40us hipDeviceSynchronize 0.13% 359.10us 2 179.55us 90.100us 269.00us hipFree 0.02% 59.000us 1 59.000us 59.000us 59.000us cudaLaunchKernel 0.01% 37.700us 1 37.700us 37.700us 37.700us cuDeviceTotalMem 0.00% 13.800us 1 13.800us 13.800us 13.800us hipDeviceGetPCIBusId 0.00% 13.000us 1 13.000us 13.000us 13.000us hipSetDevice 0.00% 7.4000us 2 3.7000us 400ns 7.0000us hipDeviceGet 0.00% 
2.2000us 3 733ns 300ns 1.1000us hipGetDeviceCount 0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us hipDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns hipDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001133: sumArraysOnGPU <<<8192,32>>> Time elapsed 0.000464sec Arrays match. ==8520== Profiling application: a 32 ==8520== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 42.49% 157.92us 1 157.92us 157.92us 157.92us [CUDA memcpy HtoD] 42.16% 156.70us 1 156.70us 156.70us 156.70us [CUDA memcpy DtoH] 15.34% 57.023us 1 57.023us 57.023us 57.023us switchOnGPU(float*, float*, int) API calls: 72.82% 208.47ms 2 104.23ms 12.400us 208.46ms hipMalloc 25.98% 74.386ms 1 74.386ms 74.386ms 74.386ms hipDeviceReset 0.43% 1.2370ms 2 618.50us 244.70us 992.30us hipMemcpy 0.24% 687.90us 97 7.0910us 200ns 314.60us hipDeviceGetAttribute 0.21% 590.30us 1 590.30us 590.30us 590.30us hipGetDeviceProperties 0.14% 400.40us 1 400.40us 400.40us 400.40us hipDeviceSynchronize 0.13% 359.10us 2 179.55us 90.100us 269.00us hipFree 0.02% 59.000us 1 59.000us 59.000us 59.000us cudaLaunchKernel 0.01% 37.700us 1 37.700us 37.700us 37.700us cuDeviceTotalMem 0.00% 13.800us 1 13.800us 13.800us 13.800us hipDeviceGetPCIBusId 0.00% 13.000us 1 13.000us 13.000us 13.000us hipSetDevice 0.00% 7.4000us 2 3.7000us 400ns 7.0000us hipDeviceGet 0.00% 2.2000us 3 733ns 300ns 1.1000us hipGetDeviceCount 0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us hipDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns hipDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001112: sumArraysOnGPU <<<512,512>>> Time elapsed 0.000413sec Arrays match. 
==5896== Profiling application: a 512 ==5896== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 46.63% 157.70us 1 157.70us 157.70us 157.70us [CUDA memcpy HtoD] 46.46% 157.12us 1 157.12us 157.12us 157.12us [CUDA memcpy DtoH] 6.91% 23.360us 1 23.360us 23.360us 23.360us switchOnGPU(float*, float*, int) API calls: 73.60% 209.67ms 2 104.83ms 12.300us 209.65ms hipMalloc 25.22% 71.841ms 1 71.841ms 71.841ms 71.841ms hipDeviceReset 0.42% 1.2078ms 2 603.90us 243.60us 964.20us hipMemcpy 0.24% 693.50us 97 7.1490us 200ns 315.50us hipDeviceGetAttribute 0.21% 595.20us 1 595.20us 595.20us 595.20us hipGetDeviceProperties 0.12% 346.60us 1 346.60us 346.60us 346.60us hipDeviceSynchronize 0.12% 334.90us 2 167.45us 87.400us 247.50us hipFree 0.02% 69.800us 1 69.800us 69.800us 69.800us cuDeviceTotalMem 0.02% 61.600us 1 61.600us 61.600us 61.600us cudaLaunchKernel 0.00% 13.500us 1 13.500us 13.500us 13.500us hipDeviceGetPCIBusId 0.00% 12.700us 1 12.700us 12.700us 12.700us hipSetDevice 0.00% 7.1000us 2 3.5500us 400ns 6.7000us hipDeviceGet 0.00% 2.6000us 3 866ns 400ns 1.2000us hipGetDeviceCount 0.00% 1.6000us 1 1.6000us 1.6000us 1.6000us hipDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 600ns 1 600ns 600ns 600ns hipDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001207: sumArraysOnGPU <<<256,1024>>> Time elapsed 0.000417sec Arrays match. 
==7556== Profiling application: a 1024 ==7556== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 46.36% 157.47us 1 157.47us 157.47us 157.47us [CUDA memcpy HtoD] 46.26% 157.12us 1 157.12us 157.12us 157.12us [CUDA memcpy DtoH] 7.38% 25.056us 1 25.056us 25.056us 25.056us switchOnGPU(float*, float*, int) API calls: 72.98% 211.48ms 2 105.74ms 14.800us 211.46ms hipMalloc 25.85% 74.896ms 1 74.896ms 74.896ms 74.896ms hipDeviceReset 0.44% 1.2658ms 2 632.90us 255.80us 1.0100ms hipMemcpy 0.24% 686.70us 97 7.0790us 200ns 312.90us hipDeviceGetAttribute 0.21% 614.60us 1 614.60us 614.60us 614.60us hipGetDeviceProperties 0.12% 350.90us 1 350.90us 350.90us 350.90us hipDeviceSynchronize 0.12% 341.80us 2 170.90us 81.700us 260.10us hipFree 0.02% 62.000us 1 62.000us 62.000us 62.000us cudaLaunchKernel 0.01% 41.000us 1 41.000us 41.000us 41.000us cuDeviceTotalMem 0.00% 13.600us 1 13.600us 13.600us 13.600us hipDeviceGetPCIBusId 0.00% 13.100us 1 13.100us 13.100us 13.100us hipSetDevice 0.00% 6.9000us 2 3.4500us 400ns 6.5000us hipDeviceGet 0.00% 2.2000us 3 733ns 300ns 1.2000us hipGetDeviceCount 0.00% 1.3000us 1 1.3000us 1.3000us 1.3000us hipDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns hipDeviceGetUuid */
db9bcc0ed0e17f615c9c05cc637608f4308234d8.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> #include <stdlib.h> #include "../common/stopwatch.h" #define LOG 0 void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); } void initialData(float *ip, int size) { time_t t; srand((unsigned)time(&t)); for (int i = 0; i < size; i++) { ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void switchOnCPU(float *A, float *B, const int N) { for (int i = 0; i < N; i++) B[N - i-1] = A[i]; } __global__ void switchOnGPU(float *A, float *B, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) B[N-i-1] = A[i]; } void print(float* A,float*B, const int N) { printf("\n\n"); for (int i = 0; i < N; i++) printf("%f ", A[i]); printf("\n"); printf("\n"); for (int i = 0; i < N; i++) printf("%f ", B[i]); printf("\n"); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); int power = 18; int nthreads = 512; if (argc > 1) { nthreads = atoi(argv[1]); } if (argc > 2) { power = atoi(argv[2]); } int nElem = 1 << power; printf("Vector size %d\n", nElem); // alociranje host memorije size_t nBytes = nElem * sizeof(float); float *h_A, *h_B,*h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); /*for (int i = 0; i < nElem; i++) h_A[i] =(float) i + 1; */ dim3 block(nthreads); dim3 grid((nElem + block.x - 1) / block.x); Stopwatch s; switchOnCPU(h_A, h_B, nElem); printf("sumArraysOnCPU - Time elapsed %f: ",s.elapsed()); // alociranje device globalne memorije //print(h_A, 
h_B, nElem); float *d_A, *d_B; cudaMalloc((float**)&d_A, nBytes); cudaMalloc((float**)&d_B, nBytes); // transfer podataka sa host-a na device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); s.reset(); switchOnGPU <<<grid, block>>> (d_A, d_B, nElem); cudaDeviceSynchronize(); printf("\nsumArraysOnGPU <<<%d,%d>>> Time elapsed %f" \ "sec\n", grid.x, block.x, s.elapsed()); // kopiranje rezultata kernela nazad na host cudaMemcpy(h_C, d_B, nBytes, cudaMemcpyDeviceToHost); // provera device rezultata checkResult(h_B, h_C, nElem); // oslobadanje globalne memorije device-a cudaFree(d_A); cudaFree(d_B); // oslobadanje host memorije free(h_A); free(h_B); free(h_C); cudaDeviceReset(); return(0); } /* GeForce GTX 1050 Vector size 262144 sumArraysOnCPU - Time elapsed 0.001133: sumArraysOnGPU <<<8192,32>>> Time elapsed 0.000464sec Arrays match. ==8520== Profiling application: a 32 ==8520== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 42.49% 157.92us 1 157.92us 157.92us 157.92us [CUDA memcpy HtoD] 42.16% 156.70us 1 156.70us 156.70us 156.70us [CUDA memcpy DtoH] 15.34% 57.023us 1 57.023us 57.023us 57.023us switchOnGPU(float*, float*, int) API calls: 72.82% 208.47ms 2 104.23ms 12.400us 208.46ms cudaMalloc 25.98% 74.386ms 1 74.386ms 74.386ms 74.386ms cudaDeviceReset 0.43% 1.2370ms 2 618.50us 244.70us 992.30us cudaMemcpy 0.24% 687.90us 97 7.0910us 200ns 314.60us cuDeviceGetAttribute 0.21% 590.30us 1 590.30us 590.30us 590.30us cudaGetDeviceProperties 0.14% 400.40us 1 400.40us 400.40us 400.40us cudaDeviceSynchronize 0.13% 359.10us 2 179.55us 90.100us 269.00us cudaFree 0.02% 59.000us 1 59.000us 59.000us 59.000us cudaLaunchKernel 0.01% 37.700us 1 37.700us 37.700us 37.700us cuDeviceTotalMem 0.00% 13.800us 1 13.800us 13.800us 13.800us cuDeviceGetPCIBusId 0.00% 13.000us 1 13.000us 13.000us 13.000us cudaSetDevice 0.00% 7.4000us 2 3.7000us 400ns 7.0000us cuDeviceGet 0.00% 2.2000us 3 733ns 300ns 1.1000us cuDeviceGetCount 0.00% 1.4000us 1 1.4000us 1.4000us 
1.4000us cuDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns cuDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001133: sumArraysOnGPU <<<8192,32>>> Time elapsed 0.000464sec Arrays match. ==8520== Profiling application: a 32 ==8520== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 42.49% 157.92us 1 157.92us 157.92us 157.92us [CUDA memcpy HtoD] 42.16% 156.70us 1 156.70us 156.70us 156.70us [CUDA memcpy DtoH] 15.34% 57.023us 1 57.023us 57.023us 57.023us switchOnGPU(float*, float*, int) API calls: 72.82% 208.47ms 2 104.23ms 12.400us 208.46ms cudaMalloc 25.98% 74.386ms 1 74.386ms 74.386ms 74.386ms cudaDeviceReset 0.43% 1.2370ms 2 618.50us 244.70us 992.30us cudaMemcpy 0.24% 687.90us 97 7.0910us 200ns 314.60us cuDeviceGetAttribute 0.21% 590.30us 1 590.30us 590.30us 590.30us cudaGetDeviceProperties 0.14% 400.40us 1 400.40us 400.40us 400.40us cudaDeviceSynchronize 0.13% 359.10us 2 179.55us 90.100us 269.00us cudaFree 0.02% 59.000us 1 59.000us 59.000us 59.000us cudaLaunchKernel 0.01% 37.700us 1 37.700us 37.700us 37.700us cuDeviceTotalMem 0.00% 13.800us 1 13.800us 13.800us 13.800us cuDeviceGetPCIBusId 0.00% 13.000us 1 13.000us 13.000us 13.000us cudaSetDevice 0.00% 7.4000us 2 3.7000us 400ns 7.0000us cuDeviceGet 0.00% 2.2000us 3 733ns 300ns 1.1000us cuDeviceGetCount 0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us cuDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns cuDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001112: sumArraysOnGPU <<<512,512>>> Time elapsed 0.000413sec Arrays match. 
==5896== Profiling application: a 512 ==5896== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 46.63% 157.70us 1 157.70us 157.70us 157.70us [CUDA memcpy HtoD] 46.46% 157.12us 1 157.12us 157.12us 157.12us [CUDA memcpy DtoH] 6.91% 23.360us 1 23.360us 23.360us 23.360us switchOnGPU(float*, float*, int) API calls: 73.60% 209.67ms 2 104.83ms 12.300us 209.65ms cudaMalloc 25.22% 71.841ms 1 71.841ms 71.841ms 71.841ms cudaDeviceReset 0.42% 1.2078ms 2 603.90us 243.60us 964.20us cudaMemcpy 0.24% 693.50us 97 7.1490us 200ns 315.50us cuDeviceGetAttribute 0.21% 595.20us 1 595.20us 595.20us 595.20us cudaGetDeviceProperties 0.12% 346.60us 1 346.60us 346.60us 346.60us cudaDeviceSynchronize 0.12% 334.90us 2 167.45us 87.400us 247.50us cudaFree 0.02% 69.800us 1 69.800us 69.800us 69.800us cuDeviceTotalMem 0.02% 61.600us 1 61.600us 61.600us 61.600us cudaLaunchKernel 0.00% 13.500us 1 13.500us 13.500us 13.500us cuDeviceGetPCIBusId 0.00% 12.700us 1 12.700us 12.700us 12.700us cudaSetDevice 0.00% 7.1000us 2 3.5500us 400ns 6.7000us cuDeviceGet 0.00% 2.6000us 3 866ns 400ns 1.2000us cuDeviceGetCount 0.00% 1.6000us 1 1.6000us 1.6000us 1.6000us cuDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetUuid Vector size 262144 sumArraysOnCPU - Time elapsed 0.001207: sumArraysOnGPU <<<256,1024>>> Time elapsed 0.000417sec Arrays match. 
==7556== Profiling application: a 1024 ==7556== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 46.36% 157.47us 1 157.47us 157.47us 157.47us [CUDA memcpy HtoD] 46.26% 157.12us 1 157.12us 157.12us 157.12us [CUDA memcpy DtoH] 7.38% 25.056us 1 25.056us 25.056us 25.056us switchOnGPU(float*, float*, int) API calls: 72.98% 211.48ms 2 105.74ms 14.800us 211.46ms cudaMalloc 25.85% 74.896ms 1 74.896ms 74.896ms 74.896ms cudaDeviceReset 0.44% 1.2658ms 2 632.90us 255.80us 1.0100ms cudaMemcpy 0.24% 686.70us 97 7.0790us 200ns 312.90us cuDeviceGetAttribute 0.21% 614.60us 1 614.60us 614.60us 614.60us cudaGetDeviceProperties 0.12% 350.90us 1 350.90us 350.90us 350.90us cudaDeviceSynchronize 0.12% 341.80us 2 170.90us 81.700us 260.10us cudaFree 0.02% 62.000us 1 62.000us 62.000us 62.000us cudaLaunchKernel 0.01% 41.000us 1 41.000us 41.000us 41.000us cuDeviceTotalMem 0.00% 13.600us 1 13.600us 13.600us 13.600us cuDeviceGetPCIBusId 0.00% 13.100us 1 13.100us 13.100us 13.100us cudaSetDevice 0.00% 6.9000us 2 3.4500us 400ns 6.5000us cuDeviceGet 0.00% 2.2000us 3 733ns 300ns 1.2000us cuDeviceGetCount 0.00% 1.3000us 1 1.3000us 1.3000us 1.3000us cuDeviceGetName 0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetLuid 0.00% 500ns 1 500ns 500ns 500ns cuDeviceGetUuid */
7244a9bdf8b7feed02f49d7a089d603951aa9ec9.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensor.hpp> #include <THH/generic/THHTensorMathReduce.hip> #include <THH/THHGenerateBFloat16Type.h>
7244a9bdf8b7feed02f49d7a089d603951aa9ec9.cu
#include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensor.hpp> #include <THC/generic/THCTensorMathReduce.cu> #include <THC/THCGenerateBFloat16Type.h>
72740cbc7e29b5626da17e141ee6f3e91022845a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include "../cuda_cryptmd5.h" #include "cuda_common.cuh" extern "C" void md5_crypt_gpu(crypt_md5_password *, uint32_t *, crypt_md5_salt *, int count); __device__ __constant__ char md5_salt_prefix_cu[] = "$1$"; __device__ __constant__ char apr1_salt_prefix_cu[] = "$apr1$"; __device__ __constant__ crypt_md5_salt cuda_salt[1]; __device__ void md5_process_block_cu(const void *, size_t, md5_ctx *); __device__ void md5_process_bytes_cu(const void *, size_t, md5_ctx *); __device__ void ctx_init(md5_ctx * ctx, uint8_t * ctx_buflen) { uint32_t *buf = (uint32_t *) ctx->buffer; int i = 14; while (i--) *buf++ = 0; *ctx_buflen = 0; } __device__ void ctx_update(md5_ctx * ctx, const char *string, uint8_t len, uint8_t * ctx_buflen) { uint8_t *dest = &ctx->buffer[*ctx_buflen]; uint8_t *src = (uint8_t *) string; *ctx_buflen += len; memcpy(dest, src, len); } __device__ void md5_digest(md5_ctx * ctx, uint32_t * result, uint8_t * ctx_buflen) { uint32_t len = *ctx_buflen; uint32_t *x = (uint32_t *) ctx->buffer; x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3)); len <<= 3; uint32_t b = 0xefcdab89; uint32_t c = 0x98badcfe; uint32_t d = 0x10325476; uint32_t a = ROTATE_LEFT(AC1 + x[0], S11); a += b; /* 1 */ d = ROTATE_LEFT((c ^ (a & MASK1)) + x[1] + AC2pCd, S12); d += a; /* 2 */ c = ROTATE_LEFT(F(d, a, b) + x[2] + AC3pCc, S13); c += d; /* 3 */ b = ROTATE_LEFT(F(c, d, a) + x[3] + AC4pCb, S14); b += c; /* 4 */ FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ FF(b, c, d, 
a, x[7], S14, 0xfd469501); /* 8 */ FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ FF(d, a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF(c, d, a, b, len, S13, 0xa679438e); /* 15 */ FF2(b, c, d, a, S14, 0x49b40821); /* 16 */ GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG2(c, d, a, b, S23, 0xd8a1e681); /* 23 */ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ GG(d, a, b, c, len, S22, 0xc33707d6); /* 26 */ GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH(b, c, d, a, len, S34, 0xfde5380c); /* 36 */ HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH2(c, d, a, b, S33, 0x1fa27cf8); /* 47 */ HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ II(d, 
a, b, c, x[7], S42, 0x432aff97); /* 50 */ II(c, d, a, b, len, S43, 0xab9423a7); /* 51 */ II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II(b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */ II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ II2(d, a, b, c, S42, 0xfe2ce6e0); /* 58 */ II(c, d, a, b, x[6], S43, 0xa3014314); /* 59 */ II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ result[0] = a + 0x67452301; result[1] = b + 0xefcdab89; result[2] = c + 0x98badcfe; result[3] = d + 0x10325476; } __device__ void md5crypt(const char *gpass, size_t keysize, char *result) { uint32_t i; __shared__ uint32_t alt_result[THREADS][4 + 1]; __shared__ char spass[THREADS][16 + 4]; uint8_t ctx_buflen; char *pass = spass[threadIdx.x]; memcpy(pass, gpass, 15); uint8_t pass_len = keysize; uint8_t salt_len = cuda_salt[0].length; char *salt = cuda_salt[0].salt; md5_ctx ctx; ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); if (cuda_salt[0].prefix == '1') { ctx_update(&ctx, md5_salt_prefix_cu, 3, &ctx_buflen); } else ctx_update(&ctx, apr1_salt_prefix_cu, 6, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, (const char *) alt_result[threadIdx.x], pass_len, &ctx_buflen); *alt_result[threadIdx.x] = 0; for (i = pass_len; i > 0; i >>= 1) if ((i & 1) != 0) ctx.buffer[ctx_buflen++] = ((const char *) alt_result[threadIdx.x])[0]; else ctx.buffer[ctx_buflen++] = pass[0]; md5_digest(&ctx, 
alt_result[threadIdx.x], &ctx_buflen); for (i = 0; i < 1000; i++) { ctx_init(&ctx, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); else ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); if (i % 3 != 0) ctx_update(&ctx, salt, salt_len, &ctx_buflen); if (i % 7 != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); else ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); } char cracked = 1; cracked &= (alt_result[threadIdx.x][0] == cuda_salt[0].hash[0]); cracked &= (alt_result[threadIdx.x][1] == cuda_salt[0].hash[1]); cracked &= (alt_result[threadIdx.x][2] == cuda_salt[0].hash[2]); cracked &= (alt_result[threadIdx.x][3] == cuda_salt[0].hash[3]); *result = cracked; } __global__ void kernel_crypt_r(crypt_md5_password * inbuffer, crypt_md5_crack * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; md5crypt((char *) inbuffer[idx].v, inbuffer[idx].length, &outbuffer[idx].cracked); } __host__ void md5_crypt_gpu(crypt_md5_password * inbuffer, uint32_t * outbuffer, crypt_md5_salt * host_salt, int count) { int blocks = (count + THREADS - 1) / THREADS; HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_md5_salt))); crypt_md5_password *cuda_inbuffer; crypt_md5_crack *cuda_outbuffer; size_t insize = sizeof(crypt_md5_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_md5_crack) * KEYS_PER_CRYPT; HANDLE_ERROR(hipMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(hipMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(hipMemcpy(cuda_inbuffer, inbuffer, insize, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_crypt_r) , dim3(blocks), dim3(THREADS) , 0, 0, cuda_inbuffer, cuda_outbuffer); HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipMemcpy(outbuffer, cuda_outbuffer, outsize, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipFree(cuda_inbuffer)); 
HANDLE_ERROR(hipFree(cuda_outbuffer)); }
72740cbc7e29b5626da17e141ee6f3e91022845a.cu
/* * This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include "../cuda_cryptmd5.h" #include "cuda_common.cuh" extern "C" void md5_crypt_gpu(crypt_md5_password *, uint32_t *, crypt_md5_salt *, int count); __device__ __constant__ char md5_salt_prefix_cu[] = "$1$"; __device__ __constant__ char apr1_salt_prefix_cu[] = "$apr1$"; __device__ __constant__ crypt_md5_salt cuda_salt[1]; __device__ void md5_process_block_cu(const void *, size_t, md5_ctx *); __device__ void md5_process_bytes_cu(const void *, size_t, md5_ctx *); __device__ void ctx_init(md5_ctx * ctx, uint8_t * ctx_buflen) { uint32_t *buf = (uint32_t *) ctx->buffer; int i = 14; while (i--) *buf++ = 0; *ctx_buflen = 0; } __device__ void ctx_update(md5_ctx * ctx, const char *string, uint8_t len, uint8_t * ctx_buflen) { uint8_t *dest = &ctx->buffer[*ctx_buflen]; uint8_t *src = (uint8_t *) string; *ctx_buflen += len; memcpy(dest, src, len); } __device__ void md5_digest(md5_ctx * ctx, uint32_t * result, uint8_t * ctx_buflen) { uint32_t len = *ctx_buflen; uint32_t *x = (uint32_t *) ctx->buffer; x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3)); len <<= 3; uint32_t b = 0xefcdab89; uint32_t c = 0x98badcfe; uint32_t d = 0x10325476; uint32_t a = ROTATE_LEFT(AC1 + x[0], S11); a += b; /* 1 */ d = ROTATE_LEFT((c ^ (a & MASK1)) + x[1] + AC2pCd, S12); d += a; /* 2 */ c = ROTATE_LEFT(F(d, a, b) + x[2] + AC3pCc, S13); c += d; /* 3 */ b = ROTATE_LEFT(F(c, d, a) + x[3] + AC4pCb, S14); b += c; /* 4 */ FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ FF(b, c, d, a, x[7], S14, 0xfd469501); /* 8 */ FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ FF(d, 
a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF(c, d, a, b, len, S13, 0xa679438e); /* 15 */ FF2(b, c, d, a, S14, 0x49b40821); /* 16 */ GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG2(c, d, a, b, S23, 0xd8a1e681); /* 23 */ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ GG(d, a, b, c, len, S22, 0xc33707d6); /* 26 */ GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH(b, c, d, a, len, S34, 0xfde5380c); /* 36 */ HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH2(c, d, a, b, S33, 0x1fa27cf8); /* 47 */ HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ II(d, a, b, c, x[7], S42, 0x432aff97); /* 50 */ II(c, d, a, b, len, S43, 0xab9423a7); /* 51 
*/ II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II(b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */ II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ II2(d, a, b, c, S42, 0xfe2ce6e0); /* 58 */ II(c, d, a, b, x[6], S43, 0xa3014314); /* 59 */ II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ result[0] = a + 0x67452301; result[1] = b + 0xefcdab89; result[2] = c + 0x98badcfe; result[3] = d + 0x10325476; } __device__ void md5crypt(const char *gpass, size_t keysize, char *result) { uint32_t i; __shared__ uint32_t alt_result[THREADS][4 + 1]; __shared__ char spass[THREADS][16 + 4]; uint8_t ctx_buflen; char *pass = spass[threadIdx.x]; memcpy(pass, gpass, 15); uint8_t pass_len = keysize; uint8_t salt_len = cuda_salt[0].length; char *salt = cuda_salt[0].salt; md5_ctx ctx; ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); if (cuda_salt[0].prefix == '1') { ctx_update(&ctx, md5_salt_prefix_cu, 3, &ctx_buflen); } else ctx_update(&ctx, apr1_salt_prefix_cu, 6, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, (const char *) alt_result[threadIdx.x], pass_len, &ctx_buflen); *alt_result[threadIdx.x] = 0; for (i = pass_len; i > 0; i >>= 1) if ((i & 1) != 0) ctx.buffer[ctx_buflen++] = ((const char *) alt_result[threadIdx.x])[0]; else ctx.buffer[ctx_buflen++] = pass[0]; md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); for (i = 0; i < 1000; i++) { ctx_init(&ctx, &ctx_buflen); if 
((i & 1) != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); else ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); if (i % 3 != 0) ctx_update(&ctx, salt, salt_len, &ctx_buflen); if (i % 7 != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); else ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); } char cracked = 1; cracked &= (alt_result[threadIdx.x][0] == cuda_salt[0].hash[0]); cracked &= (alt_result[threadIdx.x][1] == cuda_salt[0].hash[1]); cracked &= (alt_result[threadIdx.x][2] == cuda_salt[0].hash[2]); cracked &= (alt_result[threadIdx.x][3] == cuda_salt[0].hash[3]); *result = cracked; } __global__ void kernel_crypt_r(crypt_md5_password * inbuffer, crypt_md5_crack * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; md5crypt((char *) inbuffer[idx].v, inbuffer[idx].length, &outbuffer[idx].cracked); } __host__ void md5_crypt_gpu(crypt_md5_password * inbuffer, uint32_t * outbuffer, crypt_md5_salt * host_salt, int count) { int blocks = (count + THREADS - 1) / THREADS; HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_md5_salt))); crypt_md5_password *cuda_inbuffer; crypt_md5_crack *cuda_outbuffer; size_t insize = sizeof(crypt_md5_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_md5_crack) * KEYS_PER_CRYPT; HANDLE_ERROR(cudaMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(cudaMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(cudaMemcpy(cuda_inbuffer, inbuffer, insize, cudaMemcpyHostToDevice)); kernel_crypt_r <<< blocks, THREADS >>> (cuda_inbuffer, cuda_outbuffer); HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaMemcpy(outbuffer, cuda_outbuffer, outsize, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFree(cuda_inbuffer)); HANDLE_ERROR(cudaFree(cuda_outbuffer)); }
fe89389c8519d131c20626cd1aa17e2114eb816d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_d #define BLOCKSIZE 256 __global__ void magma_dk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_dbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_d_matrix *D, magma_d_matrix *R, const double * __restrict__ b, double * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ double local_x[ BLOCKSIZE ]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && 
(defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_dbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, double * valD, magma_index_t * rowD, magma_index_t * colD, double * valR, magma_index_t * rowR, magma_index_t * colR, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower 
subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, 
magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ 
colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4, double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5, double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6, double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { 
valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 
, magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if 
( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) 
|| defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t 
*rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, 
magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; 
colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = 
rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - 
tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t 
*rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, 
magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32, double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33, double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34, double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35, double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36, double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37, double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38, double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39, double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40, double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41, double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42, double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43, double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44, double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45, double *valD46, magma_index_t *rowD46, magma_index_t 
*colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46, double *valD47, magma_index_t *rowD47, magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47, double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48, double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49, double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50, double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51, double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52, double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53, double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54, double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55, double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56, double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57, double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58, double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59, double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60, double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61, double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double 
*valR62, magma_index_t *rowR62, magma_index_t *colR62, double *valD63, magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR 
= colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = 
rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { 
valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = 
colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. 
Arguments
    ---------

    @param[in]
    localiters  magma_int_t
                number of local Jacobi-like updates

    @param[in]
    matrices    magma_int_t
                number of sub-matrices

    @param[in]
    overlap     magma_int_t
                size of the overlap

    @param[in]
    D           magma_d_matrix*
                set of matrices with diagonal blocks

    @param[in]
    R           magma_d_matrix*
                set of matrices with non-diagonal parts

    @param[in]
    b           magma_d_matrix
                RHS

    @param[in]
    x           magma_d_matrix*
                iterate/solution

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dgegpuk
    ********************************************************************/

// Host-side dispatcher: selects the kernel instantiation matching the number
// of sub-matrices (1, 2, 4, 8, 16, 32 or 64) and passes the CSR arrays of
// every diagonal block D[i] and off-diagonal part R[i] as individual kernel
// arguments (the kernels take flat pointer lists, not magma_d_matrix*).
extern "C" magma_int_t
magma_dbajac_csr_overlap(
    magma_int_t localiters,
    magma_int_t matrices,
    magma_int_t overlap,
    magma_d_matrix *D,
    magma_d_matrix *R,
    magma_d_matrix b,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    int blocksize1 = BLOCKSIZE;
    int blocksize2 = 1;
    int size = D[0].num_rows;
    // smallest nnz over all off-diagonal parts, capped at 100
    int min_nnz=100;

    for(int i=0; i<matrices; i++){
        min_nnz = min(min_nnz, R[i].nnz);
    }

    // NOTE(review): nnz counts are non-negative, so `min_nnz > -1` always
    // holds and the "all elements in diagonal block" branch below looks
    // unreachable — confirm whether the guard was meant to be `min_nnz > 0`.
    if ( min_nnz > -1 ){
        if ( matrices == 1 ){
            // single subdomain: no overlap in the grid, one thread per row
            int dimgrid1 = magma_ceildiv( size , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                b.dval, x->dval );
        }
        else if (matrices == 2) {
            // each block advances only (blocksize1 - overlap) rows, so the
            // grid is stretched by the factor blocksize1/(blocksize1-overlap)
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 4){
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
                D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
                b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 8) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
                D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
                D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol,
                D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol,
                D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol,
                D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol,
                b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 16) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval,  D[0].drow,  D[0].dcol,  R[0].dval,  R[0].drow,  R[0].dcol,
                D[1].dval,  D[1].drow,  D[1].dcol,  R[1].dval,  R[1].drow,  R[1].dcol,
                D[2].dval,  D[2].drow,  D[2].dcol,  R[2].dval,  R[2].drow,  R[2].dcol,
                D[3].dval,  D[3].drow,  D[3].dcol,  R[3].dval,  R[3].drow,  R[3].dcol,
                D[4].dval,  D[4].drow,  D[4].dcol,  R[4].dval,  R[4].drow,  R[4].dcol,
                D[5].dval,  D[5].drow,  D[5].dcol,  R[5].dval,  R[5].drow,  R[5].dcol,
                D[6].dval,  D[6].drow,  D[6].dcol,  R[6].dval,  R[6].drow,  R[6].dcol,
                D[7].dval,  D[7].drow,  D[7].dcol,  R[7].dval,  R[7].drow,  R[7].dcol,
                D[8].dval,  D[8].drow,  D[8].dcol,  R[8].dval,  R[8].drow,  R[8].dcol,
                D[9].dval,  D[9].drow,  D[9].dcol,  R[9].dval,  R[9].drow,  R[9].dcol,
                D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                b.dval, x->dval );
        }
        else if (matrices == 32) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval,  D[0].drow,  D[0].dcol,  R[0].dval,  R[0].drow,  R[0].dcol,
                D[1].dval,  D[1].drow,  D[1].dcol,  R[1].dval,  R[1].drow,  R[1].dcol,
                D[2].dval,  D[2].drow,  D[2].dcol,  R[2].dval,  R[2].drow,  R[2].dcol,
                D[3].dval,  D[3].drow,  D[3].dcol,  R[3].dval,  R[3].drow,  R[3].dcol,
                D[4].dval,  D[4].drow,  D[4].dcol,  R[4].dval,  R[4].drow,  R[4].dcol,
                D[5].dval,  D[5].drow,  D[5].dcol,  R[5].dval,  R[5].drow,  R[5].dcol,
                D[6].dval,  D[6].drow,  D[6].dcol,  R[6].dval,  R[6].drow,  R[6].dcol,
                D[7].dval,  D[7].drow,  D[7].dcol,  R[7].dval,  R[7].drow,  R[7].dcol,
                D[8].dval,  D[8].drow,  D[8].dcol,  R[8].dval,  R[8].drow,  R[8].dcol,
                D[9].dval,  D[9].drow,  D[9].dcol,  R[9].dval,  R[9].drow,  R[9].dcol,
                D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
                D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
                D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
                D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
                D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
                D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
                D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
                D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
                D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
                D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
                D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
                D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
                D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
                D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
                D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
                D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
                b.dval, x->dval );
        }
        else if (matrices == 64) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, size, matrices, overlap,
                D[0].dval,  D[0].drow,  D[0].dcol,  R[0].dval,  R[0].drow,  R[0].dcol,
                D[1].dval,  D[1].drow,  D[1].dcol,  R[1].dval,  R[1].drow,  R[1].dcol,
                D[2].dval,  D[2].drow,  D[2].dcol,  R[2].dval,  R[2].drow,  R[2].dcol,
                D[3].dval,  D[3].drow,  D[3].dcol,  R[3].dval,  R[3].drow,  R[3].dcol,
                D[4].dval,  D[4].drow,  D[4].dcol,  R[4].dval,  R[4].drow,  R[4].dcol,
                D[5].dval,  D[5].drow,  D[5].dcol,  R[5].dval,  R[5].drow,  R[5].dcol,
                D[6].dval,  D[6].drow,  D[6].dcol,  R[6].dval,  R[6].drow,  R[6].dcol,
                D[7].dval,  D[7].drow,  D[7].dcol,  R[7].dval,  R[7].drow,  R[7].dcol,
                D[8].dval,  D[8].drow,  D[8].dcol,  R[8].dval,  R[8].drow,  R[8].dcol,
                D[9].dval,  D[9].drow,  D[9].dcol,  R[9].dval,  R[9].drow,  R[9].dcol,
                D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
                D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
                D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
                D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
                D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
                D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
                D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
                D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
                D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
                D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
                D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
                D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
                D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
                D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
                D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
                D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
                D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol,
                D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol,
                D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol,
                D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol,
                D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol,
                D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol,
                D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol,
                D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol,
                D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol,
                D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol,
                D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol,
                D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol,
                D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol,
                D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol,
                D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol,
                D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol,
                D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol,
                D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol,
                D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol,
                D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol,
                D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol,
                D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol,
                D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol,
                D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol,
                D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol,
                D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol,
                D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol,
                D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol,
                D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol,
                D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol,
                D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol,
                D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol,
                b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else {
            printf("error: invalid matrix count.\n");
        }
    }
    else {
        printf("error: all elements in diagonal block.\n");
    }
    return MAGMA_SUCCESS;
}
fe89389c8519d131c20626cd1aa17e2114eb816d.cu
/*
    -- MAGMA (version 2.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2019

       @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> d, Wed Jan  2 14:18:53 2019
*/

#include "magmasparse_internal.h"

#define PRECISION_d      // real double-precision instance (generated z -> d)
#define BLOCKSIZE 256    // threads per block; __shared__ buffers in the kernels are sized to this

/**
    Debug/sanity kernel exercising spin-locks built on atomicExch.
    Each thread selects lock id threadIdx.x % n (so several threads may
    contend for the same slot), spins until atomicExch returns 0u
    (i.e. it observed the lock free and now holds it), and immediately
    releases the lock again.
    NOTE(review): presumably `locks` is zero-initialized by the caller
    (0u == free, 1u == held) — confirm at the call site.
*/
__global__ void
magma_dk_testLocking(unsigned int* locks, int n)
{
    int id = threadIdx.x % n;
    bool leaveLoop = false;
    while (!leaveLoop) {
        // atomicExch returns the previous value: 0u means we acquired the lock
        if (atomicExch(&(locks[id]), 1u) == 0u) {
            //critical section
            leaveLoop = true;
            atomicExch(&(locks[id]),0u);   // release
        }
    }
}

/*
__global__ void
magma_dbajac_csr_o_ls_kernel(int localiters, int n,
                             int matrices, int overlap,
                             magma_d_matrix *D, magma_d_matrix *R,
                             const double * __restrict__ b,
                             double * x )
{
   // int inddiag =  blockIdx.x*(blockDim.x - overlap) - overlap;
   // int index   =  blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x;
    int inddiag =  blockIdx.x*blockDim.x/2-blockDim.x/2;
    int index   =  blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2;
    int i, j, start, end;

    __shared__ double local_x[ BLOCKSIZE ];

    double zero = MAGMA_D_MAKE(0.0, 0.0);
    double bl, tmp = zero, v = zero;
    double *valR, *valD;
    magma_index_t *colR, *rowR, *colD, *rowD;

    //valR = R[ (1+blockIdx.x-1)%matrices ].dval;
    //colR = R[ (1+blockIdx.x-1)%matrices ].dcol;
    //rowR = R[ (1+blockIdx.x-1)%matrices ].drow;
    //valD = D[ (1+blockIdx.x-1)%matrices ].dval;
    //colD = D[ (1+blockIdx.x-1)%matrices ].dcol;
    //rowD = D[ (1+blockIdx.x-1)%matrices ].drow;

    if (blockIdx.x%2 == 1) {
        valR = R[0].dval;
        valD = D[0].dval;
        colR = R[0].dcol;
        rowR = R[0].drow;
        colD = D[0].dcol;
        rowD = D[0].drow;
    }
    else {
        valR = R[1].dval;
        valD = D[1].dval;
        colR = R[1].dcol;
        rowR = R[1].drow;
        colD = D[1].dcol;
        rowD = D[1].drow;
    }

    if ( index>-1 && index < n ) {
        start = rowR[index];
        end = rowR[index+1];
        printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end);

#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif
#pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_dbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, double * valD, magma_index_t * rowD, magma_index_t * colD, double * valR, magma_index_t * rowR, magma_index_t * colR, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel2(int 
localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, 
magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) 
{ // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4, double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5, double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6, double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( 
blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , 
magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = 
rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( 
start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , 
magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t 
*colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( 
blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = 
valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write 
back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, 
magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t 
*colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32, double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33, double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34, double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35, double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36, double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37, double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38, double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39, double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40, double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41, double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42, double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43, double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44, double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45, double *valD46, magma_index_t *rowD46, magma_index_t *colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46, double 
*valD47, magma_index_t *rowD47, magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47, double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48, double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49, double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50, double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51, double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52, double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53, double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54, double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55, double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56, double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57, double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58, double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59, double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60, double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61, double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double *valR62, magma_index_t *rowR62, magma_index_t *colR62, double *valD63, 
magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( 
blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = 
valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = 
colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = 
valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. 
Arguments
---------

@param[in]
localiters  magma_int_t
            number of local Jacobi-like updates

@param[in]
matrices    magma_int_t
            number of sub-matrices

@param[in]
overlap     magma_int_t
            size of the overlap

@param[in]
D           magma_d_matrix*
            set of matrices with diagonal blocks

@param[in]
R           magma_d_matrix*
            set of matrices with non-diagonal parts

@param[in]
b           magma_d_matrix
            RHS

@param[in]
x           magma_d_matrix*
            iterate/solution

@param[in]
queue       magma_queue_t
            Queue to execute in.

@ingroup magmasparse_dgegpuk
********************************************************************/

extern "C" magma_int_t
magma_dbajac_csr_overlap(
    magma_int_t localiters,
    magma_int_t matrices,
    magma_int_t overlap,
    magma_d_matrix *D,
    magma_d_matrix *R,
    magma_d_matrix b,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    int blocksize1 = BLOCKSIZE;
    int blocksize2 = 1;
    int size = D[0].num_rows;
    // smallest nnz over the non-diagonal parts; only its sign is tested below,
    // so the 100 starting value merely caps the computed minimum
    int min_nnz=100;
    for(int i=0; i<matrices; i++){
        min_nnz = min(min_nnz, R[i].nnz);
    }
    // NOTE(review): nnz is never negative, so this condition is always true
    // and the "all elements in diagonal block" branch below is unreachable --
    // confirm whether "> 0" was intended.
    if ( min_nnz > -1 ){
        if ( matrices == 1 ){
            // no overlap scaling needed for a single sub-matrix
            int dimgrid1 = magma_ceildiv( size , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                  b.dval, x->dval );
        }
        else if (matrices == 2) {
            // grid is enlarged so overlapping blocks cover the whole system
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                  D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                  b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 4){
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                  D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                  D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
                  D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
                  b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 8) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol,
                  D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol,
                  D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol,
                  D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol,
                  D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol,
                  D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol,
                  D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol,
                  D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol,
                  b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else if (matrices == 16) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
                  D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
                  D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
                  D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
                  D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
                  D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
                  D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
                  D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
                  D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
                  D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
                  D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                  D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                  D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                  D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                  D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                  D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                  b.dval, x->dval );
        }
        else if (matrices == 32) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
                  D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
                  D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
                  D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
                  D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
                  D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
                  D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
                  D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
                  D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
                  D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
                  D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                  D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                  D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                  D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                  D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                  D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                  D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
                  D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
                  D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
                  D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
                  D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
                  D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
                  D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
                  D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
                  D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
                  D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
                  D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
                  D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
                  D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
                  D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
                  D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
                  D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
                  b.dval, x->dval );
        }
        else if (matrices == 64) {
            int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 );
            int dimgrid2 = 1;
            int dimgrid3 = 1;
            dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
            dim3 block( blocksize1, blocksize2, 1 );
            magma_dbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, size, matrices, overlap,
                  D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol,
                  D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol,
                  D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol,
                  D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol,
                  D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol,
                  D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol,
                  D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol,
                  D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol,
                  D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol,
                  D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol,
                  D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol,
                  D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol,
                  D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol,
                  D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol,
                  D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol,
                  D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol,
                  D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol,
                  D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol,
                  D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol,
                  D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol,
                  D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol,
                  D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol,
                  D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol,
                  D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol,
                  D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol,
                  D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol,
                  D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol,
                  D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol,
                  D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol,
                  D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol,
                  D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol,
                  D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol,
                  D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol,
                  D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol,
                  D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol,
                  D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol,
                  D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol,
                  D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol,
                  D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol,
                  D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol,
                  D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol,
                  D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol,
                  D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol,
                  D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol,
                  D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol,
                  D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol,
                  D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol,
                  D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol,
                  D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol,
                  D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol,
                  D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol,
                  D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol,
                  D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol,
                  D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol,
                  D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol,
                  D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol,
                  D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol,
                  D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol,
                  D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol,
                  D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol,
                  D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol,
                  D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol,
                  D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol,
                  D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol,
                  b.dval, x->dval );
            //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
            //    ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else {
            printf("error: invalid matrix count.\n");
        }
    }
    else {
        printf("error: all elements in diagonal block.\n");
    }
    return MAGMA_SUCCESS;
}
b1e00077b4aebb3acc1e4a6527a786a95ee0948f.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Sudoku -- Puzzle Solver on GPU using CUDA
// ----------------------------------------------------------------
/**
 * @file
 * sudoku.cu
 *
 * @brief main sudoku file to init and execute
 */

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// includes, utilities
#include "util/error_utils.cuh"
#include "util/io_utils.cuh"
#include "data.cuh"
// includes, kernels
//#include "beecolony.cuh"
#include "AngelaKernels.cuh"
//#include "bfsKernel.cuh"

/**
 * Runs the GPU sudoku solver: allocates device buffers, times the solver
 * kernels, copies the result back to the host, prints it, and releases
 * all memory this function allocated.
 *
 * @param n          board dimension; the puzzle is an n x n array of Square
 * @param h_unsolved host-side puzzle buffer; ownership is taken and freed here
 * @param o_graphics graphics output flag (currently unused here)
 */
void KernelManager(int n, Square * h_unsolved, bool o_graphics) {
    /* CUDA event setup for kernel timing */
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    /* Memory Allocations -- size_t avoids int overflow for large boards */
    size_t memsize = sizeof(Square) * (size_t) n * (size_t) n;
    Square * d_unsolved;
    ERROR_CHECK( hipMalloc((void**) &d_unsolved, memsize) );
    /* IMPORTANT: PLEASE ADD THIS IN YOUR KERNEL MANAGER FUNCTION */
    /*ERROR_CHECK( hipMemcpy(d_unsolved, h_unsolved, memsize,
                             hipMemcpyHostToDevice) );*/
    /* IMPORTANT: END! */
    Square * d_solved;
    ERROR_CHECK( hipMalloc((void**) &d_solved, memsize) );
    // int* d_points;
    // ERROR_CHECK( hipMalloc((void**) &d_points, sizeof(int)) );

    /* Time the solver between two events */
    float elapsedTime;
    hipEventRecord(start, 0);
    // ArtificialBeeColony (h_unsolved, d_unsolved, d_solved, n);
    AngelaKernels(h_unsolved, d_unsolved, d_solved, n);
    // bfsKernel( h_unsolved, d_unsolved, d_solved, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);

    /* NOTE(review): the result is read from d_unsolved, which assumes the
     * solver writes the solution in place -- confirm against AngelaKernels. */
    Square * h_solved = (Square *) malloc(memsize);
    ERROR_CHECK( hipMemcpy(h_solved, d_unsolved, memsize,
                           hipMemcpyDeviceToHost) );

    /* Destroy CUDA events */
    hipEventDestroy(start);
    hipEventDestroy(stop);

    const char * alg = "-ang";
    const char * finished = "/********** Angela's (C) **********/";
    output(finished, alg, n, false, h_solved);
    const char* statistics = "/******* Statistics (Begin) ********/";
    printf("%s\n", statistics);
    printf("Elapsed Time: %f (ms)\n", elapsedTime);
    const char* statistics_end = "/******** Statistics (End) *********/";
    printf("%s\n", statistics_end);

    /* Free Memory Allocations (h_solved was previously leaked) */
    free(h_solved);
    free(h_unsolved);
    ERROR_CHECK( hipFree(d_unsolved) );
    ERROR_CHECK( hipFree(d_solved) );
}

int main(int argc, char** argv) {
    /* Gets arguments from command line and puzzle from a file */
    CommandLineArgs * build = new CommandLineArgs;
    input(argc, argv, build);
    KernelManager((*build).size, (*build).Puzzle, (*build).graphics);
    /* NOTE(review): build is never deleted; the process exits right after,
     * but add `delete build;` if CommandLineArgs gains owning semantics. */
    return 0;
}
b1e00077b4aebb3acc1e4a6527a786a95ee0948f.cu
// ----------------------------------------------------------------
// Sudoku -- Puzzle Solver on GPU using CUDA
// ----------------------------------------------------------------
/**
 * @file
 * sudoku.cu
 *
 * @brief main sudoku file to init and execute
 */

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
// includes, utilities
#include "util/error_utils.cuh"
#include "util/io_utils.cuh"
#include "data.cuh"
// includes, kernels
//#include "beecolony.cuh"
#include "AngelaKernels.cuh"
//#include "bfsKernel.cuh"

/**
 * Runs the GPU sudoku solver: allocates device buffers, times the solver
 * kernels, copies the result back to the host, prints it, and releases
 * all memory this function allocated.
 *
 * @param n          board dimension; the puzzle is an n x n array of Square
 * @param h_unsolved host-side puzzle buffer; ownership is taken and freed here
 * @param o_graphics graphics output flag (currently unused here)
 */
void KernelManager(int n, Square * h_unsolved, bool o_graphics) {
    /* CUDA event setup for kernel timing */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Memory Allocations -- size_t avoids int overflow for large boards */
    size_t memsize = sizeof(Square) * (size_t) n * (size_t) n;
    Square * d_unsolved;
    ERROR_CHECK( cudaMalloc((void**) &d_unsolved, memsize) );
    /* IMPORTANT: PLEASE ADD THIS IN YOUR KERNEL MANAGER FUNCTION */
    /*ERROR_CHECK( cudaMemcpy(d_unsolved, h_unsolved, memsize,
                              cudaMemcpyHostToDevice) );*/
    /* IMPORTANT: END! */
    Square * d_solved;
    ERROR_CHECK( cudaMalloc((void**) &d_solved, memsize) );
    // int* d_points;
    // ERROR_CHECK( cudaMalloc((void**) &d_points, sizeof(int)) );

    /* Time the solver between two events */
    float elapsedTime;
    cudaEventRecord(start, 0);
    // ArtificialBeeColony (h_unsolved, d_unsolved, d_solved, n);
    AngelaKernels(h_unsolved, d_unsolved, d_solved, n);
    // bfsKernel( h_unsolved, d_unsolved, d_solved, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    /* NOTE(review): the result is read from d_unsolved, which assumes the
     * solver writes the solution in place -- confirm against AngelaKernels. */
    Square * h_solved = (Square *) malloc(memsize);
    ERROR_CHECK( cudaMemcpy(h_solved, d_unsolved, memsize,
                            cudaMemcpyDeviceToHost) );

    /* Destroy CUDA events */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    const char * alg = "-ang";
    const char * finished = "/********** Angela's (C) **********/";
    output(finished, alg, n, false, h_solved);
    const char* statistics = "/******* Statistics (Begin) ********/";
    printf("%s\n", statistics);
    printf("Elapsed Time: %f (ms)\n", elapsedTime);
    const char* statistics_end = "/******** Statistics (End) *********/";
    printf("%s\n", statistics_end);

    /* Free Memory Allocations (h_solved was previously leaked) */
    free(h_solved);
    free(h_unsolved);
    ERROR_CHECK( cudaFree(d_unsolved) );
    ERROR_CHECK( cudaFree(d_solved) );
}

int main(int argc, char** argv) {
    /* Gets arguments from command line and puzzle from a file */
    CommandLineArgs * build = new CommandLineArgs;
    input(argc, argv, build);
    KernelManager((*build).size, (*build).Puzzle, (*build).graphics);
    /* NOTE(review): build is never deleted; the process exits right after,
     * but add `delete build;` if CommandLineArgs gains owning semantics. */
    return 0;
}
919a37cbdf83c53a024d05cd090c2f687971e6b4.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <map>
#include <set>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <hip/hip_runtime.h>

using namespace std;

#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024

// Abort with the failing source line and runtime error string.
void safe_call(hipError_t ret, int line)
{
    if (ret != hipSuccess) {
        printf("Error at line %d : %s\n", line, hipGetErrorString(ret));
        exit(-1);
    }
}

// CSR-like graph of in-edges. adj_prefix_sum[v] is the exclusive end
// offset of vertex v's list in adj (start = adj_prefix_sum[v-1], 0 for v==0).
typedef struct __graph
{
    int V;                  // number of vertices
    int *adj_prefix_sum;    // per-vertex end offsets into adj
    int *adj;               // concatenated in-edge source lists
} graph_t;

// Device-global flag: set when any vertex level changed this sweep.
__device__ bool d_over;

__global__ void reset()
{
    d_over = false;
}

// Orders edges by destination, breaking ties by source.
bool destSort(pair<int,int> i, pair<int,int> j)
{
    return ((i.second < j.second) || (i.second == j.second && i.first < j.first));
}

// Print the graph (debug helper; only thread 0 prints)
__global__ void temp_kernel(graph_t * graph)
{
    int id = blockDim.x*blockIdx.x + threadIdx.x;
    if (id == 0) {
        int j;
        for (j = 0; j < graph->adj_prefix_sum[graph->V-1]; j++)
            printf("%d ", graph->adj[j]);
        printf("\n");
    }
}

// Initialize BFS levels: the source gets 0, every other vertex -1 (unvisited).
__global__ void init(int * vertices, int starting_vertex, int num_vertices)
{
    int v = blockDim.x*blockIdx.x + threadIdx.x;
    if (v == starting_vertex)
        vertices[v] = 0;
    else if (v < num_vertices)
        vertices[v] = -1;
}

// One BFS relaxation sweep: an unvisited vertex becomes
// min(level of in-neighbors) + 1 once all its in-neighbors are visited.
__global__ void bfs(const graph_t * graph, int * vertices)
{
    int id = blockDim.x*blockIdx.x + threadIdx.x;
    if (id < graph->V) {
        if (vertices[id] == -1) {
            int minLevel = INT_MAX;
            int i;
            if (id == 0)
                i = 0;
            else
                i = graph->adj_prefix_sum[id-1];
            for (; i < graph->adj_prefix_sum[id]; i++) {
                int t = vertices[graph->adj[i]];
                if (t < minLevel)
                    minLevel = t;
            }
            // minLevel < 0 means some in-neighbor is still unvisited;
            // minLevel == INT_MAX means no in-edges at all -- previously
            // this overflowed to INT_MAX+1, now such vertices stay -1.
            if (minLevel >= 0 && minLevel != INT_MAX) {
                vertices[id] = minLevel + 1;
                d_over = true;
            }
        }
    }
}

int main(int argc, char * argv[])
{
    // string literals require const char* in C++
    static const char * filename;
    if (argc > 2) {
        printf("./a.out <filename>\n");
        exit(-1);
    } else if (argc == 2) {
        filename = argv[1];
    } else {
        filename = "../data/input.txt";
    }

    FILE * fp = fopen(filename, "r");
    if (!fp) {
        printf("Error reading file.\n");
        exit(-1);
    }

    /* Set cuda device to K40 */
    CUDA_SAFE_CALL(hipSetDevice(0));

    /* Get graph from file into CPU memory */
    int num_vertices, num_edges, i, j;
    fscanf(fp, "%d %d", &num_vertices, &num_edges);

    /* Pinned host allocations; the kernels read them directly.
     * NOTE(review): this relies on the device being able to access
     * host-pinned memory (unified addressing) -- confirm for the target GPU. */
    graph_t *graph_host;
    CUDA_SAFE_CALL(hipHostMalloc((void **)&graph_host, sizeof(graph_t)));
    graph_host->V = num_vertices;
    CUDA_SAFE_CALL(hipHostMalloc((void **)&(graph_host->adj_prefix_sum),
                                 num_vertices*sizeof(int)));
    /* adj stores ints, not pointers: sizeof(int), not sizeof(int *) */
    CUDA_SAFE_CALL(hipHostMalloc((void **)&(graph_host->adj),
                                 num_edges*sizeof(int)));

    set<int> vertices;
    vector< pair<int,int> > edges;
    int s, d;
    for (i = 0; i < num_edges; i++) {
        fscanf(fp, "%d", &s);
        fscanf(fp, "%d", &d);
        vertices.insert(s);
        vertices.insert(d);
        edges.push_back(make_pair(s, d));
    }
    fclose(fp);   // file fully consumed; handle was previously leaked

    if (edges.empty()) {
        // edges[0] below would be undefined behavior on an empty graph
        printf("Error reading file.\n");
        exit(-1);
    }

    sort(edges.begin(), edges.end(), destSort);
    /* At this point, the vertices and edges are sorted by destination. */

    i = 0;
    int ps = edges[0].second, idf, last;
    for (vector< pair<int,int> >::iterator it = edges.begin(); it != edges.end(); ++it) {
        // C++ set stores in sorted order by default
        int id = distance(vertices.begin(), vertices.find((*it).first));
        // Storing in-edges instead of out-edges
        graph_host->adj[i] = id;
        if (ps != (*it).second) {
            idf = distance(vertices.begin(), vertices.find(ps));
            last = distance(vertices.begin(), vertices.find((*it).second));
            for (j = idf; j < last; j++)
                graph_host->adj_prefix_sum[j] = i;
            ps = (*it).second;
        }
        i++;
    }
    idf = distance(vertices.begin(), vertices.find(ps));
    graph_host->adj_prefix_sum[idf] = i;
    for (j = idf; j < num_vertices; j++)
        graph_host->adj_prefix_sum[j] = i;

    /*****************************************************
        XXX: GPU does not know the size of each adjacency list.
        For that, a new struct containing size of list and list
        has to be created and passed to GPU memory. Too much hassle.
        OR
        Create 1-D array in the graph itself which contains the
        size of each list.
    *****************************************************/
    //temp_kernel<<<1,1>>>(graph_host);

    int num_of_blocks = 1;
    int num_of_threads_per_block = num_vertices;
    if (num_vertices > MAX_THREADS_PER_BLOCK) {
        num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }

    int * vertices_host;
    CUDA_SAFE_CALL(hipHostMalloc((void **)&vertices_host, num_vertices*sizeof(int)));

    dim3 grid( num_of_blocks, 1, 1);
    dim3 threads( num_of_threads_per_block, 1, 1);

    hipEvent_t start, end;
    float diff;
    double time = 0;
    CUDA_SAFE_CALL(hipEventCreate(&start));
    CUDA_SAFE_CALL(hipEventCreate(&end));

    hipLaunchKernelGGL(( init), dim3(grid),dim3(threads), 0, 0, vertices_host, 0, num_vertices);
    CUDA_SAFE_CALL(hipGetLastError());   // catch bad launch configuration

    /* Iterate BFS sweeps until a full sweep changes nothing */
    bool stop;
    int k = 0;
    do {
        stop = false;
        CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &stop, sizeof(bool), 0,
                                         hipMemcpyHostToDevice));
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        CUDA_SAFE_CALL(hipEventRecord(start, 0));
        hipLaunchKernelGGL(( bfs), dim3(grid), dim3(threads), 0, 0, graph_host, vertices_host);
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        CUDA_SAFE_CALL(hipEventRecord(end, 0));
        CUDA_SAFE_CALL(hipEventSynchronize(end));
        CUDA_SAFE_CALL(hipEventElapsedTime(&diff, start, end));
        time += diff*1.0e-3;
        CUDA_SAFE_CALL(hipMemcpyFromSymbol(&stop, d_over, sizeof(bool), 0,
                                           hipMemcpyDeviceToHost));
        k++;
    } while (stop);

    printf("Number of iterations : %d\n", k);
    for (int i = 0; i < num_vertices; i++) {
        printf("Vertex %d Distance %d\n", i, vertices_host[i]);
    }
    printf("Time: %f ms\n", time);

    CUDA_SAFE_CALL(hipHostFree(vertices_host));
    CUDA_SAFE_CALL(hipHostFree(graph_host->adj));
    CUDA_SAFE_CALL(hipHostFree(graph_host->adj_prefix_sum));
    CUDA_SAFE_CALL(hipHostFree(graph_host));
    CUDA_SAFE_CALL(hipEventDestroy(start));
    CUDA_SAFE_CALL(hipEventDestroy(end));

    return 0;
}
919a37cbdf83c53a024d05cd090c2f687971e6b4.cu
#include <iostream> #include <map> #include <set> #include <vector> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <cuda.h> using namespace std; #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } typedef struct __graph { int V; int *adj_prefix_sum; int *adj; } graph_t; __device__ bool d_over; __global__ void reset() { d_over = false; } bool destSort(pair<int,int> i,pair<int,int> j) { return ((i.second<j.second) || (i.second==j.second && i.first<j.first)); } // Print the graph __global__ void temp_kernel(graph_t * graph) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id == 0) { int j; for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++) printf("%d ",graph->adj[j]); printf("\n"); } } __global__ void init(int * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v] = 0; else if(v < num_vertices) vertices[v] = -1; } __global__ void bfs(const graph_t * graph, int * vertices) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < graph->V) { if(vertices[id] == -1) { int minLevel = INT_MAX; int i; if(id == 0) i = 0; else i = graph->adj_prefix_sum[id-1]; for(; i < graph->adj_prefix_sum[id]; i++) { int t=vertices[graph->adj[i]]; if(t < minLevel) minLevel = t; } if(minLevel >= 0) { vertices[id] = minLevel+1; d_over = true; } } } } int main(int argc, char * argv[]) { static char * filename; if(argc>2) { printf("./a.out <filename>\n"); exit(-1); } else if(argc==2) { filename = argv[1]; } else { filename = "../data/input.txt"; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(cudaSetDevice(0)); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j; fscanf(fp,"%d 
%d",&num_vertices,&num_edges); graph_t *graph_host; CUDA_SAFE_CALL(cudaMallocHost((void **)&graph_host, sizeof(graph_t))); graph_host->V = num_vertices; CUDA_SAFE_CALL(cudaMallocHost((void **)&(graph_host->adj_prefix_sum), num_vertices*sizeof(int))); CUDA_SAFE_CALL(cudaMallocHost((void **)&(graph_host->adj), num_edges*sizeof(int *))); set<int> vertices; vector< pair<int,int> > edges; int s,d; for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); vertices.insert(s); vertices.insert(d); edges.push_back(make_pair(s,d)); } sort(edges.begin(),edges.end(),destSort); /* At this point, the vertices and edges are sorted by destination. */ i=0; int ps=edges[0].second,idf,last; for(vector< pair<int,int> >::iterator it = edges.begin() ; it != edges.end(); ++it) { int id = distance(vertices.begin(),vertices.find((*it).first)); // C++ set stores in sorted order by default //Storing in-edges instead of out-edges graph_host->adj[i] = id; if(ps != (*it).second) { idf = distance(vertices.begin(),vertices.find(ps)); last = distance(vertices.begin(),vertices.find((*it).second)); for(j=idf;j<last;j++) graph_host->adj_prefix_sum[j] = i; ps = (*it).second; } i++; } idf = distance(vertices.begin(),vertices.find(ps)); graph_host->adj_prefix_sum[idf] = i; for(j=idf;j<num_vertices;j++) graph_host->adj_prefix_sum[j] = i; /***************************************************** XXX: GPU does not know the size of each adjacency list. For that, a new struct containing size of list and list has to be created and passed to GPU memory. Too much hassle. OR Create 1-D array in the graph itself which contains the size of each list. 
*****************************************************/ //temp_kernel<<<1,1>>>(graph_host); int num_of_blocks = 1; int num_of_threads_per_block = num_vertices; if(num_vertices>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } int * vertices_host; CUDA_SAFE_CALL(cudaMallocHost((void **)&vertices_host, num_vertices*sizeof(int))); dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); cudaEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&end)); init<<<grid,threads>>> (vertices_host, 0, num_vertices); bool stop; int k=0; do { stop = false; CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(start,0)); bfs<<<grid, threads>>> (graph_host, vertices_host); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(end,0)); CUDA_SAFE_CALL(cudaEventSynchronize(end)); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff, start, end)); time += diff*1.0e-3; CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i]); } printf("Time: %f ms\n",time); CUDA_SAFE_CALL(cudaFreeHost(vertices_host)); CUDA_SAFE_CALL(cudaFreeHost(graph_host->adj)); CUDA_SAFE_CALL(cudaFreeHost(graph_host->adj_prefix_sum)); CUDA_SAFE_CALL(cudaFreeHost(graph_host)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(end)); return 0; }
b88260f8dd003edacdd109b0d84373610f545e61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add(int *a, int *b, int *c){ int tid = blockIdx.x; c[tid] = a[tid] + b[tid]; }
b88260f8dd003edacdd109b0d84373610f545e61.cu
#include "includes.h" __global__ void add(int *a, int *b, int *c){ int tid = blockIdx.x; c[tid] = a[tid] + b[tid]; }
18b5d3693184d7ffcb33e29d03ffcb6467fcb8ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #define TILE_SZ 16 __global__ void Gelim(float *A, int numvar){/////////////add b back in middle __shared__ float A_s[TILE_SZ][TILE_SZ]; int Tirow = threadIdx.y; int Ticol = threadIdx.x; A_s[Tirow][Ticol] = A[(Tirow * (numvar +1)) + Ticol]; for(int i = 1; i < numvar; i++){ if((Tirow +i) < (numvar)){ float multiplier = A_s[Tirow+i][i-1]/A_s[i-1][i-1]; if(Tirow <= Ticol+1){ A_s[Tirow+i][Ticol] -= (multiplier * A_s[i-1][Ticol]); } else{ A_s[Tirow+i][Ticol] = 0; } __syncthreads(); } //__syncthreads(); } A[Tirow *(numvar+1) +Ticol] = A_s[Tirow][Ticol];/////////////replace a with b } void basicGelim(float *A, int numvar){ ////////add b back in middle dim3 block(numvar+1, numvar, 1); dim3 grid(1,1,1); hipLaunchKernelGGL(( Gelim), dim3(grid),dim3(block), 0, 0, A,numvar);//////////b back in mid } /*void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; }*/ // Initialize thread block and kernel grid dimensions --------------------- /* const unsigned int BLOCK_SIZE = TILE_SZ; dim3 block(BLOCK_SIZE,BLOCK_SIZE); dim3 grid((n+BLOCK_SIZE-1)/BLOCK_SIZE,(m+BLOCK_SIZE-1)/BLOCK_SIZE); //INSERT CODE HERE // Invoke CUDA 
kernel ----------------------------------------------------- mysgemm<<<grid,block>>>(m,n,k,A,B,C); //INSERT CODE HERE dim3 block(numvar+1,numvar,1); dim3 grid(1,1,1); }*/
18b5d3693184d7ffcb33e29d03ffcb6467fcb8ca.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #define TILE_SZ 16 __global__ void Gelim(float *A, int numvar){/////////////add b back in middle __shared__ float A_s[TILE_SZ][TILE_SZ]; int Tirow = threadIdx.y; int Ticol = threadIdx.x; A_s[Tirow][Ticol] = A[(Tirow * (numvar +1)) + Ticol]; for(int i = 1; i < numvar; i++){ if((Tirow +i) < (numvar)){ float multiplier = A_s[Tirow+i][i-1]/A_s[i-1][i-1]; if(Tirow <= Ticol+1){ A_s[Tirow+i][Ticol] -= (multiplier * A_s[i-1][Ticol]); } else{ A_s[Tirow+i][Ticol] = 0; } __syncthreads(); } //__syncthreads(); } A[Tirow *(numvar+1) +Ticol] = A_s[Tirow][Ticol];/////////////replace a with b } void basicGelim(float *A, int numvar){ ////////add b back in middle dim3 block(numvar+1, numvar, 1); dim3 grid(1,1,1); Gelim<<<grid,block>>>(A,numvar);//////////b back in mid } /*void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; }*/ // Initialize thread block and kernel grid dimensions --------------------- /* const unsigned int BLOCK_SIZE = TILE_SZ; dim3 block(BLOCK_SIZE,BLOCK_SIZE); dim3 grid((n+BLOCK_SIZE-1)/BLOCK_SIZE,(m+BLOCK_SIZE-1)/BLOCK_SIZE); //INSERT CODE HERE // Invoke CUDA kernel ----------------------------------------------------- mysgemm<<<grid,block>>>(m,n,k,A,B,C); //INSERT CODE HERE dim3 
block(numvar+1,numvar,1); dim3 grid(1,1,1); }*/
903769d482093d2f6f789a50bc8a0ac9649748c6.hip
// !!! This is a file automatically generated by hipify!!! #include "MeshKernel.h" #include <sutil/vec_math.h> #include <cuda/cuda_noise.cuh> #include <thrust/device_vector.h> #include <thrust/remove.h> #include <math_constants.h> #include <hip/hip_runtime_api.h> #include <hip/hip_vector_types.h> #include <vector_functions.hpp> #define NOISE_STRENGTH 0.1 //Round a / b to nearest higher integer value int cuda_iDivUp(int a, int b) { return (a + (b - 1)) / b; } __device__ float fBM(int numOctaves, float3 coordinate, float persistence, float scale, float low, float high, int seed) { float maxAmp = 0; float amp = 1; float freq = scale; float noise = 0; // add successively smaller, higher - frequency terms for (int i = 0; i < numOctaves; ++i) { noise += cudaNoise::simplexNoise(coordinate, freq, seed) * amp; maxAmp += amp; amp *= persistence; freq *= 2; } // take the average value of the iterations noise /= maxAmp; // normalize the result noise = noise * (high - low) / 2 + (high + low) / 2; return noise; } __forceinline__ __device__ float3 calculateGerstnerWaveOffset(Wave* waves, int numWaves, float2 gridLocation, float t) { float3 sum = make_float3(0.f); float L, wi, phi, rad, Qi, Ai, cosRad; float2 Di; for (int i = 0; i < numWaves; i++) { Qi = waves[i].steepness; Ai = waves[i].amplitude; L = waves[i].waveLength; wi = 2 / L; Di = make_float2(cos(waves[i].direction), sin(waves[i].direction)); phi = waves[i].speed * 2 / L; rad = wi * dot(Di, gridLocation) + phi * t; cosRad = cos(rad); sum.x += Qi * Ai * Di.x * cosRad; sum.y += Ai * sin(rad); sum.z += Qi * Ai * Di.y * cosRad; } return sum; } __forceinline__ __device__ float3 calculateGerstnerWaveNormal(Wave* waves, int numWaves, float2 posPlane, float t) { float3 sum = make_float3(0.f, 1.f, 0.f); float L, wi, phi, rad, Qi, Ai, WA, cosRad, sinRad; float2 Di; for (int i = 0; i < numWaves; i++) { Qi = waves[i].steepness; Ai = waves[i].amplitude; L = waves[i].waveLength; wi = 2 / L; WA = wi * Ai; Di = 
make_float2(cos(waves[i].direction), sin(waves[i].direction)); phi = waves[i].speed * 2 / L; rad = wi * dot(Di, posPlane) + phi * t; cosRad = cos(rad); sinRad = sin(rad); sum.x += -Di.x * WA * cosRad; sum.y += -Qi * WA * sinRad; sum.z += -Di.y * WA * cosRad; } return normalize(sum); } __forceinline__ __device__ float3 calculateGerstnerWavePosition(Wave* waves, int numWaves, float3 samplePoint, float t) { float2 gridLocation = make_float2(samplePoint.x, samplePoint.z); float3 newPos = make_float3(gridLocation.x, 0.f, gridLocation.y) + calculateGerstnerWaveOffset(waves, numWaves, gridLocation, t); //float noise = fBM(1, newPos, 0.5, 0.03, -NOISE_STRENGTH, NOISE_STRENGTH, 9); //newPos.y += noise; return newPos; } __forceinline__ __device__ float4 calculateSample(ProjectedGrid* projectedGrid, unsigned int tx, unsigned int ty) { float4 result; float u = tx * projectedGrid->du; float v = ty * projectedGrid->dv; result = (1.0f - v) * ((1.0f - u) * projectedGrid->corners[0] + u * projectedGrid->corners[1]) + v * ((1.0f - u) * projectedGrid->corners[2] + u * projectedGrid->corners[3]); result /= result.w; result.w = (1.0f - v) * ((1.0f - u) * projectedGrid->distances[0] + u * projectedGrid->distances[1]) + v * ((1.0f - u) * projectedGrid->distances[2] + u * projectedGrid->distances[3]); return result; } __forceinline__ __device__ float calculateWaveAttenuation(float d, float dmin, float dmax) { // Quadratic curve that is 1 at dmin and 0 at dmax // Constant 1 for less than dmin, constant 0 for more than dmax if (d > dmax) return 0.f; else { return saturate((1.f / ((dmin - dmax) * (dmin - dmax))) * ((d - dmax) * (d - dmax))); } } __global__ void generateGridMesh(MeshBuffer meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y 
= numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + ty; float4 samplePoint = calculateSample(&projectedGrid, tx, ty); float fade = calculateWaveAttenuation(samplePoint.w, 0, projectedGrid.zfar); float3 pos = calculateGerstnerWavePosition(waves, numWaves, make_float3(samplePoint), t); pos.y *= fade; meshBuffer.pos[indexVertex] = pos; //meshBuffer.normal[indexVertex] = calculateGerstnerWaveNormal(waves, numWaves, make_float2(pos.x, pos.z), t); if (tx < X && ty < Y) { int indexIndices = 6 * (tx * X + ty); meshBuffer.indices[indexIndices] = indexVertex; meshBuffer.indices[indexIndices + 1] = indexVertex + numSamplesY; meshBuffer.indices[indexIndices + 2] = indexVertex + numSamplesY + 1; meshBuffer.indices[indexIndices + 3] = indexVertex; meshBuffer.indices[indexIndices + 4] = indexVertex + numSamplesY + 1; meshBuffer.indices[indexIndices + 5] = indexVertex + 1; } } __global__ void calculateNormalDuDv(MeshBuffer meshBuffer, ProjectedGrid projectedGrid) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y = numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + ty; float3 v1 = make_float3(0.f, 0.f, 1.f); float3 v2 = make_float3(1.f, 0.f, 0.f); if (tx > 0 && ty > 0 && tx < X && ty < Y) { int ixp1 = (tx + 1) * numSamplesY + ty; int ixm1 = (tx - 1) * numSamplesY + ty; int iyp1 = tx * numSamplesY + ty + 1; int iym1 = tx * numSamplesY + ty - 1; float3 xp1 = meshBuffer.pos[ixp1]; float3 xm1 = meshBuffer.pos[ixm1]; float3 yp1 = meshBuffer.pos[iyp1]; float3 ym1 = meshBuffer.pos[iym1]; v1.x = xp1.x - xm1.x; v1.y = xp1.y - xm1.y; v1.z = xp1.z - xm1.z; v2.x = yp1.x - ym1.x; v2.y = yp1.y - ym1.y; v2.z = yp1.z - ym1.z; } meshBuffer.normal[indexVertex] = cross(v1, v2); } void cudaGenerateGridMesh(MeshBuffer& meshBuffer, Wave* 
waves, int numWaves, ProjectedGrid projectedGrid, float t) { dim3 block(16, 16, 1); dim3 grid(cuda_iDivUp(projectedGrid.samplesU, block.x), cuda_iDivUp(projectedGrid.samplesV, block.y), 1); generateGridMesh << <grid, block, 0, 0 >> > (meshBuffer, waves, numWaves, projectedGrid, t); calculateNormalDuDv << <grid, block, 0, 0 >> > (meshBuffer, projectedGrid); } __global__ void updateGridMesh(MeshBuffer meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y = numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + ty; float4 samplePoint = calculateSample(&projectedGrid, tx, ty); //float fade = 0.2* calculateWaveAttenuation(samplePoint.w, projectedGrid.zfar * 0.3, projectedGrid.zfar); float3 pos = calculateGerstnerWavePosition(waves, numWaves, make_float3(samplePoint), t); //pos.y *= fade; meshBuffer.pos[indexVertex] = pos; float3 normal = calculateGerstnerWaveNormal(waves, numWaves, make_float2(pos.x, pos.z), t); //meshBuffer.normal[indexVertex] = normal;// normalize(make_float3(normal.x * fade, normal.y, normal.z * fade)); } void cudaUpdateGridMesh(MeshBuffer& meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { dim3 block(16, 16, 1); dim3 grid(cuda_iDivUp(projectedGrid.samplesU, block.x), cuda_iDivUp(projectedGrid.samplesV, block.y), 1); updateGridMesh << <grid, block, 0, 0 >> > (meshBuffer, waves, numWaves, projectedGrid, t); calculateNormalDuDv << <grid, block, 0, 0 >> > (meshBuffer, projectedGrid); }
903769d482093d2f6f789a50bc8a0ac9649748c6.cu
#include "MeshKernel.h" #include <sutil/vec_math.h> #include <cuda/cuda_noise.cuh> #include <thrust/device_vector.h> #include <thrust/remove.h> #include <math_constants.h> #include <cuda_runtime_api.h> #include <vector_types.h> #include <vector_functions.hpp> #define NOISE_STRENGTH 0.1 //Round a / b to nearest higher integer value int cuda_iDivUp(int a, int b) { return (a + (b - 1)) / b; } __device__ float fBM(int numOctaves, float3 coordinate, float persistence, float scale, float low, float high, int seed) { float maxAmp = 0; float amp = 1; float freq = scale; float noise = 0; // add successively smaller, higher - frequency terms for (int i = 0; i < numOctaves; ++i) { noise += cudaNoise::simplexNoise(coordinate, freq, seed) * amp; maxAmp += amp; amp *= persistence; freq *= 2; } // take the average value of the iterations noise /= maxAmp; // normalize the result noise = noise * (high - low) / 2 + (high + low) / 2; return noise; } __forceinline__ __device__ float3 calculateGerstnerWaveOffset(Wave* waves, int numWaves, float2 gridLocation, float t) { float3 sum = make_float3(0.f); float L, wi, phi, rad, Qi, Ai, cosRad; float2 Di; for (int i = 0; i < numWaves; i++) { Qi = waves[i].steepness; Ai = waves[i].amplitude; L = waves[i].waveLength; wi = 2 / L; Di = make_float2(cos(waves[i].direction), sin(waves[i].direction)); phi = waves[i].speed * 2 / L; rad = wi * dot(Di, gridLocation) + phi * t; cosRad = cos(rad); sum.x += Qi * Ai * Di.x * cosRad; sum.y += Ai * sin(rad); sum.z += Qi * Ai * Di.y * cosRad; } return sum; } __forceinline__ __device__ float3 calculateGerstnerWaveNormal(Wave* waves, int numWaves, float2 posPlane, float t) { float3 sum = make_float3(0.f, 1.f, 0.f); float L, wi, phi, rad, Qi, Ai, WA, cosRad, sinRad; float2 Di; for (int i = 0; i < numWaves; i++) { Qi = waves[i].steepness; Ai = waves[i].amplitude; L = waves[i].waveLength; wi = 2 / L; WA = wi * Ai; Di = make_float2(cos(waves[i].direction), sin(waves[i].direction)); phi = waves[i].speed * 2 / L; rad 
= wi * dot(Di, posPlane) + phi * t; cosRad = cos(rad); sinRad = sin(rad); sum.x += -Di.x * WA * cosRad; sum.y += -Qi * WA * sinRad; sum.z += -Di.y * WA * cosRad; } return normalize(sum); } __forceinline__ __device__ float3 calculateGerstnerWavePosition(Wave* waves, int numWaves, float3 samplePoint, float t) { float2 gridLocation = make_float2(samplePoint.x, samplePoint.z); float3 newPos = make_float3(gridLocation.x, 0.f, gridLocation.y) + calculateGerstnerWaveOffset(waves, numWaves, gridLocation, t); //float noise = fBM(1, newPos, 0.5, 0.03, -NOISE_STRENGTH, NOISE_STRENGTH, 9); //newPos.y += noise; return newPos; } __forceinline__ __device__ float4 calculateSample(ProjectedGrid* projectedGrid, unsigned int tx, unsigned int ty) { float4 result; float u = tx * projectedGrid->du; float v = ty * projectedGrid->dv; result = (1.0f - v) * ((1.0f - u) * projectedGrid->corners[0] + u * projectedGrid->corners[1]) + v * ((1.0f - u) * projectedGrid->corners[2] + u * projectedGrid->corners[3]); result /= result.w; result.w = (1.0f - v) * ((1.0f - u) * projectedGrid->distances[0] + u * projectedGrid->distances[1]) + v * ((1.0f - u) * projectedGrid->distances[2] + u * projectedGrid->distances[3]); return result; } __forceinline__ __device__ float calculateWaveAttenuation(float d, float dmin, float dmax) { // Quadratic curve that is 1 at dmin and 0 at dmax // Constant 1 for less than dmin, constant 0 for more than dmax if (d > dmax) return 0.f; else { return saturate((1.f / ((dmin - dmax) * (dmin - dmax))) * ((d - dmax) * (d - dmax))); } } __global__ void generateGridMesh(MeshBuffer meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y = numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + 
ty; float4 samplePoint = calculateSample(&projectedGrid, tx, ty); float fade = calculateWaveAttenuation(samplePoint.w, 0, projectedGrid.zfar); float3 pos = calculateGerstnerWavePosition(waves, numWaves, make_float3(samplePoint), t); pos.y *= fade; meshBuffer.pos[indexVertex] = pos; //meshBuffer.normal[indexVertex] = calculateGerstnerWaveNormal(waves, numWaves, make_float2(pos.x, pos.z), t); if (tx < X && ty < Y) { int indexIndices = 6 * (tx * X + ty); meshBuffer.indices[indexIndices] = indexVertex; meshBuffer.indices[indexIndices + 1] = indexVertex + numSamplesY; meshBuffer.indices[indexIndices + 2] = indexVertex + numSamplesY + 1; meshBuffer.indices[indexIndices + 3] = indexVertex; meshBuffer.indices[indexIndices + 4] = indexVertex + numSamplesY + 1; meshBuffer.indices[indexIndices + 5] = indexVertex + 1; } } __global__ void calculateNormalDuDv(MeshBuffer meshBuffer, ProjectedGrid projectedGrid) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y = numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + ty; float3 v1 = make_float3(0.f, 0.f, 1.f); float3 v2 = make_float3(1.f, 0.f, 0.f); if (tx > 0 && ty > 0 && tx < X && ty < Y) { int ixp1 = (tx + 1) * numSamplesY + ty; int ixm1 = (tx - 1) * numSamplesY + ty; int iyp1 = tx * numSamplesY + ty + 1; int iym1 = tx * numSamplesY + ty - 1; float3 xp1 = meshBuffer.pos[ixp1]; float3 xm1 = meshBuffer.pos[ixm1]; float3 yp1 = meshBuffer.pos[iyp1]; float3 ym1 = meshBuffer.pos[iym1]; v1.x = xp1.x - xm1.x; v1.y = xp1.y - xm1.y; v1.z = xp1.z - xm1.z; v2.x = yp1.x - ym1.x; v2.y = yp1.y - ym1.y; v2.z = yp1.z - ym1.z; } meshBuffer.normal[indexVertex] = cross(v1, v2); } void cudaGenerateGridMesh(MeshBuffer& meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { dim3 block(16, 16, 1); dim3 
grid(cuda_iDivUp(projectedGrid.samplesU, block.x), cuda_iDivUp(projectedGrid.samplesV, block.y), 1); generateGridMesh << <grid, block, 0, 0 >> > (meshBuffer, waves, numWaves, projectedGrid, t); calculateNormalDuDv << <grid, block, 0, 0 >> > (meshBuffer, projectedGrid); } __global__ void updateGridMesh(MeshBuffer meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y; int numSamplesX = projectedGrid.samplesU; int numSamplesY = projectedGrid.samplesV; int X = numSamplesX - 1; int Y = numSamplesY - 1; if (tx > X || ty > Y) return; unsigned int indexVertex = tx * numSamplesY + ty; float4 samplePoint = calculateSample(&projectedGrid, tx, ty); //float fade = 0.2* calculateWaveAttenuation(samplePoint.w, projectedGrid.zfar * 0.3, projectedGrid.zfar); float3 pos = calculateGerstnerWavePosition(waves, numWaves, make_float3(samplePoint), t); //pos.y *= fade; meshBuffer.pos[indexVertex] = pos; float3 normal = calculateGerstnerWaveNormal(waves, numWaves, make_float2(pos.x, pos.z), t); //meshBuffer.normal[indexVertex] = normal;// normalize(make_float3(normal.x * fade, normal.y, normal.z * fade)); } void cudaUpdateGridMesh(MeshBuffer& meshBuffer, Wave* waves, int numWaves, ProjectedGrid projectedGrid, float t) { dim3 block(16, 16, 1); dim3 grid(cuda_iDivUp(projectedGrid.samplesU, block.x), cuda_iDivUp(projectedGrid.samplesV, block.y), 1); updateGridMesh << <grid, block, 0, 0 >> > (meshBuffer, waves, numWaves, projectedGrid, t); calculateNormalDuDv << <grid, block, 0, 0 >> > (meshBuffer, projectedGrid); }
694ba2b214f69c15c39dfd18b13eba88b249ea08.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iostream> #include <chrono> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" __global__ void kernal_fp32_LossSoftmaxCrossEntropy( const float* y_buf, const float* t_buf, float* dy_buf, double* loss_buf, float reciprocal_frame_size, int node_size, int frame_size, int frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; if (frame >= frame_size) { return; } // max float c = y_buf[frame]; for ( int node = 1; node < node_size; ++node) { c = max(c, y_buf[node * frame_stride + frame]); } // sum(exp(y - c)) float sum = 0; for ( int node = 0; node < node_size; ++node) { sum += ::exp(y_buf[node * frame_stride + frame] - c); } float loss_softmax; for ( int node = 0; node < node_size; ++node) { float y = y_buf[node * frame_stride + frame]; float t = t_buf[node * frame_stride + frame]; float softmax = exp(y - c) / sum; float dy = (softmax - t) * reciprocal_frame_size; dy_buf[node * frame_stride + frame] = dy; if ( t > 0 ) { loss_softmax = softmax; } } loss_buf[frame] = log((double)loss_softmax + 1.0e-7); } __global__ void kernal_fp32_LossSoftmaxCrossEntropy_Sum( double* loss_buf, double* loss, int frame_size ) { float sum = 0; for ( int frame = 0; frame < frame_size; ++frame) { sum += loss_buf[frame]; } *loss += -sum; } BBCU_DLL_EXPORT int bbcu_fp32_LossSoftmaxCrossEntropy ( const float* dev_y_buf, const float* dev_t_buf, float* dev_dy_buf, double* dev_loss_buf, double* dev_loss, int node_size, int frame_size, int frame_stride, int batch_size, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); // dim3 block(512); dim3 grid((frame_size + (block.x-1)) / block.x); block.x = ::min((int)block.x, (int)frame_size); hipLaunchKernelGGL(( kernal_fp32_LossSoftmaxCrossEntropy), dim3(grid), dim3(block), 0, streamId, dev_y_buf, dev_t_buf, dev_dy_buf, dev_loss_buf, 1.0f / (float)batch_size, node_size, 
frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); // kernal_fp32_LossSoftmaxCrossEntropy_Sum << <1, 1, 0, streamId >> > ( dev_loss_buf, dev_loss, frame_size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
694ba2b214f69c15c39dfd18b13eba88b249ea08.cu
#include <algorithm> #include <iostream> #include <chrono> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" __global__ void kernal_fp32_LossSoftmaxCrossEntropy( const float* y_buf, const float* t_buf, float* dy_buf, double* loss_buf, float reciprocal_frame_size, int node_size, int frame_size, int frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; if (frame >= frame_size) { return; } // max探索 float c = y_buf[frame]; for ( int node = 1; node < node_size; ++node) { c = max(c, y_buf[node * frame_stride + frame]); } // sum(exp(y - c)) float sum = 0; for ( int node = 0; node < node_size; ++node) { sum += std::exp(y_buf[node * frame_stride + frame] - c); } float loss_softmax; for ( int node = 0; node < node_size; ++node) { float y = y_buf[node * frame_stride + frame]; float t = t_buf[node * frame_stride + frame]; float softmax = exp(y - c) / sum; float dy = (softmax - t) * reciprocal_frame_size; dy_buf[node * frame_stride + frame] = dy; if ( t > 0 ) { loss_softmax = softmax; } } loss_buf[frame] = log((double)loss_softmax + 1.0e-7); } __global__ void kernal_fp32_LossSoftmaxCrossEntropy_Sum( double* loss_buf, double* loss, int frame_size ) { float sum = 0; for ( int frame = 0; frame < frame_size; ++frame) { sum += loss_buf[frame]; } *loss += -sum; } BBCU_DLL_EXPORT int bbcu_fp32_LossSoftmaxCrossEntropy ( const float* dev_y_buf, const float* dev_t_buf, float* dev_dy_buf, double* dev_loss_buf, double* dev_loss, int node_size, int frame_size, int frame_stride, int batch_size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); // 計算 dim3 block(512); dim3 grid((frame_size + (block.x-1)) / block.x); block.x = std::min((int)block.x, (int)frame_size); kernal_fp32_LossSoftmaxCrossEntropy<<<grid, block, 0, streamId>>>( dev_y_buf, dev_t_buf, dev_dy_buf, dev_loss_buf, 1.0f / (float)batch_size, node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); // 損失集計 
kernal_fp32_LossSoftmaxCrossEntropy_Sum << <1, 1, 0, streamId >> > ( dev_loss_buf, dev_loss, frame_size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
c4cd59a2b8534c1b8303d4e359837aecaf4877b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <iostream> #include <string> #include <vector> #include "gpuFunctions.h" #include "TimingFunctions.h" using namespace std; #define threads 16 #define Tol ((double) 0.0001) #define max_it 2500 __device__ int counter; typedef thrust::tuple<int, int, float> tuple; typedef thrust::tuple<int, int, double> jacTuple; struct isZero { __host__ __device__ bool operator() (const tuple& a) { const double x = thrust::get<2>(a); return x == 0; }; }; struct is_PVbusRow { int id; is_PVbusRow(int num) : id(num) {}; __host__ __device__ bool operator () (const jacTuple& tup) { const int row = thrust::get<0> (tup); //const int col = thrust::get<1> (tup); return ((row == id)); } }; struct is_PVbusCol { int id; is_PVbusCol(int num) : id(num) {}; __host__ __device__ bool operator () (const jacTuple& tup) { const int col = thrust::get<1> (tup); return ((col == id)); } }; //Main Menu function - allows user to select power system for simulation void optionSelect(int option) { ifstream busdata, linedata; int numLines=0; string line; if ((option<1) || (option>8)) { cout<<"Not a valid option"<<endl; answerSelect(); } else if (option==1) { cout<<"----IEEE 14 bus system----\n"<<endl; linedata.open("linedata.txt"); while (getline (linedata, line)) { ++numLines; } linedata.close(); //pointer is at EOF. Need to close and reopen to stream data into variables busdata.open("busdata.txt"); linedata.open("linedata.txt"); IEEEStandardBusSystems(14, busdata, linedata, numLines); //calls function to execute data intialization for 14 bus system - N.B. 
read from text file (for now) answerSelect(); } else if (option==2){ cout<<"----IEEE 118 bus system----\n"<<endl; linedata.open("118Bus_LineData.txt"); while (getline (linedata, line)) { ++numLines; } linedata.close(); busdata.open("118BusData.txt"); linedata.open("118Bus_LineData.txt"); IEEEStandardBusSystems(118, busdata, linedata, numLines); answerSelect(); } else if (option==3) { cout<<"----IEEE 300 bus system----"<<endl; linedata.open("300Bus_LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("300BusData.txt"); linedata.open("300Bus_LineData.txt"); IEEEStandardBusSystems(300, busdata, linedata, numLines); answerSelect(); } else if (option==4) { cout<<"----Polish Winter Peak 2383 bus system----"<<endl; linedata.open("2383LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("2383BusData.txt"); linedata.open("2383LineData.txt"); IEEEStandardBusSystems(2383, busdata, linedata, numLines); answerSelect(); } else if (option==5) { cout<<"----Polish Summer Peak 3120 bus system----"<<endl; linedata.open("3120LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("3120BusData.txt"); linedata.open("3120LineData.txt"); IEEEStandardBusSystems(3120, busdata, linedata, numLines); answerSelect(); } else if (option==6) { cout<<"----PEGASE 6515 bus system----"<<endl; linedata.open("6515LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("6515BusData.txt"); linedata.open("6515LineData.txt"); IEEEStandardBusSystems(6515, busdata, linedata, numLines); answerSelect(); } else if (option==7) { cout<<"----PEGASE 9241 bus system----"<<endl; linedata.open("9241linedata.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("9241busdata.txt"); linedata.open("9241linedata.txt"); IEEEStandardBusSystems(9241, busdata, linedata, numLines); answerSelect(); } /*else if (option==7) { 
cout<<"----PEGASE 13659 bus system----"<<endl; linedata.open("13659linedata.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("13659busdata.txt"); linedata.open("13659linedata.txt"); IEEEStandardBusSystems(13659, busdata, linedata, numLines); answerSelect(); }*/ else if (option==8) { cout<<"----Case 6 bus system----"<<endl; linedata.open("6BusLineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("6BusData.txt"); linedata.open("6BusLineData.txt"); IEEEStandardBusSystems(6, busdata, linedata, numLines); answerSelect(); } } //Allows user to enter a valid option from the Main Menu void answerSelect() { char answer; int option; cout<<"\nDo you want to perform another simulation (y/n)?"<<endl; cin>>answer; if (cin.fail()) { cin.clear(); cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); cout<<"Invalid option"<<endl; } if ((answer=='y') || (answer=='Y')) { system("CLS"); //clears text on command line interface cout<<"\nPlease select one of the following options for simulation:"<<endl; //cout<<"1. IEEE 14 bus system\n2. IEEE 118 bus system\n3. IEEE 300 bus system\n4. Polish Winter Peak 2383 Bus System\n5. Polish Summer Peak 3120 bus system\n6. PEGASE 9241 bus system\n7. PEGASE 13659 bus system"<<endl; cout<<"1. IEEE 14 bus system\n2. IEEE 118 bus system\n3. IEEE 300 bus system\n4. Polish Winter Peak 2383 Bus System\n5. Polish Summer Peak 3120 bus system\n6. PEGASE 6515 bus system\n7. PEGASE 9241 bus system\n8. 
6 bus system"<<endl; cout<<"Your option: "; cin>>option; optionSelect(option); } else if((answer=='n') || (answer=='N')) { cout<<"Thank you for using this program"<<endl; exit(0); //exits program } else { cout<<"Invalid response"<<endl; answerSelect(); } } //This is the "main" function in gpuFunctions.cu where the NR load flow solution of standard power systems occurs int IEEEStandardBusSystems(int numberOfBuses, ifstream &busData, ifstream &lineData, int numLines) { //hipProfilerStart(); //-------------------------------------------------------------VARIABLE DECLARATION SECTION--------------------------------------------------------------------------- //bus data ifstream variables int bus_i, bustype, busDataIdx=0, lineDataIdx=0, N_g=0, N_p=0, jacSize=0, slackBus, numSlackLines = 0; double P,Q, Vmag, Vang, VMax, VMin; //line data ifstream variables int fromBus, toBus; double r, x, b; //dynamic arrays to hold bus data int *busNum = new int[numberOfBuses], *busType = new int[numberOfBuses], *tempBusNum = new int[numberOfBuses]; double *Pd = new double[numberOfBuses], *Qd = new double[numberOfBuses], *Vm=new double[numberOfBuses], *Va=new double[numberOfBuses], *Vmax=new double[numberOfBuses], *Vmin=new double[numberOfBuses], *P_eq = new double[numberOfBuses], *Q_eq = new double[numberOfBuses], *theta = new double[numberOfBuses]; //dynamic arrays to hold line data int *fromBusArr = new int[numLines], *toBusArr = new int[numLines]; int *PQindex; double *R = new double[numLines], *X = new double[numLines], *Bact = new double[numLines]; complex<double> *B1 = new complex<double> [numLines]; hipDoubleComplex *B = new hipDoubleComplex[numLines]; //N.B.: hipDoubleComplex data type can be used on host once CUDA library is included. 
hipDoubleComplex *Z = new hipDoubleComplex[numLines]; //Vectors needed for push_back() operations to build PQindex[] and PQspec[] vector<double> Pval_spec, Qval_spec, Pval_calc, Qval_calc; vector<int> Pindex, Qindex; vector<double> V_mag, V_ang; //for constructing stateVector[] from hot start double *PQspec, *PQcalc; //Device variables - to be allocated on and copied to the GPU int *dev_fromBus, *dev_toBus, *dev_PQindex, *dev_PQbuses, *dev_PVbuses; double *dev_Pd, *dev_Qd, *dev_Peq, *dev_Qeq, *dev_Vmag, *dev_theta, *dev_powerMismatch, *dev_stateVector, *dev_PQspec, *dev_PQcalc, *dev_xi; hipDoubleComplex *dev_z, *dev_B; //-----------------------sparse--------------------------- int ynnz = (2*numLines)+numberOfBuses; //createYBus() //int ynnz = numLines + numberOfBuses; //createYBusConcise() hipDoubleComplex *yHost = new hipDoubleComplex[ynnz]; int *yRowHost = new int[ynnz], *yColHost = new int[ynnz]; hipDoubleComplex *yDev; int *yRowDev, *yColDev; //arrays holding indices of off-diagonal elements in either upper or lower triangle int* dev_yUpperTriangleRow, *dev_yUpperTriangleCol; int *yUpperTriangleRow = new int[numLines], *yUpperTriangleCol = new int[numLines]; hipMalloc((void**)&dev_yUpperTriangleRow, numLines*sizeof(int)); hipMalloc((void**)&dev_yUpperTriangleCol, numLines*sizeof(int)); //-------------------------------------------------------- //In the linear system Ax=b, the vectors are of the same degree as the Jacobian matrix double *powerMismatch; //b in Ax = b double *stateVector; //x in Ax = b //----------------------------------------------Prompt user for flat start or hot start----------------------------------------- /*int startType; cout<<"What type of simulation would you like to perform:\n1)Flat start - V = 1+j0\n2)\"Hot\" start - V magnitude and angle come from previous solution"<<endl; cin>>startType;*/ //Error variables for mem copies and mem allocation on GPU hipError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6, cudaStat7, 
cudaStat8, cudaStat9, cudaStat10, cudaStat11, cudaStat12, cudaStat13, cudaStat14, cudaStat15, cudaStat16, cudaStat17, cudaStat18; //Variables for timing on GPU hipEvent_t start, stop; float elapsedTime; //----------------------------------------------------------------------------------------------------------------------------------------------------------------- //Reading from busdata.txt (tab delimited) if (!busData) { cout<<"There was a problem reading the 'Bus Data' file."<<endl; return 1; } while (busData>>bus_i>>bustype>>P>>Q>>Vmag>>Vang>>VMax>>VMin) { busNum[busDataIdx]=bus_i; busType[busDataIdx]=bustype; Pd[busDataIdx]=P/100; Qd[busDataIdx]=Q/100; Vm[busDataIdx]=Vmag; Va[busDataIdx]=0.01745329252*Vang; Vmax[busDataIdx]=VMax; Vmin[busDataIdx]=VMin; busDataIdx++; } //For flat start, not using previous solution. V = 1+j0. Vmag = 1pu for PQ and slack buses. Vmag is known at PV buses. /*if (startType == 1) { for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=2) Vm[i] = 1; Va[i] = 0; } }*/ vector<int> PVbusesVec; //Constructing PQindex vector which holds indices of PV and PQ buses for (int i=0; i<numberOfBuses; i++) { if (busType[i] == 1) { N_p++; //increment PQ bus counter since PQ is represented by 1 Pindex.push_back(i); Qindex.push_back(i); } if (busType[i] == 2) { N_g++; //increment PV bus counter since PV is represented by 2 Pindex.push_back(i); PVbusesVec.push_back(i); //For a PV bus there is no initial value of Q } if (busType[i] == 3) { slackBus = i; } } jacSize = numberOfBuses+N_p-1; //Degree of the Jacobian matrix and length of vectors in linear system int *PQbuses = &Qindex[0]; int *PVbuses = &PVbusesVec[0]; hipMalloc((void**)&dev_PQbuses, Qindex.size()*sizeof(int)); hipMalloc((void**)&dev_PVbuses, PVbusesVec.size()*sizeof(int)); hipMemcpy(dev_PQbuses, PQbuses, Qindex.size()*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_PVbuses, PVbuses, PVbusesVec.size(), hipMemcpyHostToDevice); //int jacSizeFull = (2*numberOfBuses) - 2; 
Pindex.insert(Pindex.end(), Qindex.begin(), Qindex.end()); //joins Pindex and Qindex to get a vector which holds indices of both PV and PQ buses. Store in Pindex PQindex = &Pindex[0]; //store vector Pindex as an array PQindex - compatible with GPU /*ofstream check("PQindex.txt"); for (unsigned int i=0; i<Pindex.size(); i++) check<<PQindex[i]<<endl; check.close(); check.open("PQbuses.txt"); for (unsigned int i=0; i<Qindex.size(); i++) check<<PQbuses[i]<<endl; check.close(); cout<<N_g<<endl; cout<<PVbusesVec.size()<<endl; check.open("PVbuses.txt"); for (unsigned int i=0; i<PVbusesVec.size(); i++) check<<PVbuses[i]<<endl; check.close();*/ //In the linear system Ax=b, the vectors are of the same degree as the Jacobian matrix stateVector = new double[jacSize]; //x in Ax = b powerMismatch = new double[jacSize]; //b in Ax = b //stateVector = new double[jacSizeFull]; //x in Ax = b //powerMismatch = new double[jacSizeFull]; //b in Ax = b //Allocation of GPU memory cudaStat1 = hipMalloc((void**)&dev_fromBus, numLines*sizeof(int)); cudaStat2 = hipMalloc((void**)&dev_toBus, numLines*sizeof(int)); cudaStat3 = hipMalloc((void**)&dev_z, numLines*sizeof(hipDoubleComplex)); cudaStat4 = hipMalloc((void**)&dev_B, numLines*sizeof(hipDoubleComplex)); cudaStat6 = hipMalloc((void**)&dev_Pd, numberOfBuses*sizeof(double)); cudaStat7 = hipMalloc((void**)&dev_Qd, numberOfBuses*sizeof(double)); cudaStat8 = hipMalloc((void**)&dev_Vmag, numberOfBuses*sizeof(double)); cudaStat9 = hipMalloc((void**)&dev_theta, numberOfBuses*sizeof(double)); cudaStat10 = hipMalloc((void**)&dev_Peq, numberOfBuses*sizeof(double)); cudaStat11 = hipMalloc((void**)&dev_Qeq, numberOfBuses*sizeof(double)); cudaStat12 = hipMalloc((void**)&dev_powerMismatch, jacSize*sizeof(double)); cudaStat13 = hipMalloc((void**)&dev_stateVector, jacSize*sizeof(double)); cudaStat14 = hipMalloc((void**)&dev_PQindex, jacSize*sizeof(int)); cudaStat15 = hipMalloc((void**)&dev_PQspec, jacSize*sizeof(double)); cudaStat16 = 
hipMalloc((void**)&dev_PQcalc, jacSize*sizeof(double)); cudaStat17 = hipMalloc((void**)&dev_xi, jacSize*sizeof(double)); /*cudaStat12 = hipMalloc((void**)&dev_powerMismatch, jacSizeFull*sizeof(double)); cudaStat13 = hipMalloc((void**)&dev_stateVector, jacSizeFull*sizeof(double)); cudaStat14 = hipMalloc((void**)&dev_PQindex, jacSizeFull*sizeof(int)); cudaStat15 = hipMalloc((void**)&dev_PQspec, jacSizeFull*sizeof(double)); cudaStat16 = hipMalloc((void**)&dev_PQcalc, jacSizeFull*sizeof(double)); cudaStat17 = hipMalloc((void**)&dev_xi, jacSizeFull*sizeof(double));*/ cudaStat18 = hipMalloc((void**)&yDev, ynnz*sizeof(hipDoubleComplex)); hipMalloc((void**)&yRowDev, ynnz*sizeof(int)); hipMalloc((void**)&yColDev, ynnz*sizeof(int)); if (cudaStat1 != hipSuccess || cudaStat2 != hipSuccess || cudaStat3 != hipSuccess || cudaStat4 != hipSuccess || cudaStat6 != hipSuccess || cudaStat7 != hipSuccess || cudaStat8 != hipSuccess || cudaStat9 != hipSuccess || cudaStat10 != hipSuccess || cudaStat11 != hipSuccess || cudaStat12 != hipSuccess || cudaStat13 != hipSuccess || cudaStat14 != hipSuccess || cudaStat15 != hipSuccess || cudaStat16 != hipSuccess || cudaStat17 != hipSuccess || cudaStat18 != hipSuccess) { cout<<"Device memory allocation failed"<<endl; return 1; } //-------------------------------------------------------------------BUS ADMITTANCE MATRIX CONSTRUCTION-------------------------------------------------------------- //Reading from linedata.txt (tab delimited) if(!lineData) { cout<<"There was a problem reading the 'Line Data' file"<<endl; return 1; } while (lineData>>fromBus>>toBus>>r>>x>>b) { fromBusArr[lineDataIdx] = fromBus; toBusArr[lineDataIdx] = toBus; R[lineDataIdx] = r; X[lineDataIdx] = x; //r+jX Bact[lineDataIdx] = b; lineDataIdx++; } for (int i=0; i<numLines; i++) { B[i] = make_cuDoubleComplex(0, Bact[i]/2); Z[i] = make_cuDoubleComplex(R[i], X[i]); } //This is used to number buses consecutively from 0-299 for 300 bus case for (int i=0; i<numberOfBuses; i++) { 
tempBusNum[i] = i; } //Arranging bus numbering for (int i=0; i<numLines; i++) { for (int j=0; j<numberOfBuses; j++) { if (fromBusArr[i] == busNum[j]) fromBusArr[i] = tempBusNum[j]; if (toBusArr[i] == busNum[j]) toBusArr[i] = tempBusNum[j]; } if (fromBusArr[i] == slackBus) { numSlackLines+=2; //numSlackLines++; //concise } if (toBusArr[i] == slackBus) { numSlackLines+=2; //numSlackLines++; //concise } } numSlackLines++; //for element y(0,0) for (int i=0; i<ynnz; i++) yHost[i] = make_cuDoubleComplex(0,0); //copy memory from host to device - parameters needed for createYbus() cudaStat1 = hipMemcpy(dev_fromBus, fromBusArr, numLines*sizeof(int), hipMemcpyHostToDevice); cudaStat2 = hipMemcpy(dev_toBus, toBusArr, numLines*sizeof(int), hipMemcpyHostToDevice); cudaStat3 = hipMemcpy(dev_z, Z, numLines*sizeof(hipDoubleComplex), hipMemcpyHostToDevice); cudaStat4 = hipMemcpy(dev_B, B, numLines*sizeof(hipDoubleComplex), hipMemcpyHostToDevice); cudaStat5 = hipMemcpy(yDev, yHost, ynnz*sizeof(hipDoubleComplex), hipMemcpyHostToDevice); if (cudaStat1 != hipSuccess || cudaStat2 != hipSuccess || cudaStat3 != hipSuccess || cudaStat4 != hipSuccess || cudaStat5 != hipSuccess) { cout<<"Device memory copy failed"<<endl; return 1; } //grid and block dimensions - user defined dim3 dimBlock(threads, threads); //number of threads dim3 dimGrid((numberOfBuses+(threads-1))/threads, (numberOfBuses+(threads-1))/threads); dim3 ythreads(threads); dim3 yblocks((numLines+(threads-1))/threads); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //launch kernel once data has been copied to GPU hipLaunchKernelGGL(( createYBusSparse), dim3(yblocks), dim3(ythreads), 0, 0, numLines, numberOfBuses, dev_fromBus, dev_toBus, dev_z, dev_B, yDev, yRowDev, yColDev); //createYBusSparseConcise<<<yblocks, ythreads>>>(numLines, numberOfBuses, dev_fromBus, dev_toBus, dev_z, dev_B, yDev, yRowDev, yColDev); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, 
stop); cout<<"Y-bus Sparse: "<<elapsedTime<<" ms"<<endl; //------------------------------------Sorting yrow and ycol----------------------------------------------------- /*int *dev_yUpperTriangleRow2, *dev_yUpperTriangleCol2; //second array for sort_by_key() //int* dev_yRowTemp, *dev_yColTemp; //third set of arrays to sort row index using column index as key int *yUpperTriangleRow2, *yUpperTriangleCol2; yUpperTriangleRow2 = new int[numLines]; yUpperTriangleCol2 = new int[numLines]; hipMalloc((void**)&dev_yUpperTriangleRow2, numLines*sizeof(int)); hipMalloc((void**)&dev_yUpperTriangleCol2, numLines*sizeof(int)); //Copy row and column indices to duplicate arrays hipMemcpy(dev_yUpperTriangleRow2, dev_yUpperTriangleRow, numLines*sizeof(int), hipMemcpyDeviceToDevice); hipMemcpy(dev_yUpperTriangleCol2, dev_yUpperTriangleCol, numLines*sizeof(int), hipMemcpyDeviceToDevice); //wrapping device pointers to arrays in device memory to treat as thrust vectors and use thrust sort function thrust::device_ptr<int> yrowPtr(dev_yUpperTriangleRow); thrust::device_ptr<int> ycolPtr(dev_yUpperTriangleCol); //pointers to the original sparse Y thrust::device_ptr<int> ycolPtr2(dev_yUpperTriangleCol2); thrust::device_ptr<int> yrowPtr2(dev_yUpperTriangleRow2); //create wrapper to copy of yrow thrust::stable_sort_by_key(ycolPtr, ycolPtr+numLines, yrowPtr, thrust::less<int>()); //sort original yRow by original yCol using wrappers (both are sorted) thrust::stable_sort_by_key(yrowPtr2, yrowPtr2+numLines, ycolPtr2, thrust::less<int>()); //sort copy of yCol by copy of yRow (both are sorted) //Copy row and column indices (sorted by column) to duplicate yRow and yCol index host arrays hipMemcpy(yUpperTriangleRow, dev_yUpperTriangleRow, numLines*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(yUpperTriangleCol, dev_yUpperTriangleCol, numLines*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(yUpperTriangleRow2, dev_yUpperTriangleRow2, numLines*sizeof(int), hipMemcpyDeviceToHost); 
hipMemcpy(yUpperTriangleCol2, dev_yUpperTriangleCol2, numLines*sizeof(int), hipMemcpyDeviceToHost);*/ //------------------------------------------------------------------------------------------------------------------ hipMemcpy(yHost, yDev, ynnz*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost); hipMemcpy(yRowHost, yRowDev, ynnz*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(yColHost, yColDev, ynnz*sizeof(int), hipMemcpyDeviceToHost); ofstream output; output.open("SparseYbus.txt"); for (int i=0; i<ynnz; i++) { output<<yRowHost[i]<<"\t"<<yColHost[i]<<"\t"<<"("<<cuCreal(yHost[i])<<","<<cuCimag(yHost[i])<<")"<<endl; } output.close(); //--------------------------------------------------------------------------Power Equations-------------------------------------------------------------------------- for (int i=0; i<numberOfBuses; i++) { theta[i] = 0; P_eq[i] = 0; Q_eq[i] = 0; } cudaStat1 = hipMemcpy(dev_Vmag, Vm, numberOfBuses*sizeof(double), hipMemcpyHostToDevice); //cudaStat2 = hipMemcpy(dev_theta, Va, numberOfBuses*sizeof(double), hipMemcpyHostToDevice); //FOR HOT START cudaStat2 = hipMemcpy(dev_theta, theta, numberOfBuses*sizeof(double), hipMemcpyHostToDevice); //FOR FLAT START cudaStat3 = hipMemcpy(dev_Peq, P_eq, numberOfBuses*sizeof(double), hipMemcpyHostToDevice); cudaStat4 = hipMemcpy(dev_Qeq, Q_eq, numberOfBuses*sizeof(double), hipMemcpyHostToDevice); if (cudaStat1 != hipSuccess || cudaStat2 != hipSuccess || cudaStat3 != hipSuccess || cudaStat4 != hipSuccess){ cout<<"Device memory copy failed"<<endl; return 1; } dim3 pthreads(threads); dim3 pblock((ynnz+(threads-1))/threads); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( powerEqnSparse), dim3(pblock), dim3(pthreads), 0, 0, dev_Peq, dev_Qeq, yDev, yRowDev, yColDev, dev_Vmag, dev_theta, ynnz); //powerEqnSparseConcise<<<pblock, pthreads>>>(dev_Peq, dev_Qeq, yDev, yRowDev, yColDev, dev_Vmag, dev_theta, ynnz, numLines); hipEventRecord(stop, 0); 
hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Sparse PQ eqns: "<<elapsedTime<<" ms"<<endl; hipMemcpy(P_eq, dev_Peq, numberOfBuses*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(Q_eq, dev_Qeq, numberOfBuses*sizeof(double), hipMemcpyDeviceToHost); output.open("power equations sparse.txt"); for (int i=0; i<numberOfBuses; i++) { output<<P_eq[i]<<endl; } output<<endl; for (int i=0; i<numberOfBuses; i++) { output<<Q_eq[i]<<endl; } cout<<endl; output.close(); //To construct the power mismatch vector //SPECIFIED values of P and Q read from text file into Pd and Qd. Place these values in separate vectors for SPECIFIED value of P and Q for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=3) { //if bus is not a slack bus Pval_spec.push_back(Pd[i]); Pval_calc.push_back(P_eq[i]); V_ang.push_back(theta[i]); } if (busType[i]==1) { //if bus is a PQ bus Qval_spec.push_back(Qd[i]); Qval_calc.push_back(Q_eq[i]); V_mag.push_back(Vm[i]); } } //power mismatch vector for Full Jacobian matrix (minus slack bus) /*for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=3) { Pval_spec.push_back(Pd[i]); Pval_calc.push_back(P_eq[i]); V_ang.push_back(theta[i]); Qval_spec.push_back(Qd[i]); Qval_calc.push_back(Q_eq[i]); V_mag.push_back(Vm[i]); } }*/ Pval_spec.insert(Pval_spec.end(), Qval_spec.begin(), Qval_spec.end()); //Append vectors yield vector of SPECIFIED real and rxve power PQspec = &Pval_spec[0]; //Create an array and store the appended vector in it to use on the GPU //hipMemcpy(dev_PQspec, PQspec, jacSizeFull*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_PQspec, PQspec, jacSize*sizeof(double), hipMemcpyHostToDevice); Pval_calc.insert(Pval_calc.end(), Qval_calc.begin(), Qval_calc.end()); //Appended vectors yield vector of CALCULATED real and rxve power PQcalc = &Pval_calc[0]; //Assign the appended vector to an array for use on the GPU //Append these vectors to get stateVector (Vang and Vmag) V_ang.insert(V_ang.end(), V_mag.begin(), 
V_mag.end()); stateVector = &V_ang[0]; output.open("stateVector.txt"); for (unsigned int i=0; i<V_ang.size(); i++) output<<stateVector[i]<<"; "<<endl; output.close(); //Power mismatch vector, b in Ax=b, is found by subtracting calculated from specified. output.open("powerMismatch.txt"); for (unsigned int i=0; i<Pval_spec.size(); i++) { powerMismatch[i] = PQspec[i] - PQcalc[i]; output<<powerMismatch[i]<<";"<<endl; } output.close(); //------------------------------------------------Creation of Jacobian Matrix---------------------------------------------------------- cout<<"Y nnz: "<<ynnz<<endl; //int nnzJac= (ynnz + numLines - numSlackLines)*4; //concise int nnzJac= (ynnz - numSlackLines)*4; cout<<"NNZ Jac before: "<<nnzJac<<endl; bool *dev_boolRow, *dev_boolCol; int *dev_J22row, *dev_J22col; hipMalloc((void**)&dev_boolRow, ynnz*sizeof(bool)); hipMalloc((void**)&dev_boolCol, ynnz*sizeof(bool)); hipMalloc((void**)&dev_J22row, ynnz*sizeof(int)); hipMalloc((void**)&dev_J22col, ynnz*sizeof(int)); //Initial algorithm to count nnzJac bool *boolCheck = new bool[numLines]; for (int i=0; i<numLines; i++) boolCheck[i] = 0; bool *dev_boolCheck; int J12count=0; int J22count = 0; int* dev_J12count, *dev_J22count; hipMalloc((void**)&dev_J12count, sizeof(int)); hipMalloc((void**)&dev_J22count, sizeof(int)); hipMalloc((void**)&dev_boolCheck, numLines*sizeof(bool)); hipMemcpy(dev_boolCheck, boolCheck, numLines*sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(dev_J12count, &J12count, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_J22count, &J22count, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( countNnzJac), dim3(yblocks),dim3(ythreads), 0, 0, dev_boolCheck, yRowDev, yColDev, yDev, dev_PVbuses, N_g, slackBus, dev_J12count, dev_J22count); hipMemcpy(&J12count, dev_J12count, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&J22count, dev_J22count, sizeof(int), hipMemcpyDeviceToHost); cout<<J12count<<endl; cout<<J22count<<endl; /*startCounter(); for (int i=0; i<numLines; 
i++) { for (int j=0; j<N_g; j++) { if (yRowHost[i] != slackBus && yColHost[i] == PVbuses[j]) { J12count++; boolCheck[i] = true; } if (yColHost[i] != slackBus && yRowHost[i] == PVbuses[j]) { J12count++; boolCheck[i] = true; } } if (boolCheck[i] == true) J22count++; }*/ J12count+=N_g; J22count*=2; J22count+=N_g; nnzJac = nnzJac - (J12count*2) - J22count; //cout<<"Time taken to find nnzJac: "<<getCounter()<<endl; cout<<"nnzJac: "<<nnzJac<<endl; //-------------------------------------------------------------------------------------------- //int jacCount = ynnz - numSlackLines; //int nnzJac = jacCount*4; //for full jacobian (not reduced based on PQ buses) int h_counter = 0; hipMemcpyToSymbol(counter, &h_counter, sizeof(int), 0, hipMemcpyHostToDevice); //Jacobian variables int *jacRow, *jacCol; double* jac; jacRow = new int[nnzJac]; jacCol = new int[nnzJac]; jac = new double[nnzJac]; int *dev_jacRow, *dev_jacCol; double* dev_jac; hipMalloc((void**)&dev_jacRow, nnzJac*sizeof(int)); hipMalloc((void**)&dev_jacCol, nnzJac*sizeof(int)); hipMalloc((void**)&dev_jac, nnzJac*sizeof(double)); cudaStat2 = hipMemcpy(dev_PQindex, PQindex, jacSize*sizeof(int), hipMemcpyHostToDevice); //dim3 dimGridJac2((ynnz+(threads-1))/threads, (jacSize+(threads-1))/threads, (jacSize+3)/4); //dim3 dimBlockJac2(threads, threads, 4); //dim3 dimGridJac((ynnz+(threads-1))/threads, (N_p+(threads-1)/threads)); dim3 dimBlockJac(threads, threads); dim3 dimGridJ22((ynnz+(threads-1))/threads); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //createJ11<<<dimGridJ22, threads>>>(ynnz, numLines, numSlackLines, slackBus, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); hipLaunchKernelGGL(( createJ11Copy), dim3(dimGridJ22), dim3(threads), 0, 0, ynnz, slackBus, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, 
start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //createJacobianSparse3<<<dimGridJac, threads>>>(ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); hipLaunchKernelGGL(( createJ12_J21), dim3(dimGridJ22), dim3(threads), 0, 0, ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( createJ22), dim3(dimGridJ22), dim3(threads), 0, 0, ynnz, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); //createJacobianSparse3<<<dimGridJac, threads>>>(ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); //createJacobianSparse2<<<dimGridJac2, dimBlockJac2>>>(ynnz, jacCount, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQindex, N_g, N_p, jacSize); //createJacobianSparse<<<dimGridJac, threads>>>(ynnz, jacCount, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl; 
/*hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); createJacobianSparse<<<dimGridJ22, threads>>>(ynnz, jacCount, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl;*/ //----------------------------------------Thrust remove_if() trial--------------------------------------- /*thrust::device_ptr<int> rowVecPtr(dev_jacRow); thrust::device_ptr<int> colVecPtr(dev_jacCol); thrust::device_ptr<double> valVecPtr(dev_jac); thrust::device_vector<int> rowVec(rowVecPtr, rowVecPtr+nnzJac); thrust::device_vector<int> colVec(colVecPtr, colVecPtr+nnzJac); thrust::device_vector<double> valVec(valVecPtr, valVecPtr+nnzJac); typedef thrust::device_vector<int>::iterator intDiter; //typedef iterator for row and col typedef thrust::device_vector<double>::iterator doubleDiter; //typedef iterator for double values typedef thrust::tuple<intDiter, intDiter, doubleDiter> iteratorTuple; //iterator tuple typedef thrust::zip_iterator<iteratorTuple> zipIt; //zip vectors together using tuple hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i=0; i<N_g; i++) { zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); int eraseJacIdx = (numberOfBuses - 2) + PVbuses[i]; zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, is_PVbusRow(eraseJacIdx)); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac row indices: "<<elapsedTime<<" ms"<<endl; 
cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int i=0; i<N_g; i++) { zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); int eraseJacIdx = (numberOfBuses - 2) + PVbuses[i]; zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, is_PVbusCol(eraseJacIdx)); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac col indices: "<<elapsedTime<<" ms"<<endl; cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, isZero()); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac values: "<<elapsedTime<<" ms"<<endl; cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; int* dev_jacRowNew = thrust::raw_pointer_cast(&rowVec[0]); int* dev_jacColNew = thrust::raw_pointer_cast(&colVec[0]); double* dev_jacNew = thrust::raw_pointer_cast(&valVec[0]);*/ //------------------------------------------------------------------------------------------------------- hipMemcpy(jacRow, dev_jacRow, nnzJac*sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(jacCol, dev_jacCol, nnzJac*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(jac, dev_jac, nnzJac*sizeof(double), hipMemcpyDeviceToHost); output.open("GPUJac.txt"); for (int i=0; i<nnzJac; i++) output<<jacRow[i]<<"\t"<<jacCol[i]<<"\t"<<jac[i]<<endl; output.close(); //--------------------------------------------------------------SOLUTION OF LINEAR SYSTEM----------------------------------------------------------------- //Removing zeros from dev_jac after calculation //Create wrapper around device pointer thrust::device_ptr<int> rowVecPtr(dev_jacRow); thrust::device_ptr<int> colVecPtr(dev_jacCol); thrust::device_ptr<double> valuesPtr(dev_jac); //Copy to device_vector for functionality thrust::device_vector<int> rowVec(rowVecPtr, rowVecPtr+nnzJac); thrust::device_vector<int> colVec(colVecPtr, colVecPtr+nnzJac); thrust::device_vector<double> valVec(valuesPtr, valuesPtr+nnzJac); //typedef thrust::tuple<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator, thrust::device_vector<float>::iterator> iteratorTuple; typedef thrust::device_vector<int>::iterator intDiter; //typedef iterator for row and col typedef thrust::device_vector<double>::iterator doubleDiter; //typedef iterator for double values typedef thrust::tuple<intDiter, intDiter, doubleDiter> iteratorTuple; //iterator tuple typedef thrust::zip_iterator<iteratorTuple> zipIt; //zip vectors together using tuple zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+nnzJac; //Timing remove_if() and erase() operations hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, isZero()); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); hipEventRecord(stop, 0); 
hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Thrust remove_if() for Jacobian: "<<elapsedTime<<" ms"<<endl; cout<<"nnzJac after removing zeros: "<<rowVec.size()<<endl; int* dev_jacRowNew = thrust::raw_pointer_cast(&rowVec[0]); int* dev_jacColNew = thrust::raw_pointer_cast(&colVec[0]); double* dev_jacNew = thrust::raw_pointer_cast(&valVec[0]); nnzJac = rowVec.size(); //need to sort COO format Jacobian matrix in row-major order to get CSR format int* dev_jacRow2, *dev_jacCol2; //second array for sort_by_key() hipMalloc((void**)&dev_jacRow2, nnzJac*sizeof(int)); hipMalloc((void**)&dev_jacCol2, nnzJac*sizeof(int)); hipMemcpy(dev_jacCol2, dev_jacColNew, nnzJac*sizeof(int), hipMemcpyDeviceToDevice); //wrapping device pointers to arrays in device memory to treat as thrust vectors and use thrust sort function thrust::device_ptr<int> rowVecPtr2(dev_jacRowNew); thrust::device_ptr<int> colVecPtr2(dev_jacColNew); thrust::device_ptr<double> valuesPtr2(dev_jacNew); thrust::device_ptr<int> colVec2(dev_jacCol2); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //Perform sorting (sort column indices and values) based on rows (corresponding array sorting) thrust::stable_sort_by_key(colVecPtr2, colVecPtr2+nnzJac, rowVecPtr2, thrust::less<int>()); thrust::stable_sort_by_key(colVec2, colVec2+nnzJac, valuesPtr2, thrust::less<int>()); hipMemcpy(dev_jacRow2, dev_jacRowNew, nnzJac*sizeof(int), hipMemcpyDeviceToDevice); thrust::device_ptr<int> rowVec2(dev_jacRow2); thrust::stable_sort_by_key(rowVecPtr2, rowVecPtr2+nnzJac, colVecPtr2, thrust::less<int>()); thrust::stable_sort_by_key(rowVec2, rowVec2+nnzJac, valuesPtr2, thrust::less<int>()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Sorting Jacobian to convert to CSR: "<<elapsedTime<<" ms"<<endl; //Required to represent coefficient matrix (Jacobian) in sparse (CSR) format int *csrRowPtrJac; //Setup cuSPARSE 
hipsparseStatus_t status; hipsparseHandle_t handle = 0; hipsparseMatDescr_t descr_A = 0; //Initialize cuSPARSE status = hipsparseCreate(&handle); if (status != HIPSPARSE_STATUS_SUCCESS) { cout<<"CUSPARSE Library Initialization failed"<<endl; return 1; } hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //Create and setup matrix descriptor for Coefficient Matrix hipsparseCreateMatDescr(&descr_A); hipsparseSetMatType(descr_A, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr_A, HIPSPARSE_INDEX_BASE_ZERO); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"CUPARSE Initialization: "<<elapsedTime<<" ms"<<endl; //cudaStat1 = hipMalloc((void**)&csrRowPtrJac, (jacSizeFull+1)*sizeof(int)); cudaStat1 = hipMalloc((void**)&csrRowPtrJac, (jacSize+1)*sizeof(int)); if (cudaStat1 != hipSuccess) { cout<<"Device memory allocation failed."<<endl; return 1; } hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //Convert matrix to sparse format (CSR) hipsparseXcoo2csr(handle, dev_jacRowNew, nnzJac, jacSize, csrRowPtrJac, HIPSPARSE_INDEX_BASE_ZERO); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"Converting COO to CSR: "<<elapsedTime<<" ms"<<endl; /*double eigen = 0.0; double *initVec = new double[jacSize]; double *dev_initVec, *dev_c; for (int i=0; i<jacSize; i++) initVec[i] = 1; cudaStat1 = hipMalloc((void**)&dev_initVec, jacSize*sizeof(double)); cudaStat2 = hipMalloc((void**)&dev_c, jacSize*sizeof(double)); if (cudaStat1 != hipSuccess || cudaStat2 != hipSuccess) { cout<<"Device memory allocation failed - BiCGStab variables."<<endl; return 1; } hipMemcpy(dev_initVec, initVec, jacSize*sizeof(double), hipMemcpyHostToDevice); powerMethod(csrRowPtrJac, dev_jacColNew, dev_jacNew, descr_A, nnzJac, initVec, dev_initVec, dev_c, handle, &eigen, jacSize); cout<<"Max eigenvalue of Jacobian matrix is: "<<eigen<<endl;*/ int* 
h_jacRowNew, *h_jacColNew, *h_csrJacRowPtr; double* h_jacNew; h_jacRowNew = new int[nnzJac]; h_jacColNew = new int[nnzJac]; h_jacNew = new double[nnzJac]; h_csrJacRowPtr = new int[jacSize+1]; hipMemcpy(h_jacRowNew, dev_jacRowNew, nnzJac*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_jacColNew, dev_jacColNew, nnzJac*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_jacNew, dev_jacNew, nnzJac*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(h_csrJacRowPtr, csrRowPtrJac, (jacSize+1)*sizeof(int), hipMemcpyDeviceToHost); output.open("new jacobian after elimination and sorting.txt"); for (int i=0; i<nnzJac; i++) output<<h_jacRowNew[i]<<"\t"<<h_jacColNew[i]<<"\t"<<h_jacNew[i]<<endl; for (int i=0; i<jacSize+1; i++) output<<h_csrJacRowPtr[i]<<endl; output.close(); //Setup cuBLAS hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cublasStatus cublas_status; cublas_status = hipblasInit(); if (cublas_status!=HIPBLAS_STATUS_SUCCESS) { cout<<"cuBLAS Initialization Error!"<<endl; return 1; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"CUBLAS Initialization: "<<elapsedTime<<" ms"<<endl; //Vectors required in BiCGStab double *res, *r_tld, *p, *s, *t, *v, *p_hat, *s_hat; /*cudaStat1 = hipMalloc((void**)&res, jacSizeFull*sizeof(double)); cudaStat2 = hipMalloc((void**)&r_tld, jacSizeFull*sizeof(double)); cudaStat3 = hipMalloc((void**)&p, jacSizeFull*sizeof(double)); cudaStat4 = hipMalloc((void**)&p_hat, jacSizeFull*sizeof(double)); cudaStat5 = hipMalloc((void**)&s, jacSizeFull*sizeof(double)); cudaStat6 = hipMalloc((void**)&s_hat, jacSizeFull*sizeof(double)); cudaStat7 = hipMalloc((void**)&v, jacSizeFull*sizeof(double)); cudaStat8 = hipMalloc((void**)&t, jacSizeFull*sizeof(double));*/ cudaStat1 = hipMalloc((void**)&res, jacSize*sizeof(double)); cudaStat2 = hipMalloc((void**)&r_tld, jacSize*sizeof(double)); cudaStat3 = hipMalloc((void**)&p, jacSize*sizeof(double)); cudaStat4 = 
hipMalloc((void**)&p_hat, jacSize*sizeof(double)); cudaStat5 = hipMalloc((void**)&s, jacSize*sizeof(double)); cudaStat6 = hipMalloc((void**)&s_hat, jacSize*sizeof(double)); cudaStat7 = hipMalloc((void**)&v, jacSize*sizeof(double)); cudaStat8 = hipMalloc((void**)&t, jacSize*sizeof(double)); if (cudaStat1 != hipSuccess || cudaStat2 != hipSuccess || cudaStat3 != hipSuccess || cudaStat4 != hipSuccess || cudaStat5 != hipSuccess || cudaStat6 != hipSuccess || cudaStat7 != hipSuccess || cudaStat8 != hipSuccess) { cout<<"Device memory allocation failed - BiCGStab variables."<<endl; return 1; } hipMemcpy(dev_stateVector, stateVector, jacSize*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_xi, stateVector, jacSize*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_powerMismatch, powerMismatch, jacSize*sizeof(double), hipMemcpyHostToDevice); /*hipMemcpy(dev_stateVector, stateVector, jacSizeFull*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_xi, stateVector, jacSizeFull*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_powerMismatch, powerMismatch, jacSizeFull*sizeof(double), hipMemcpyHostToDevice);*/ ; //hipEventCreate(&start); //hipEventCreate(&stop); //hipEventRecord(start, 0); biCGStab2(status, handle, descr_A, jacSize, jacSize, nnzJac, dev_jacColNew, csrRowPtrJac, dev_jacNew, dev_stateVector, res, r_tld, p, p_hat, s, s_hat, v, t, dev_powerMismatch, jacSize); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&elapsedTime, start, stop); //cout<<"PBiCG-Stab: "<<elapsedTime<<" ms"<<endl; hipMemcpy(stateVector, dev_stateVector, jacSize*sizeof(double), hipMemcpyDeviceToHost); output.open("output.txt"); for(int i=0; i<jacSize; i++) output<<stateVector[i]<<endl; output.close(); //updateX<<<((jacSize+(threads-1))/threads), threads>>>(jacSize, numberOfBuses, dev_PQindex, dev_Vmag, dev_theta, dev_stateVector, dev_xi); //powerEqn <<<dimGrid, dimBlock>>>(dev_Peq1, dev_Qeq1, dev_y, dev_Vmag, dev_theta, numberOfBuses); 
//updateMismatch<<<((jacSize+(threads-1))/threads), threads>>>(numberOfBuses, jacSize, dev_Peq1, dev_Qeq1, dev_PQindex, dev_PQcalc, dev_PQspec, dev_powerMismatch);
//createJacobian<<<dimGrid2, dimBlock>>>(numberOfBuses, jacSize, dev_Peq1, dev_Qeq1, dev_Vmag, dev_theta, dev_y, dev_jacobian, dev_PQindex);
//check convergence
//call Jacobian
//loop
// Shut down the legacy cuBLAS (v1-style) runtime initialised earlier in main().
cublas_status = hipblasShutdown();
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
    cout<<"Shut down error"<<endl;
    return 1;
}
//-------------------------------------------------------------------------------------------------------------------------------------------------------------
// Report problem dimensions (number of mismatch equations and Jacobian size).
cout<<"\nThere will be "<< numberOfBuses-1<<" P equations and "<<N_p<<" Q equations to solve."<<endl;
cout<<"There will be a total of "<<numberOfBuses+N_p-1<<" equations for the system and "<< numberOfBuses+N_p-1<< " unknowns (V and delta) to be solved."<<endl;
cout<<"The Jacobian matrix will be of size "<<2*(numberOfBuses-1)-N_g<<" x "<<2*(numberOfBuses-1)-N_g<<endl;
//free CUDA memory
hipFree(dev_fromBus);
hipFree(dev_toBus);
hipFree(dev_z);
hipFree(dev_B);
hipFree(dev_Pd);
hipFree(dev_Qd);
hipFree(yDev);
hipFree(yRowDev);
hipFree(yColDev);
hipFree(dev_jac);
hipFree(dev_jacRow);
// hipFree(dev_jacRow2);
hipFree(dev_jacCol);
hipFree(dev_Vmag);
hipFree(dev_theta);
hipFree(dev_Peq);
hipFree(dev_Qeq);
hipFree(dev_powerMismatch);
hipFree(dev_stateVector);
hipFree(dev_PQindex);
// hipFree(csrRowPtrJac);
//delete all dynamic arrays
delete[] busNum;
delete[] busType;
delete[] Pd;
delete[] Qd;
delete[] Vm;
delete[] Va;
delete[] Vmax;
delete[] Vmin;
delete[] fromBusArr;
delete[] toBusArr;
delete[] R;
delete[] X;
delete[] Z;
delete[] B;
delete[] P_eq;
delete[] Q_eq;
delete[] theta;
return 0;
}

// Builds the bus admittance matrix Y in COO form, one thread per line/branch.
// Entry layout in y/yrow/ycol:
//   [0, numLines)           off-diagonal y(from,to) = -1/Z
//   [numLines, 2*numLines)  off-diagonal y(to,from) = -1/Z
//   [2*numLines, ...)       diagonal entries, accumulated atomically (1/Z + B per incident line)
// NOTE(review): the diagonal index initialisation (index<numberOfBuses) is nested
// inside if(index<numLines); if numberOfBuses > numLines some diagonal row/col
// indices would never be written -- confirm numLines >= numberOfBuses holds for
// the intended inputs.
__global__ void createYBusSparse(int numLines, int numberOfBuses, int *fromBus, int* toBus, hipDoubleComplex *Z, hipDoubleComplex *B, hipDoubleComplex *y, int *yrow, int *ycol)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<numLines) {
        int i = fromBus[index];
        int j = toBus[index];
        // Both symmetric off-diagonal entries for this line.
        yrow[index] = i;
        ycol[index] = j;
        yrow[index+numLines] = j;
        ycol[index+numLines] = i;
        y[index] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        y[index+numLines] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        // Diagonal contribution of this line (series admittance + shunt B);
        // atomic because several lines are incident on the same bus.
        hipDoubleComplex temp = cuCadd(cuCdiv(make_cuDoubleComplex(1,0),Z[index]),B[index]);
        atomicAddComplex(&y[i+(2*numLines)], temp);
        atomicAddComplex(&y[j+(2*numLines)], temp);
        if (index<numberOfBuses) {
            yrow[2*numLines+index] = index;
            ycol[2*numLines+index] = index;
        }
    }
}

// Same as createYBusSparse but stores each off-diagonal pair only once
// (symmetric/"concise" storage): [0,numLines) off-diagonals, [numLines, ...)
// diagonals. Consumers must account for the implied symmetric entry
// (see powerEqnSparseConcise).
// NOTE(review): the same numberOfBuses/numLines nesting caveat as above applies.
__global__ void createYBusSparseConcise(int numLines, int numberOfBuses, int *fromBus, int* toBus, hipDoubleComplex *Z, hipDoubleComplex *B, hipDoubleComplex *y, int *yrow, int *ycol)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<numLines) {
        int i = fromBus[index];
        int j = toBus[index];
        yrow[index] = i;
        ycol[index] = j;
        y[index] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        hipDoubleComplex temp = cuCadd(cuCdiv(make_cuDoubleComplex(1,0),Z[index]),B[index]);
        atomicAddComplex(&y[i+numLines], temp);
        atomicAddComplex(&y[j+numLines], temp);
        if (index<numberOfBuses) {
            yrow[numLines+index] = index;
            ycol[numLines+index] = index;
        }
    }
}

// Accumulates the bus power-injection equations
//   P_i += Vm_i*Vm_j*(G_ij*cos(th_i-th_j) + B_ij*sin(th_i-th_j))
//   Q_i += Vm_i*Vm_j*(G_ij*sin(th_i-th_j) - B_ij*cos(th_i-th_j))
// one thread per nonzero of the full (non-concise) Y matrix, using
// atomicAdd2 since many Y entries contribute to the same bus.
// P and Q are assumed zeroed before launch -- confirm at the call site.
__global__ void powerEqnSparse(double *P, double *Q, hipDoubleComplex* y, int* yrow, int* ycol, double *Vm, double *theta, int ynnz)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < ynnz) {
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        //atomicAdd2(&P[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])) + cuCimag(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
        //atomicAdd2(&Q[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])) - cuCimag(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])))));
    }
}

// Power-injection accumulation for the "concise" Y layout produced by
// createYBusSparseConcise: entries [0,numLines) are stored once but represent
// a symmetric pair, so both (row,col) and (col,row) contributions are added;
// entries [numLines,ynnz) are diagonals and contribute once.
__global__ void powerEqnSparseConcise(double *P, double *Q, hipDoubleComplex* y, int* yrow, int* ycol, double *Vm, double *theta, int ynnz, int numLines)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < numLines) {
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&P[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])) + cuCimag(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&Q[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])) - cuCimag(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])))));
    }
    if (i >= numLines && i < ynnz) {
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
    }
}

// Counts, per Y-matrix nonzero, how many J12/J22 Jacobian entries the PV buses
// contribute, accumulating into the two device counters. boolCheck[i] marks
// entries that touch a PV bus row or column.
// NOTE(review): there is no `i < ynnz` (array length) guard -- any thread beyond
// the nonzero count reads yrow/ycol/boolCheck out of bounds. This is only safe
// if the launch grid exactly matches the array length; confirm at the call site.
__global__ void countNnzJac(bool* boolCheck, int *yrow, int* ycol, hipDoubleComplex *y, int *PVbuses, int N_g, int slackBus, int* dev_J12count, int* dev_J22count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for (int j=0; j<N_g; j++) {
        if (yrow[i] != slackBus && ycol[i] == PVbuses[j]) {
            atomicAdd(dev_J12count, 1); //J12count++
            boolCheck[i] = true;
        }
        if (ycol[i] != slackBus && yrow[i] == PVbuses[j]) {
            atomicAdd(dev_J12count, 1); //J12count++
            boolCheck[i] = true;
        }
    }
    if (boolCheck[i] == true)
        atomicAdd(dev_J22count, 1); //J22count++
}

//full jacobian
// Assembles all four Jacobian sub-blocks (J11, J12, J21, J22) in COO form
// directly from the sparse Y-bus, one thread per Y nonzero. Output slots are
// claimed atomically via `counter` (a __device__ global defined outside this
// excerpt -- presumably zeroed before each launch; confirm).
__global__ void createJacobianSparse(int ynnz, int jacCount, int
slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz) {
        // Skip Y entries in the slack bus row or column (slack is eliminated
        // from the Jacobian; buses past the slack shift down by one).
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            // Claim one slot per sub-block; the four blocks live at fixed
            // offsets of jacCount within jac/jacRow/jacCol.
            int i = atomicAdd(&counter, 1);
            //J11
            jacRow[i] = yrow[yIdx] - 1;
            jacCol[i] = ycol[yIdx] - 1;
            //J12
            jacRow[i + jacCount] = yrow[yIdx] - 1;
            jacCol[i + jacCount] = ycol[yIdx] + numBus - 2;
            //J21
            jacRow[i + (2*jacCount)] = yrow[yIdx] + numBus - 2;
            jacCol[i + (2*jacCount)] = ycol[yIdx] - 1;
            //J22
            jacRow[i + (3*jacCount)] = yrow[yIdx] + numBus - 2;
            jacCol[i + (3*jacCount)] = ycol[yIdx] + numBus - 2;
            // NOTE(review): the -1 index shift assumes slackBus == 0 here,
            // unlike createJ11Copy which branches on `< slackBus` -- confirm.
            if (yrow[yIdx] == ycol[yIdx]) {
                //J11 diagonal calculations
                jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
                //J12 diagonal calculations
                jac[i + jacCount] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                //J21 diagonal calculations
                jac[i + (2*jacCount)] = P[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                //J22 diagonal calculations
                jac[i + (3*jacCount)] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]);
            } else {
                //J11 off-diagonal calculations
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J12 off-diagonal calculations
                jac[i + jacCount] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J21 off-diagonal calculations
                jac[i + (2*jacCount)] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J22 off-diagonal calculations
                jac[i + (3*jacCount)] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            }
        }
    }
}

// 2-D variant of the Jacobian assembly: x dimension indexes Y nonzeros,
// y dimension indexes PQ buses. Builds J12/J21 entries and records the J22
// row/col bookkeeping (J22row/J22col, boolRow/boolCol) consumed by createJ22.
// Slots are claimed atomically via the global `counter`.
__global__ void createJacobianSparse3(int ynnz, int slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, int* PQbuses, int N_p, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int PQidx = blockIdx.y * blockDim.y + threadIdx.y;
    if (yIdx < ynnz) {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            /*if (PQidx==0) { int i = atomicAdd(&counter, 1); int j = atomicAdd(&counter2, 1); //J11 jacRow[i] = yrow[yIdx] - 1; jacCol[i] = ycol[yIdx] - 1; if (yrow[yIdx] == ycol[yIdx]) { //J11 diagonal calculations jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx])); } else { //J11 off-diagonal calculations jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]]))); } }*/
            if (PQidx<N_p) {
                // Column matches a PQ bus -> J12 entry (dP/dV column block).
                if (ycol[yIdx] == PQbuses[PQidx]) {
                    boolCol[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
                    else jacRow[i] = yrow[yIdx] - 1;
                    jacCol[i] = numBus + PQidx -1; //J12
                    J22col[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx]) {
                        //J12 diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                    } else {
                        //J12 off-diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                // Row matches a PQ bus -> J21 entry (dQ/dtheta row block).
                if (yrow[yIdx] == PQbuses[PQidx]) {
                    boolRow[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    //int j = atomicAdd(&counter2, 1);
                    jacRow[i] = numBus + PQidx -1; //J21
                    if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
                    else jacCol[i] = ycol[yIdx] - 1;
                    J22row[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx]) {
                        //J21 diagonal calculations
                        jac[i] = P[yrow[yIdx]]
-Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                    } else {
                        //J21 off-diagonal calculations
                        jac[i] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                /*if (boolRow[yIdx] == true && boolCol[yIdx] == true) { int i = atomicAdd(&counter, 1); if (yrow[yIdx] == ycol[yIdx]) { jac[i] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]); } else { jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]]))); } }*/
            }
        }
    }
}

// 1-D variant of the J12/J21 assembly: one thread per Y nonzero, iterating
// over the PQ buses in a sequential loop instead of a second grid dimension.
// Same outputs and bookkeeping (boolRow/boolCol, J22row/J22col) as
// createJacobianSparse3; J22 entries are produced separately by createJ22.
__global__ void createJ12_J21(int ynnz, int slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, int* PQbuses, int N_p, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz) {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            for (int PQidx=0; PQidx<N_p; PQidx++) {
                if (ycol[yIdx] == PQbuses[PQidx]) {
                    boolCol[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    jacCol[i] = numBus + PQidx -1; //J12
                    if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
                    else jacRow[i] = yrow[yIdx] - 1;
                    J22col[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx]) {
                        //J12 diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                    } else {
                        //J12 off-diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                if (yrow[yIdx] == PQbuses[PQidx]) {
                    boolRow[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    jacRow[i] = numBus + PQidx -1; //J21
                    if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
                    else jacCol[i] = ycol[yIdx] - 1;
                    J22row[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx]) {
                        //J21 diagonal calculations
                        jac[i] = P[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                    } else {
                        //J21 off-diagonal calculations
                        jac[i] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
            }
        }
    }
}

// Emits the J22 (dQ/dV) entries for Y nonzeros whose row AND column were both
// marked as PQ-bus hits by a previous createJ12_J21 / createJacobianSparse3
// launch, using the J22row/J22col indices recorded there. Must run after that
// kernel; slots claimed via the shared global `counter`.
__global__ void createJ22(int ynnz, int numBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx<ynnz) {
        if (boolRow[yIdx] == true && boolCol[yIdx] == true) {
            int i = atomicAdd(&counter, 1);
            jacRow[i] = J22row[yIdx];
            jacCol[i] = J22col[yIdx];
            if (yrow[yIdx] == ycol[yIdx]) {
                jac[i] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]);
            } else {
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            }
        }
    }
}

// Builds J11 (dP/dtheta) exploiting Y-bus symmetry: for off-diagonal entries
// (yIdx < numLines, concise layout) each thread writes both (r,c) and the
// mirrored (c,r) entry at index i+offset; diagonals (yIdx >= numLines) go at
// [offset, ...) only. offset = numLines - (numSlackLines-1)/2 reserves space
// for the mirrored half after slack-incident lines are dropped.
__global__ void createJ11(int ynnz, int numLines, int numSlackLines, int slackBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = numLines - ((numSlackLines - 1)/2);
    if (yIdx < numLines) {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            int i = atomicAdd(&counter, 1);
            // Row index (shifted past the slack bus) and its mirror's column.
            if (yrow[yIdx] < slackBus) {
                jacRow[i] = yrow[yIdx];
                jacCol[i+offset] = yrow[yIdx];
            } else {
                jacRow[i] = yrow[yIdx] - 1;
                jacCol[i+offset] = yrow[yIdx] - 1;
            }
            if (ycol[yIdx] < slackBus) {
                jacCol[i] = ycol[yIdx];
                jacRow[i+offset] = ycol[yIdx];
            } else {
                jacCol[i] = ycol[yIdx] - 1;
                jacRow[i+offset] = ycol[yIdx] - 1;
            }
            jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            jac[i+offset] =
Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
        }
    }
    // Diagonal entries of J11, stored in the mirrored half at [offset, ...).
    if (yIdx >= numLines && yIdx < ynnz) {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            int i = atomicAdd(&counter, 1);
            if (yrow[yIdx] < slackBus) jacRow[i+offset] = yrow[yIdx];
            else jacRow[i+offset] = yrow[yIdx] - 1;
            if (ycol[yIdx] < slackBus) jacCol[i+offset] = ycol[yIdx];
            else jacCol[i+offset] = ycol[yIdx] - 1;
            jac[i+offset] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
        }
    }
}

// Straightforward J11 (dP/dtheta) assembly over the full Y nonzero list:
// one entry per admissible Y entry, diagonal vs off-diagonal formula selected
// per element. Indices past the slack bus are shifted down by one.
__global__ void createJ11Copy(int ynnz, int slackBus, double* P, double *Q, double *Vmag, double *Vang, hipDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz) {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus) {
            int i = atomicAdd(&counter, 1);
            if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
            else jacRow[i] = yrow[yIdx] - 1;
            if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
            else jacCol[i] = ycol[yIdx] - 1;
            if (yrow[yIdx] == ycol[yIdx])
                //J11 diagonal calculations
                jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
            else
                //J11 off-diagonal calculations
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
        }
    }
}

// Retired biCGStab signatures kept for reference (note the "cusparseDtatus_t"
// typo inside the comments is part of the historical text).
/*void biCGStab(cusparseDtatus_t status, hipsparseHandle_t handle, hipsparseMatDescr_t descr_A, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)*/
/*void biCGStab(cusparseDtatus_t status, hipsparseHandle_t handle, hipsparseMatDescr_t descr_A, hipsparseMatDescr_t descr_M, int M, int N, int nnz, int nnzJacPre, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, int* csrColIndPre, int*
csrRowPtrPre, double* csrValPre, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)*/
// Preconditioned BiCGStab solver for the sparse linear system A*x = b.
// A is in CSR (csrVal/RowPtr/ColIndAdev); the preconditioner M = L*U is an
// ILU(0) factorisation stored in csrValPre/RowPtrPre/ColIndPre with analysis
// infos info_L/info_U prepared by the caller. x holds the initial guess on
// entry and the solution on exit; r, r_tld, p, p_hat, s, s_hat, v, t are
// caller-allocated device work vectors of length N. Tol and max_it are
// file-scope constants defined outside this excerpt.
// NOTE(review): breakdown tests use exact floating-point equality
// (rho == 0, omega == 0.0) rather than a tolerance -- confirm intended.
void biCGStab(hipsparseStatus_t status, hipsparseHandle_t handle, hipsparseMatDescr_t descr_A, hipsparseMatDescr_t descr_L, hipsparseMatDescr_t descr_U, cusparseSolveAnalysisInfo_t info_L, cusparseSolveAnalysisInfo_t info_U, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, int* csrColIndPre, int* csrRowPtrPre, double* csrValPre, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)
{
    double bnorm, snorm, err, alpha=1.0, beta, omega=1.0, rho=1.0, rho_1, resid=0; //BiCG scalars - previously global variables
    int flag, iter;
    //For cusparse csrmv function
    double d_one = 1.0;
    double dzero = 0.0;
    double temp=0, temp2=0;
    //Setup cuBLAS
    cublasStatus cublas_status;
    cublas_status = hipblasInit();
    if (cublas_status!=HIPBLAS_STATUS_SUCCESS) {
        cout<<"cuBLAS Initialization Error!"<<endl;
        return;
    }
    // Initial residual: r = b - A*x (computed as r = A*x, r = -r, r += b).
    hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, x, &dzero, r);
    hipblasDscal(M, -1.0, r, 1);
    hipblasDaxpy(M, 1.0, b, 1, r, 1);
    hipblasDcopy(N, r, 1, p, 1);
    hipblasDcopy(N, r, 1, r_tld, 1);
    bnorm = hipblasDnrm2(N, b, 1);
    //To find Error: error = ||r||/||b||
    err = hipblasDnrm2(N, r, 1)/bnorm;
    if (err<Tol) {
        cout<<"Solution has already converged"<<endl;
        return;
    }
    for (iter=0; iter<max_it; iter++) {
        rho_1 = rho;
        rho = hipblasDdot(N, r_tld, 1, r, 1);
        //cout<<"Rho: "<<rho<<endl;
        if (rho == 0) //check for breakdown
            break;
        //For every iteration after the first
        if (iter>0) {
            beta = (rho/rho_1)*(alpha/omega);
            //p = r+ beta(p-omega*v)
            hipblasDaxpy(N, -omega, v, 1, p, 1);
            hipblasDscal(N, beta, p, 1);
            hipblasDaxpy(N, 1.0, r, 1, p, 1);
        }
        //Preconditioner to find v
        //If M=I, this implies p_hat = p => can use hipMemcpyDeviceToDevice to transfer p to p_hat
        //p_hat = M\p
        cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrPre, csrColIndPre, info_L, p, t); //Here we are using t as a temporary vector - it is not needed in the algorithm at this point and saves memory of creating another vector
        cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrPre, csrColIndPre, info_U, t, p_hat);
        //p_hat = inv(M)*p
        //hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnzJacPre, &d_one, descr_M, csrValPre, csrRowPtrPre, csrColIndPre, p, &dzero, p_hat);
        //v = A*p_hat
        //hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p, &dzero, v);
        hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p_hat, &dzero, v);
        alpha = rho/hipblasDdot(N, r_tld, 1, v, 1); //alpha = rho/(r_tld,v)
        hipblasDaxpy(N, -alpha, v, 1, r, 1); //s=r - alpha*v
        hipblasDcopy(N, r, 1, s, 1);
        //hipblasDaxpy(N, alpha, p, 1, x, 1);
        hipblasDaxpy(N, alpha, p_hat, 1, x, 1); //x = x+ alpha*p
        //Check for convergence
        snorm = hipblasDnrm2(N, s, 1);
        if (snorm/bnorm < Tol) {
            resid = snorm/bnorm;
            break;
        }
        //Preconditioner to find t
        //M=I implies s = s_hat => t=As
        //s_hat = M\s
        cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrPre, csrColIndPre, info_L, r, t);
        cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrPre, csrColIndPre, info_U, t, s_hat);
        //s_hat = inv(M)*s
        //hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnzJacPre, &d_one, descr_M, csrValPre, csrRowPtrPre, csrColIndPre, s, &dzero, s_hat);
        //t=A*s_hat
        //hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s, &dzero, t);
        hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s_hat, &dzero, t);
        temp = hipblasDdot(N, t, 1, r, 1);
        temp2 = hipblasDdot(N, t, 1, t, 1);
        omega = temp/temp2;
        //x = x+ omega*s
        //hipblasDaxpy(N, omega, s, 1, x, 1);
        hipblasDaxpy(N, omega, s_hat, 1, x, 1);
        //r = s-omega*t
        hipblasDaxpy(N, -omega, t, 1, r, 1);
        err = hipblasDnrm2(N, r, 1)/bnorm;
        if (err<=Tol) {
            resid = hipblasDnrm2(N, s, 1)/bnorm;
            break;
        }
        if (omega == 0.0)
            break;
        //rho_1 = rho;
    }
    // Classify exit condition: 0 converged, -2 omega breakdown, -1 rho breakdown, 1 max_it hit.
    if (err <= Tol || snorm/bnorm < Tol)
        flag = 0;
    else if (omega == 0.0)
        flag = -2;
    else if (rho == 0)
        flag = -1;
    else
        flag = 1;
    if (!flag)
        cout<<"The solution converged with residual "<<resid<<" in "<<iter<<" iterations."<<endl;
    else
        cout<<"BiCGStab produced error "<<flag<<" after "<<iter<<" iterations."<<endl;
    //Shutdown cuBLAS
    cublas_status = hipblasShutdown();
    if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
        cout<<"Shut down error"<<endl;
        return;
    }
}
//----------------------------------------------------------------------------------------------------------------------------------
// Applies the Newton update: the first N-1 state entries are angle corrections
// (added into theta via PQindex), the remainder voltage-magnitude corrections
// (added into Vmag). stateVector is then refreshed from x.
// NOTE(review): stateVector is read for the update and then overwritten with
// x[index] + stateVector[index] -- confirm this ordering is the intended
// accumulation of the solver output x.
__global__ void updateX(int jacSize, int N, int *PQindex, double *Vmag, double *theta, double *stateVector, double *x)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<(N-1)) {
        theta[PQindex[index]] = theta[PQindex[index]] + stateVector[index];
        stateVector[index] = x[index] + stateVector[index];
    }
    else if(index>=(N-1) && index<jacSize) {
        Vmag[PQindex[index]] = Vmag[PQindex[index]] + stateVector[index];
        stateVector[index] = x[index] + stateVector[index];
    }
}

// Recomputes the power mismatch vector: gathers calculated P (first N-1
// entries) and Q (remaining entries) via PQindex, then subtracts from the
// specified injections.
// NOTE(review): the final powerMismatch/PQspec/PQcalc store is NOT guarded by
// index<jacSize, so any thread beyond jacSize writes out of bounds unless the
// grid exactly matches jacSize -- confirm the launch configuration.
__global__ void updateMismatch(int N, int jacSize, double *P_eq, double *Q_eq, int *PQindex, double* PQcalc, double* PQspec, double *powerMismatch)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<(N-1)) {
        PQcalc[index] = P_eq[PQindex[index]];
    }
    else if(index>=(N-1) && index<jacSize) {
        PQcalc[index] = Q_eq[PQindex[index]];
    }
    powerMismatch[index] = PQspec[index] - PQcalc[index];
}

// Builds the inverse of a Jacobi (diagonal) preconditioner for a dense
// jacSize x jacSize matrix: precInv = diag(1/a_ii), zeros elsewhere.
__global__ void jacobiPrecond(int jacSize, double *jacobian, double *precInv) {
int rowIdx = blockDim.y*blockIdx.y + threadIdx.y; int colIdx = blockDim.x*blockIdx.x + threadIdx.x; int index = rowIdx*jacSize + colIdx; //row major order - modify for column major precInv[index] = 0; if (rowIdx == colIdx) precInv[index] = 1/jacobian[index]; //inverse of strictly diagonal matrix is 1/a_ii } __device__ double radToDeg(double a) { return 57.29577951*a; } __device__ void atomicAddComplex(hipDoubleComplex *a, hipDoubleComplex b) { double *x = (double*)a; double *y = x+1; atomicAdd2(x, cuCreal(b)); atomicAdd2(y, cuCimag(b)); } __device__ double atomicAdd2(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } void biCGStab2(hipsparseStatus_t status, hipsparseHandle_t handle, hipsparseMatDescr_t descr_A, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b, int jacSize) { double* csrValPre; hipEvent_t start, stop; float elapsedTime; ofstream triSolve1("Tri solve 1.txt"); ofstream triSolve2("Tri solve 2.txt"); ofstream spMV1("SpMV 1.txt"); ofstream spMV2("SpMV 2.txt"); ofstream ilu("ILU matrix.txt"); hipMalloc((void**)&csrValPre, nnz*sizeof(double)); hipMemcpy(csrValPre, csrValAdev, nnz*sizeof(double), hipMemcpyDeviceToDevice); hipsparseMatDescr_t descr_M = 0; hipsparseMatDescr_t descr_L = 0; hipsparseMatDescr_t descr_U = 0; cusparseSolveAnalysisInfo_t info_M; cusparseSolveAnalysisInfo_t info_L; cusparseSolveAnalysisInfo_t info_U; //Create and setup matrix descriptors for ILU preconditioner hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipsparseCreateMatDescr(&descr_M); 
hipsparseSetMatType(descr_M, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr_M, HIPSPARSE_INDEX_BASE_ZERO); hipsparseCreateMatDescr(&descr_L); hipsparseSetMatType(descr_L, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER); hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_UNIT); hipsparseCreateMatDescr(&descr_U); hipsparseSetMatType(descr_U, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr_U, HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatFillMode(descr_U, HIPSPARSE_FILL_MODE_UPPER); hipsparseSetMatDiagType(descr_U, HIPSPARSE_DIAG_TYPE_NON_UNIT); cusparseCreateSolveAnalysisInfo(&info_M); cusparseCreateSolveAnalysisInfo(&info_L); cusparseCreateSolveAnalysisInfo(&info_U); //Perform Analysis before calling ilu0(); NB analysis can be done on L and U at this point since sparsity pattern is the same cusparseDcsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_M, csrValPre, csrRowPtrAdev, csrColIndAdev, info_M); cusparseDcsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L); cusparseDcsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"ILU setup: "<<elapsedTime<<" ms"<<endl; //Perform ILU0 factorization hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cusparseDcsrilu0(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, descr_M, csrValPre, csrRowPtrAdev, csrColIndAdev, info_M); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"ILU formation: "<<elapsedTime<<" ms"<<endl; //double *csrValPreHost = new double[nnz]; //hipMemcpy(csrValPreHost, csrValPre, nnz*sizeof(double), hipMemcpyDeviceToHost); //for (int 
i=0; i<nnz; i++) { // ilu<<csrValPreHost[i]<<endl; //} //ilu.close(); double bnorm, snorm=0, err, alpha=1.0, beta, omega=1.0, rho=1.0, rho_1, resid=0; //BiCG scalars - previously global variables int flag, iter; //For cusparse csrmv function double d_one = 1.0; double dzero = 0.0; double temp=0, temp2=0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, x, &dzero, r); hipblasDscal(M, -1.0, r, 1); hipblasDaxpy(M, 1.0, b, 1, r, 1); hipblasDcopy(N, r, 1, p, 1); hipblasDcopy(N, r, 1, r_tld, 1); bnorm = hipblasDnrm2(N, b, 1); //To find Error: error = ||r||/||b|| err = hipblasDnrm2(N, r, 1)/bnorm; if (err<Tol) { cout<<"Solution has already converged"<<endl; return; } for (iter=0; iter<max_it; iter++) { rho_1 = rho; rho = hipblasDdot(N, r_tld, 1, r, 1); //cout<<"Rho: "<<rho<<endl; if (rho == 0) //check for breakdown break; //For every iteration after the first if (iter>0) { beta = (rho/rho_1)*(alpha/omega); //p = r+ beta(p-omega*v) hipblasDaxpy(N, -omega, v, 1, p, 1); hipblasDscal(N, beta, p, 1); hipblasDaxpy(N, 1.0, r, 1, p, 1); } cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L, p, t); cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U, t, p_hat); //v = A*p_hat //hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p, &dzero, v); hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p_hat, &dzero, v); /*double *vhost = new double[jacSize]; hipMemcpy(vhost, v, jacSize*sizeof(double), hipMemcpyDeviceToHost); ofstream phat("v.txt"); for (int i=0; i<jacSize; i++) phat<<vhost[i]<<endl; phat.close();*/ alpha = 
rho/hipblasDdot(N, r_tld, 1, v, 1); //alpha = rho/(r_tld,v) hipblasDaxpy(N, -alpha, v, 1, r, 1); //s=r - alpha*v hipblasDcopy(N, r, 1, s, 1); //hipblasDaxpy(N, alpha, p, 1, x, 1); hipblasDaxpy(N, alpha, p_hat, 1, x, 1); //x = x+ alpha*p //Check for convergence snorm = hipblasDnrm2(N, s, 1); if (snorm/bnorm < Tol) { resid = snorm/bnorm; //break; } //Preconditioner to find t //M=I implies s = s_hat => t=As //s_hat = M\s cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L, r, t); cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U, t, s_hat); //t=A*s_hat //hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s, &dzero, t); hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s_hat, &dzero, t); temp = hipblasDdot(N, t, 1, r, 1); temp2 = hipblasDdot(N, t, 1, t, 1); omega = temp/temp2; //x = x+ omega*s //hipblasSaxpy(N, omega, s, 1, x, 1); hipblasDaxpy(N, omega, s_hat, 1, x, 1); //r = s-omega*t hipblasDaxpy(N, -omega, t, 1, r, 1); err = hipblasDnrm2(N, r, 1)/bnorm; if (err<=Tol) { resid = hipblasDnrm2(N, s, 1)/bnorm; break; } if (omega == 0.0) break; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout<<"BiCG Stab: "<<elapsedTime<<" ms"<<endl; if (err <= Tol || snorm/bnorm < Tol) flag = 0; else if (omega == 0.0) flag = -2; else if (rho == 0) flag = -1; else flag = 1; if (!flag) cout<<"The solution converged with residual "<<resid<<" in "<<iter<<" iterations."<<endl; else cout<<"BiCGStab produced error "<<flag<<" after "<<iter<<" iterations."<<endl; } void powerMethod(int *csrRowPtrA, int *csrColIndA, double *csrValA, hipsparseMatDescr_t descr_A, int nnz, double *x, double *dev_x, double *dev_c, hipsparseHandle_t 
handle, double* eigen, int n) { double temp, invEigen; int d, i=0; double numOne = 1.0; double numZero = 0.0; do { i++; //c = Ax ... MV mult //hipblasSgemv('n', n, n, 1, dev_a, n, dev_x, 1, 0, dev_c, 1); hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz, &numOne, descr_A, csrValA, csrRowPtrA, csrColIndA, dev_x, &numZero, dev_c); //copy c[] to x[] hipblasDcopy(n, dev_c, 1, dev_x, 1); temp = *eigen; //get max value in result x[] d = hipblasIdamax(n, dev_x, 1); hipMemcpy(x, dev_x, n*sizeof(double), hipMemcpyDeviceToHost); *eigen = x[d-1]; //factorize largest value out, obtain next vector invEigen = 1.0/(*eigen); hipblasDscal(n, invEigen, dev_x, 1); } while (fabs((*eigen)-temp)>0.00001); }
c4cd59a2b8534c1b8303d4e359837aecaf4877b0.cu
//#include <iostream> #include <string> #include <vector> #include "gpuFunctions.h" #include "TimingFunctions.h" using namespace std; #define threads 16 #define Tol ((double) 0.0001) #define max_it 2500 __device__ int counter; typedef thrust::tuple<int, int, float> tuple; typedef thrust::tuple<int, int, double> jacTuple; struct isZero { __host__ __device__ bool operator() (const tuple& a) { const double x = thrust::get<2>(a); return x == 0; }; }; struct is_PVbusRow { int id; is_PVbusRow(int num) : id(num) {}; __host__ __device__ bool operator () (const jacTuple& tup) { const int row = thrust::get<0> (tup); //const int col = thrust::get<1> (tup); return ((row == id)); } }; struct is_PVbusCol { int id; is_PVbusCol(int num) : id(num) {}; __host__ __device__ bool operator () (const jacTuple& tup) { const int col = thrust::get<1> (tup); return ((col == id)); } }; //Main Menu function - allows user to select power system for simulation void optionSelect(int option) { ifstream busdata, linedata; int numLines=0; string line; if ((option<1) || (option>8)) { cout<<"Not a valid option"<<endl; answerSelect(); } else if (option==1) { cout<<"----IEEE 14 bus system----\n"<<endl; linedata.open("linedata.txt"); while (getline (linedata, line)) { ++numLines; } linedata.close(); //pointer is at EOF. Need to close and reopen to stream data into variables busdata.open("busdata.txt"); linedata.open("linedata.txt"); IEEEStandardBusSystems(14, busdata, linedata, numLines); //calls function to execute data intialization for 14 bus system - N.B. 
read from text file (for now) answerSelect(); } else if (option==2){ cout<<"----IEEE 118 bus system----\n"<<endl; linedata.open("118Bus_LineData.txt"); while (getline (linedata, line)) { ++numLines; } linedata.close(); busdata.open("118BusData.txt"); linedata.open("118Bus_LineData.txt"); IEEEStandardBusSystems(118, busdata, linedata, numLines); answerSelect(); } else if (option==3) { cout<<"----IEEE 300 bus system----"<<endl; linedata.open("300Bus_LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("300BusData.txt"); linedata.open("300Bus_LineData.txt"); IEEEStandardBusSystems(300, busdata, linedata, numLines); answerSelect(); } else if (option==4) { cout<<"----Polish Winter Peak 2383 bus system----"<<endl; linedata.open("2383LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("2383BusData.txt"); linedata.open("2383LineData.txt"); IEEEStandardBusSystems(2383, busdata, linedata, numLines); answerSelect(); } else if (option==5) { cout<<"----Polish Summer Peak 3120 bus system----"<<endl; linedata.open("3120LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("3120BusData.txt"); linedata.open("3120LineData.txt"); IEEEStandardBusSystems(3120, busdata, linedata, numLines); answerSelect(); } else if (option==6) { cout<<"----PEGASE 6515 bus system----"<<endl; linedata.open("6515LineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("6515BusData.txt"); linedata.open("6515LineData.txt"); IEEEStandardBusSystems(6515, busdata, linedata, numLines); answerSelect(); } else if (option==7) { cout<<"----PEGASE 9241 bus system----"<<endl; linedata.open("9241linedata.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("9241busdata.txt"); linedata.open("9241linedata.txt"); IEEEStandardBusSystems(9241, busdata, linedata, numLines); answerSelect(); } /*else if (option==7) { 
cout<<"----PEGASE 13659 bus system----"<<endl; linedata.open("13659linedata.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("13659busdata.txt"); linedata.open("13659linedata.txt"); IEEEStandardBusSystems(13659, busdata, linedata, numLines); answerSelect(); }*/ else if (option==8) { cout<<"----Case 6 bus system----"<<endl; linedata.open("6BusLineData.txt"); while (getline(linedata, line)) { ++numLines; } linedata.close(); busdata.open("6BusData.txt"); linedata.open("6BusLineData.txt"); IEEEStandardBusSystems(6, busdata, linedata, numLines); answerSelect(); } } //Allows user to enter a valid option from the Main Menu void answerSelect() { char answer; int option; cout<<"\nDo you want to perform another simulation (y/n)?"<<endl; cin>>answer; if (cin.fail()) { cin.clear(); cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); cout<<"Invalid option"<<endl; } if ((answer=='y') || (answer=='Y')) { system("CLS"); //clears text on command line interface cout<<"\nPlease select one of the following options for simulation:"<<endl; //cout<<"1. IEEE 14 bus system\n2. IEEE 118 bus system\n3. IEEE 300 bus system\n4. Polish Winter Peak 2383 Bus System\n5. Polish Summer Peak 3120 bus system\n6. PEGASE 9241 bus system\n7. PEGASE 13659 bus system"<<endl; cout<<"1. IEEE 14 bus system\n2. IEEE 118 bus system\n3. IEEE 300 bus system\n4. Polish Winter Peak 2383 Bus System\n5. Polish Summer Peak 3120 bus system\n6. PEGASE 6515 bus system\n7. PEGASE 9241 bus system\n8. 
6 bus system"<<endl; cout<<"Your option: "; cin>>option; optionSelect(option); } else if((answer=='n') || (answer=='N')) { cout<<"Thank you for using this program"<<endl; exit(0); //exits program } else { cout<<"Invalid response"<<endl; answerSelect(); } } //This is the "main" function in gpuFunctions.cu where the NR load flow solution of standard power systems occurs int IEEEStandardBusSystems(int numberOfBuses, ifstream &busData, ifstream &lineData, int numLines) { //cudaProfilerStart(); //-------------------------------------------------------------VARIABLE DECLARATION SECTION--------------------------------------------------------------------------- //bus data ifstream variables int bus_i, bustype, busDataIdx=0, lineDataIdx=0, N_g=0, N_p=0, jacSize=0, slackBus, numSlackLines = 0; double P,Q, Vmag, Vang, VMax, VMin; //line data ifstream variables int fromBus, toBus; double r, x, b; //dynamic arrays to hold bus data int *busNum = new int[numberOfBuses], *busType = new int[numberOfBuses], *tempBusNum = new int[numberOfBuses]; double *Pd = new double[numberOfBuses], *Qd = new double[numberOfBuses], *Vm=new double[numberOfBuses], *Va=new double[numberOfBuses], *Vmax=new double[numberOfBuses], *Vmin=new double[numberOfBuses], *P_eq = new double[numberOfBuses], *Q_eq = new double[numberOfBuses], *theta = new double[numberOfBuses]; //dynamic arrays to hold line data int *fromBusArr = new int[numLines], *toBusArr = new int[numLines]; int *PQindex; double *R = new double[numLines], *X = new double[numLines], *Bact = new double[numLines]; complex<double> *B1 = new complex<double> [numLines]; cuDoubleComplex *B = new cuDoubleComplex[numLines]; //N.B.: cuDoubleComplex data type can be used on host once CUDA library is included. 
cuDoubleComplex *Z = new cuDoubleComplex[numLines]; //Vectors needed for push_back() operations to build PQindex[] and PQspec[] vector<double> Pval_spec, Qval_spec, Pval_calc, Qval_calc; vector<int> Pindex, Qindex; vector<double> V_mag, V_ang; //for constructing stateVector[] from hot start double *PQspec, *PQcalc; //Device variables - to be allocated on and copied to the GPU int *dev_fromBus, *dev_toBus, *dev_PQindex, *dev_PQbuses, *dev_PVbuses; double *dev_Pd, *dev_Qd, *dev_Peq, *dev_Qeq, *dev_Vmag, *dev_theta, *dev_powerMismatch, *dev_stateVector, *dev_PQspec, *dev_PQcalc, *dev_xi; cuDoubleComplex *dev_z, *dev_B; //-----------------------sparse--------------------------- int ynnz = (2*numLines)+numberOfBuses; //createYBus() //int ynnz = numLines + numberOfBuses; //createYBusConcise() cuDoubleComplex *yHost = new cuDoubleComplex[ynnz]; int *yRowHost = new int[ynnz], *yColHost = new int[ynnz]; cuDoubleComplex *yDev; int *yRowDev, *yColDev; //arrays holding indices of off-diagonal elements in either upper or lower triangle int* dev_yUpperTriangleRow, *dev_yUpperTriangleCol; int *yUpperTriangleRow = new int[numLines], *yUpperTriangleCol = new int[numLines]; cudaMalloc((void**)&dev_yUpperTriangleRow, numLines*sizeof(int)); cudaMalloc((void**)&dev_yUpperTriangleCol, numLines*sizeof(int)); //-------------------------------------------------------- //In the linear system Ax=b, the vectors are of the same degree as the Jacobian matrix double *powerMismatch; //b in Ax = b double *stateVector; //x in Ax = b //----------------------------------------------Prompt user for flat start or hot start----------------------------------------- /*int startType; cout<<"What type of simulation would you like to perform:\n1)Flat start - V = 1+j0\n2)\"Hot\" start - V magnitude and angle come from previous solution"<<endl; cin>>startType;*/ //Error variables for mem copies and mem allocation on GPU cudaError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6, cudaStat7, 
cudaStat8, cudaStat9, cudaStat10, cudaStat11, cudaStat12, cudaStat13, cudaStat14, cudaStat15, cudaStat16, cudaStat17, cudaStat18; //Variables for timing on GPU cudaEvent_t start, stop; float elapsedTime; //----------------------------------------------------------------------------------------------------------------------------------------------------------------- //Reading from busdata.txt (tab delimited) if (!busData) { cout<<"There was a problem reading the 'Bus Data' file."<<endl; return 1; } while (busData>>bus_i>>bustype>>P>>Q>>Vmag>>Vang>>VMax>>VMin) { busNum[busDataIdx]=bus_i; busType[busDataIdx]=bustype; Pd[busDataIdx]=P/100; Qd[busDataIdx]=Q/100; Vm[busDataIdx]=Vmag; Va[busDataIdx]=0.01745329252*Vang; Vmax[busDataIdx]=VMax; Vmin[busDataIdx]=VMin; busDataIdx++; } //For flat start, not using previous solution. V = 1+j0. Vmag = 1pu for PQ and slack buses. Vmag is known at PV buses. /*if (startType == 1) { for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=2) Vm[i] = 1; Va[i] = 0; } }*/ vector<int> PVbusesVec; //Constructing PQindex vector which holds indices of PV and PQ buses for (int i=0; i<numberOfBuses; i++) { if (busType[i] == 1) { N_p++; //increment PQ bus counter since PQ is represented by 1 Pindex.push_back(i); Qindex.push_back(i); } if (busType[i] == 2) { N_g++; //increment PV bus counter since PV is represented by 2 Pindex.push_back(i); PVbusesVec.push_back(i); //For a PV bus there is no initial value of Q } if (busType[i] == 3) { slackBus = i; } } jacSize = numberOfBuses+N_p-1; //Degree of the Jacobian matrix and length of vectors in linear system int *PQbuses = &Qindex[0]; int *PVbuses = &PVbusesVec[0]; cudaMalloc((void**)&dev_PQbuses, Qindex.size()*sizeof(int)); cudaMalloc((void**)&dev_PVbuses, PVbusesVec.size()*sizeof(int)); cudaMemcpy(dev_PQbuses, PQbuses, Qindex.size()*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_PVbuses, PVbuses, PVbusesVec.size(), cudaMemcpyHostToDevice); //int jacSizeFull = (2*numberOfBuses) - 2; 
Pindex.insert(Pindex.end(), Qindex.begin(), Qindex.end()); //joins Pindex and Qindex to get a vector which holds indices of both PV and PQ buses. Store in Pindex PQindex = &Pindex[0]; //store vector Pindex as an array PQindex - compatible with GPU /*ofstream check("PQindex.txt"); for (unsigned int i=0; i<Pindex.size(); i++) check<<PQindex[i]<<endl; check.close(); check.open("PQbuses.txt"); for (unsigned int i=0; i<Qindex.size(); i++) check<<PQbuses[i]<<endl; check.close(); cout<<N_g<<endl; cout<<PVbusesVec.size()<<endl; check.open("PVbuses.txt"); for (unsigned int i=0; i<PVbusesVec.size(); i++) check<<PVbuses[i]<<endl; check.close();*/ //In the linear system Ax=b, the vectors are of the same degree as the Jacobian matrix stateVector = new double[jacSize]; //x in Ax = b powerMismatch = new double[jacSize]; //b in Ax = b //stateVector = new double[jacSizeFull]; //x in Ax = b //powerMismatch = new double[jacSizeFull]; //b in Ax = b //Allocation of GPU memory cudaStat1 = cudaMalloc((void**)&dev_fromBus, numLines*sizeof(int)); cudaStat2 = cudaMalloc((void**)&dev_toBus, numLines*sizeof(int)); cudaStat3 = cudaMalloc((void**)&dev_z, numLines*sizeof(cuDoubleComplex)); cudaStat4 = cudaMalloc((void**)&dev_B, numLines*sizeof(cuDoubleComplex)); cudaStat6 = cudaMalloc((void**)&dev_Pd, numberOfBuses*sizeof(double)); cudaStat7 = cudaMalloc((void**)&dev_Qd, numberOfBuses*sizeof(double)); cudaStat8 = cudaMalloc((void**)&dev_Vmag, numberOfBuses*sizeof(double)); cudaStat9 = cudaMalloc((void**)&dev_theta, numberOfBuses*sizeof(double)); cudaStat10 = cudaMalloc((void**)&dev_Peq, numberOfBuses*sizeof(double)); cudaStat11 = cudaMalloc((void**)&dev_Qeq, numberOfBuses*sizeof(double)); cudaStat12 = cudaMalloc((void**)&dev_powerMismatch, jacSize*sizeof(double)); cudaStat13 = cudaMalloc((void**)&dev_stateVector, jacSize*sizeof(double)); cudaStat14 = cudaMalloc((void**)&dev_PQindex, jacSize*sizeof(int)); cudaStat15 = cudaMalloc((void**)&dev_PQspec, jacSize*sizeof(double)); cudaStat16 = 
cudaMalloc((void**)&dev_PQcalc, jacSize*sizeof(double)); cudaStat17 = cudaMalloc((void**)&dev_xi, jacSize*sizeof(double)); /*cudaStat12 = cudaMalloc((void**)&dev_powerMismatch, jacSizeFull*sizeof(double)); cudaStat13 = cudaMalloc((void**)&dev_stateVector, jacSizeFull*sizeof(double)); cudaStat14 = cudaMalloc((void**)&dev_PQindex, jacSizeFull*sizeof(int)); cudaStat15 = cudaMalloc((void**)&dev_PQspec, jacSizeFull*sizeof(double)); cudaStat16 = cudaMalloc((void**)&dev_PQcalc, jacSizeFull*sizeof(double)); cudaStat17 = cudaMalloc((void**)&dev_xi, jacSizeFull*sizeof(double));*/ cudaStat18 = cudaMalloc((void**)&yDev, ynnz*sizeof(cuDoubleComplex)); cudaMalloc((void**)&yRowDev, ynnz*sizeof(int)); cudaMalloc((void**)&yColDev, ynnz*sizeof(int)); if (cudaStat1 != cudaSuccess || cudaStat2 != cudaSuccess || cudaStat3 != cudaSuccess || cudaStat4 != cudaSuccess || cudaStat6 != cudaSuccess || cudaStat7 != cudaSuccess || cudaStat8 != cudaSuccess || cudaStat9 != cudaSuccess || cudaStat10 != cudaSuccess || cudaStat11 != cudaSuccess || cudaStat12 != cudaSuccess || cudaStat13 != cudaSuccess || cudaStat14 != cudaSuccess || cudaStat15 != cudaSuccess || cudaStat16 != cudaSuccess || cudaStat17 != cudaSuccess || cudaStat18 != cudaSuccess) { cout<<"Device memory allocation failed"<<endl; return 1; } //-------------------------------------------------------------------BUS ADMITTANCE MATRIX CONSTRUCTION-------------------------------------------------------------- //Reading from linedata.txt (tab delimited) if(!lineData) { cout<<"There was a problem reading the 'Line Data' file"<<endl; return 1; } while (lineData>>fromBus>>toBus>>r>>x>>b) { fromBusArr[lineDataIdx] = fromBus; toBusArr[lineDataIdx] = toBus; R[lineDataIdx] = r; X[lineDataIdx] = x; //r+jX Bact[lineDataIdx] = b; lineDataIdx++; } for (int i=0; i<numLines; i++) { B[i] = make_cuDoubleComplex(0, Bact[i]/2); Z[i] = make_cuDoubleComplex(R[i], X[i]); } //This is used to number buses consecutively from 0-299 for 300 bus case for (int i=0; 
i<numberOfBuses; i++) { tempBusNum[i] = i; } //Arranging bus numbering for (int i=0; i<numLines; i++) { for (int j=0; j<numberOfBuses; j++) { if (fromBusArr[i] == busNum[j]) fromBusArr[i] = tempBusNum[j]; if (toBusArr[i] == busNum[j]) toBusArr[i] = tempBusNum[j]; } if (fromBusArr[i] == slackBus) { numSlackLines+=2; //numSlackLines++; //concise } if (toBusArr[i] == slackBus) { numSlackLines+=2; //numSlackLines++; //concise } } numSlackLines++; //for element y(0,0) for (int i=0; i<ynnz; i++) yHost[i] = make_cuDoubleComplex(0,0); //copy memory from host to device - parameters needed for createYbus() cudaStat1 = cudaMemcpy(dev_fromBus, fromBusArr, numLines*sizeof(int), cudaMemcpyHostToDevice); cudaStat2 = cudaMemcpy(dev_toBus, toBusArr, numLines*sizeof(int), cudaMemcpyHostToDevice); cudaStat3 = cudaMemcpy(dev_z, Z, numLines*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); cudaStat4 = cudaMemcpy(dev_B, B, numLines*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); cudaStat5 = cudaMemcpy(yDev, yHost, ynnz*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); if (cudaStat1 != cudaSuccess || cudaStat2 != cudaSuccess || cudaStat3 != cudaSuccess || cudaStat4 != cudaSuccess || cudaStat5 != cudaSuccess) { cout<<"Device memory copy failed"<<endl; return 1; } //grid and block dimensions - user defined dim3 dimBlock(threads, threads); //number of threads dim3 dimGrid((numberOfBuses+(threads-1))/threads, (numberOfBuses+(threads-1))/threads); dim3 ythreads(threads); dim3 yblocks((numLines+(threads-1))/threads); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //launch kernel once data has been copied to GPU createYBusSparse<<<yblocks, ythreads>>>(numLines, numberOfBuses, dev_fromBus, dev_toBus, dev_z, dev_B, yDev, yRowDev, yColDev); //createYBusSparseConcise<<<yblocks, ythreads>>>(numLines, numberOfBuses, dev_fromBus, dev_toBus, dev_z, dev_B, yDev, yRowDev, yColDev); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, 
start, stop); cout<<"Y-bus Sparse: "<<elapsedTime<<" ms"<<endl; //------------------------------------Sorting yrow and ycol----------------------------------------------------- /*int *dev_yUpperTriangleRow2, *dev_yUpperTriangleCol2; //second array for sort_by_key() //int* dev_yRowTemp, *dev_yColTemp; //third set of arrays to sort row index using column index as key int *yUpperTriangleRow2, *yUpperTriangleCol2; yUpperTriangleRow2 = new int[numLines]; yUpperTriangleCol2 = new int[numLines]; cudaMalloc((void**)&dev_yUpperTriangleRow2, numLines*sizeof(int)); cudaMalloc((void**)&dev_yUpperTriangleCol2, numLines*sizeof(int)); //Copy row and column indices to duplicate arrays cudaMemcpy(dev_yUpperTriangleRow2, dev_yUpperTriangleRow, numLines*sizeof(int), cudaMemcpyDeviceToDevice); cudaMemcpy(dev_yUpperTriangleCol2, dev_yUpperTriangleCol, numLines*sizeof(int), cudaMemcpyDeviceToDevice); //wrapping device pointers to arrays in device memory to treat as thrust vectors and use thrust sort function thrust::device_ptr<int> yrowPtr(dev_yUpperTriangleRow); thrust::device_ptr<int> ycolPtr(dev_yUpperTriangleCol); //pointers to the original sparse Y thrust::device_ptr<int> ycolPtr2(dev_yUpperTriangleCol2); thrust::device_ptr<int> yrowPtr2(dev_yUpperTriangleRow2); //create wrapper to copy of yrow thrust::stable_sort_by_key(ycolPtr, ycolPtr+numLines, yrowPtr, thrust::less<int>()); //sort original yRow by original yCol using wrappers (both are sorted) thrust::stable_sort_by_key(yrowPtr2, yrowPtr2+numLines, ycolPtr2, thrust::less<int>()); //sort copy of yCol by copy of yRow (both are sorted) //Copy row and column indices (sorted by column) to duplicate yRow and yCol index host arrays cudaMemcpy(yUpperTriangleRow, dev_yUpperTriangleRow, numLines*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(yUpperTriangleCol, dev_yUpperTriangleCol, numLines*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(yUpperTriangleRow2, dev_yUpperTriangleRow2, numLines*sizeof(int), cudaMemcpyDeviceToHost); 
cudaMemcpy(yUpperTriangleCol2, dev_yUpperTriangleCol2, numLines*sizeof(int), cudaMemcpyDeviceToHost);*/ //------------------------------------------------------------------------------------------------------------------ cudaMemcpy(yHost, yDev, ynnz*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost); cudaMemcpy(yRowHost, yRowDev, ynnz*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(yColHost, yColDev, ynnz*sizeof(int), cudaMemcpyDeviceToHost); ofstream output; output.open("SparseYbus.txt"); for (int i=0; i<ynnz; i++) { output<<yRowHost[i]<<"\t"<<yColHost[i]<<"\t"<<"("<<cuCreal(yHost[i])<<","<<cuCimag(yHost[i])<<")"<<endl; } output.close(); //--------------------------------------------------------------------------Power Equations-------------------------------------------------------------------------- for (int i=0; i<numberOfBuses; i++) { theta[i] = 0; P_eq[i] = 0; Q_eq[i] = 0; } cudaStat1 = cudaMemcpy(dev_Vmag, Vm, numberOfBuses*sizeof(double), cudaMemcpyHostToDevice); //cudaStat2 = cudaMemcpy(dev_theta, Va, numberOfBuses*sizeof(double), cudaMemcpyHostToDevice); //FOR HOT START cudaStat2 = cudaMemcpy(dev_theta, theta, numberOfBuses*sizeof(double), cudaMemcpyHostToDevice); //FOR FLAT START cudaStat3 = cudaMemcpy(dev_Peq, P_eq, numberOfBuses*sizeof(double), cudaMemcpyHostToDevice); cudaStat4 = cudaMemcpy(dev_Qeq, Q_eq, numberOfBuses*sizeof(double), cudaMemcpyHostToDevice); if (cudaStat1 != cudaSuccess || cudaStat2 != cudaSuccess || cudaStat3 != cudaSuccess || cudaStat4 != cudaSuccess){ cout<<"Device memory copy failed"<<endl; return 1; } dim3 pthreads(threads); dim3 pblock((ynnz+(threads-1))/threads); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); powerEqnSparse<<<pblock, pthreads>>>(dev_Peq, dev_Qeq, yDev, yRowDev, yColDev, dev_Vmag, dev_theta, ynnz); //powerEqnSparseConcise<<<pblock, pthreads>>>(dev_Peq, dev_Qeq, yDev, yRowDev, yColDev, dev_Vmag, dev_theta, ynnz, numLines); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Sparse PQ eqns: "<<elapsedTime<<" ms"<<endl; cudaMemcpy(P_eq, dev_Peq, numberOfBuses*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(Q_eq, dev_Qeq, numberOfBuses*sizeof(double), cudaMemcpyDeviceToHost); output.open("power equations sparse.txt"); for (int i=0; i<numberOfBuses; i++) { output<<P_eq[i]<<endl; } output<<endl; for (int i=0; i<numberOfBuses; i++) { output<<Q_eq[i]<<endl; } cout<<endl; output.close(); //To construct the power mismatch vector //SPECIFIED values of P and Q read from text file into Pd and Qd. Place these values in separate vectors for SPECIFIED value of P and Q for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=3) { //if bus is not a slack bus Pval_spec.push_back(Pd[i]); Pval_calc.push_back(P_eq[i]); V_ang.push_back(theta[i]); } if (busType[i]==1) { //if bus is a PQ bus Qval_spec.push_back(Qd[i]); Qval_calc.push_back(Q_eq[i]); V_mag.push_back(Vm[i]); } } //power mismatch vector for Full Jacobian matrix (minus slack bus) /*for (int i=0; i<numberOfBuses; i++) { if (busType[i]!=3) { Pval_spec.push_back(Pd[i]); Pval_calc.push_back(P_eq[i]); V_ang.push_back(theta[i]); Qval_spec.push_back(Qd[i]); Qval_calc.push_back(Q_eq[i]); V_mag.push_back(Vm[i]); } }*/ Pval_spec.insert(Pval_spec.end(), Qval_spec.begin(), Qval_spec.end()); //Append vectors yield vector of SPECIFIED real and rxve power PQspec = &Pval_spec[0]; //Create an array and store the appended vector in it to use on the GPU //cudaMemcpy(dev_PQspec, PQspec, jacSizeFull*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_PQspec, PQspec, jacSize*sizeof(double), cudaMemcpyHostToDevice); Pval_calc.insert(Pval_calc.end(), Qval_calc.begin(), Qval_calc.end()); //Appended vectors yield vector of CALCULATED real and rxve power PQcalc = &Pval_calc[0]; //Assign the appended vector to an array for use on the GPU //Append these vectors to get stateVector (Vang and Vmag) V_ang.insert(V_ang.end(), V_mag.begin(), V_mag.end()); stateVector = 
&V_ang[0]; output.open("stateVector.txt"); for (unsigned int i=0; i<V_ang.size(); i++) output<<stateVector[i]<<"; "<<endl; output.close(); //Power mismatch vector, b in Ax=b, is found by subtracting calculated from specified. output.open("powerMismatch.txt"); for (unsigned int i=0; i<Pval_spec.size(); i++) { powerMismatch[i] = PQspec[i] - PQcalc[i]; output<<powerMismatch[i]<<";"<<endl; } output.close(); //------------------------------------------------Creation of Jacobian Matrix---------------------------------------------------------- cout<<"Y nnz: "<<ynnz<<endl; //int nnzJac= (ynnz + numLines - numSlackLines)*4; //concise int nnzJac= (ynnz - numSlackLines)*4; cout<<"NNZ Jac before: "<<nnzJac<<endl; bool *dev_boolRow, *dev_boolCol; int *dev_J22row, *dev_J22col; cudaMalloc((void**)&dev_boolRow, ynnz*sizeof(bool)); cudaMalloc((void**)&dev_boolCol, ynnz*sizeof(bool)); cudaMalloc((void**)&dev_J22row, ynnz*sizeof(int)); cudaMalloc((void**)&dev_J22col, ynnz*sizeof(int)); //Initial algorithm to count nnzJac bool *boolCheck = new bool[numLines]; for (int i=0; i<numLines; i++) boolCheck[i] = 0; bool *dev_boolCheck; int J12count=0; int J22count = 0; int* dev_J12count, *dev_J22count; cudaMalloc((void**)&dev_J12count, sizeof(int)); cudaMalloc((void**)&dev_J22count, sizeof(int)); cudaMalloc((void**)&dev_boolCheck, numLines*sizeof(bool)); cudaMemcpy(dev_boolCheck, boolCheck, numLines*sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(dev_J12count, &J12count, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_J22count, &J22count, sizeof(int), cudaMemcpyHostToDevice); countNnzJac<<<yblocks,ythreads>>>(dev_boolCheck, yRowDev, yColDev, yDev, dev_PVbuses, N_g, slackBus, dev_J12count, dev_J22count); cudaMemcpy(&J12count, dev_J12count, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&J22count, dev_J22count, sizeof(int), cudaMemcpyDeviceToHost); cout<<J12count<<endl; cout<<J22count<<endl; /*startCounter(); for (int i=0; i<numLines; i++) { for (int j=0; j<N_g; j++) { if (yRowHost[i] 
!= slackBus && yColHost[i] == PVbuses[j]) { J12count++; boolCheck[i] = true; } if (yColHost[i] != slackBus && yRowHost[i] == PVbuses[j]) { J12count++; boolCheck[i] = true; } } if (boolCheck[i] == true) J22count++; }*/ J12count+=N_g; J22count*=2; J22count+=N_g; nnzJac = nnzJac - (J12count*2) - J22count; //cout<<"Time taken to find nnzJac: "<<getCounter()<<endl; cout<<"nnzJac: "<<nnzJac<<endl; //-------------------------------------------------------------------------------------------- //int jacCount = ynnz - numSlackLines; //int nnzJac = jacCount*4; //for full jacobian (not reduced based on PQ buses) int h_counter = 0; cudaMemcpyToSymbol(counter, &h_counter, sizeof(int), 0, cudaMemcpyHostToDevice); //Jacobian variables int *jacRow, *jacCol; double* jac; jacRow = new int[nnzJac]; jacCol = new int[nnzJac]; jac = new double[nnzJac]; int *dev_jacRow, *dev_jacCol; double* dev_jac; cudaMalloc((void**)&dev_jacRow, nnzJac*sizeof(int)); cudaMalloc((void**)&dev_jacCol, nnzJac*sizeof(int)); cudaMalloc((void**)&dev_jac, nnzJac*sizeof(double)); cudaStat2 = cudaMemcpy(dev_PQindex, PQindex, jacSize*sizeof(int), cudaMemcpyHostToDevice); //dim3 dimGridJac2((ynnz+(threads-1))/threads, (jacSize+(threads-1))/threads, (jacSize+3)/4); //dim3 dimBlockJac2(threads, threads, 4); //dim3 dimGridJac((ynnz+(threads-1))/threads, (N_p+(threads-1)/threads)); dim3 dimBlockJac(threads, threads); dim3 dimGridJ22((ynnz+(threads-1))/threads); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //createJ11<<<dimGridJ22, threads>>>(ynnz, numLines, numSlackLines, slackBus, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); createJ11Copy<<<dimGridJ22, threads>>>(ynnz, slackBus, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" 
ms"<<endl; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //createJacobianSparse3<<<dimGridJac, threads>>>(ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); createJ12_J21<<<dimGridJ22, threads>>>(ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); createJ22<<<dimGridJ22, threads>>>(ynnz, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); //createJacobianSparse3<<<dimGridJac, threads>>>(ynnz, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQbuses, N_p, dev_boolRow, dev_boolCol, dev_J22row, dev_J22col); //createJacobianSparse2<<<dimGridJac2, dimBlockJac2>>>(ynnz, jacCount, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol, dev_PQindex, N_g, N_p, jacSize); //createJacobianSparse<<<dimGridJac, threads>>>(ynnz, jacCount, slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl; /*cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); createJacobianSparse<<<dimGridJ22, threads>>>(ynnz, jacCount, 
slackBus, numberOfBuses, dev_Peq, dev_Qeq, dev_Vmag, dev_theta, yDev, yRowDev, yColDev, dev_jac, dev_jacRow, dev_jacCol); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"GPU elapsed time - Jacobian (sparse): "<<elapsedTime<<" ms"<<endl;*/ //----------------------------------------Thrust remove_if() trial--------------------------------------- /*thrust::device_ptr<int> rowVecPtr(dev_jacRow); thrust::device_ptr<int> colVecPtr(dev_jacCol); thrust::device_ptr<double> valVecPtr(dev_jac); thrust::device_vector<int> rowVec(rowVecPtr, rowVecPtr+nnzJac); thrust::device_vector<int> colVec(colVecPtr, colVecPtr+nnzJac); thrust::device_vector<double> valVec(valVecPtr, valVecPtr+nnzJac); typedef thrust::device_vector<int>::iterator intDiter; //typedef iterator for row and col typedef thrust::device_vector<double>::iterator doubleDiter; //typedef iterator for double values typedef thrust::tuple<intDiter, intDiter, doubleDiter> iteratorTuple; //iterator tuple typedef thrust::zip_iterator<iteratorTuple> zipIt; //zip vectors together using tuple cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i=0; i<N_g; i++) { zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); int eraseJacIdx = (numberOfBuses - 2) + PVbuses[i]; zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, is_PVbusRow(eraseJacIdx)); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac row indices: "<<elapsedTime<<" ms"<<endl; cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; cudaEventCreate(&start); 
cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int i=0; i<N_g; i++) { zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); int eraseJacIdx = (numberOfBuses - 2) + PVbuses[i]; zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, is_PVbusCol(eraseJacIdx)); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac col indices: "<<elapsedTime<<" ms"<<endl; cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+rowVec.size(); zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, isZero()); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Remove jac values: "<<elapsedTime<<" ms"<<endl; cout<<rowVec.size()<<endl; cout<<colVec.size()<<endl; cout<<valVec.size()<<endl; int* dev_jacRowNew = thrust::raw_pointer_cast(&rowVec[0]); int* dev_jacColNew = thrust::raw_pointer_cast(&colVec[0]); double* dev_jacNew = thrust::raw_pointer_cast(&valVec[0]);*/ //------------------------------------------------------------------------------------------------------- cudaMemcpy(jacRow, dev_jacRow, nnzJac*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(jacCol, dev_jacCol, nnzJac*sizeof(int), 
cudaMemcpyDeviceToHost); cudaMemcpy(jac, dev_jac, nnzJac*sizeof(double), cudaMemcpyDeviceToHost); output.open("GPUJac.txt"); for (int i=0; i<nnzJac; i++) output<<jacRow[i]<<"\t"<<jacCol[i]<<"\t"<<jac[i]<<endl; output.close(); //--------------------------------------------------------------SOLUTION OF LINEAR SYSTEM----------------------------------------------------------------- //Removing zeros from dev_jac after calculation //Create wrapper around device pointer thrust::device_ptr<int> rowVecPtr(dev_jacRow); thrust::device_ptr<int> colVecPtr(dev_jacCol); thrust::device_ptr<double> valuesPtr(dev_jac); //Copy to device_vector for functionality thrust::device_vector<int> rowVec(rowVecPtr, rowVecPtr+nnzJac); thrust::device_vector<int> colVec(colVecPtr, colVecPtr+nnzJac); thrust::device_vector<double> valVec(valuesPtr, valuesPtr+nnzJac); //typedef thrust::tuple<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator, thrust::device_vector<float>::iterator> iteratorTuple; typedef thrust::device_vector<int>::iterator intDiter; //typedef iterator for row and col typedef thrust::device_vector<double>::iterator doubleDiter; //typedef iterator for double values typedef thrust::tuple<intDiter, intDiter, doubleDiter> iteratorTuple; //iterator tuple typedef thrust::zip_iterator<iteratorTuple> zipIt; //zip vectors together using tuple zipIt zipBegin = thrust::make_zip_iterator(thrust::make_tuple(rowVec.begin(), colVec.begin(), valVec.begin())); zipIt zipEnd = zipBegin+nnzJac; //Timing remove_if() and erase() operations cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); zipIt newEnd = thrust::remove_if(zipBegin, zipEnd, isZero()); iteratorTuple endTuple = newEnd.get_iterator_tuple(); rowVec.erase(thrust::get<0>(endTuple), rowVec.end()); colVec.erase(thrust::get<1>(endTuple), colVec.end()); valVec.erase(thrust::get<2>(endTuple), valVec.end()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, 
stop); cout<<"Thrust remove_if() for Jacobian: "<<elapsedTime<<" ms"<<endl; cout<<"nnzJac after removing zeros: "<<rowVec.size()<<endl; int* dev_jacRowNew = thrust::raw_pointer_cast(&rowVec[0]); int* dev_jacColNew = thrust::raw_pointer_cast(&colVec[0]); double* dev_jacNew = thrust::raw_pointer_cast(&valVec[0]); nnzJac = rowVec.size(); //need to sort COO format Jacobian matrix in row-major order to get CSR format int* dev_jacRow2, *dev_jacCol2; //second array for sort_by_key() cudaMalloc((void**)&dev_jacRow2, nnzJac*sizeof(int)); cudaMalloc((void**)&dev_jacCol2, nnzJac*sizeof(int)); cudaMemcpy(dev_jacCol2, dev_jacColNew, nnzJac*sizeof(int), cudaMemcpyDeviceToDevice); //wrapping device pointers to arrays in device memory to treat as thrust vectors and use thrust sort function thrust::device_ptr<int> rowVecPtr2(dev_jacRowNew); thrust::device_ptr<int> colVecPtr2(dev_jacColNew); thrust::device_ptr<double> valuesPtr2(dev_jacNew); thrust::device_ptr<int> colVec2(dev_jacCol2); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //Perform sorting (sort column indices and values) based on rows (corresponding array sorting) thrust::stable_sort_by_key(colVecPtr2, colVecPtr2+nnzJac, rowVecPtr2, thrust::less<int>()); thrust::stable_sort_by_key(colVec2, colVec2+nnzJac, valuesPtr2, thrust::less<int>()); cudaMemcpy(dev_jacRow2, dev_jacRowNew, nnzJac*sizeof(int), cudaMemcpyDeviceToDevice); thrust::device_ptr<int> rowVec2(dev_jacRow2); thrust::stable_sort_by_key(rowVecPtr2, rowVecPtr2+nnzJac, colVecPtr2, thrust::less<int>()); thrust::stable_sort_by_key(rowVec2, rowVec2+nnzJac, valuesPtr2, thrust::less<int>()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Sorting Jacobian to convert to CSR: "<<elapsedTime<<" ms"<<endl; //Required to represent coefficient matrix (Jacobian) in sparse (CSR) format int *csrRowPtrJac; //Setup cuSPARSE cusparseStatus_t status; cusparseHandle_t handle = 0; 
cusparseMatDescr_t descr_A = 0; //Initialize cuSPARSE status = cusparseCreate(&handle); if (status != CUSPARSE_STATUS_SUCCESS) { cout<<"CUSPARSE Library Initialization failed"<<endl; return 1; } cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //Create and setup matrix descriptor for Coefficient Matrix cusparseCreateMatDescr(&descr_A); cusparseSetMatType(descr_A, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr_A, CUSPARSE_INDEX_BASE_ZERO); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"CUPARSE Initialization: "<<elapsedTime<<" ms"<<endl; //cudaStat1 = cudaMalloc((void**)&csrRowPtrJac, (jacSizeFull+1)*sizeof(int)); cudaStat1 = cudaMalloc((void**)&csrRowPtrJac, (jacSize+1)*sizeof(int)); if (cudaStat1 != cudaSuccess) { cout<<"Device memory allocation failed."<<endl; return 1; } cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //Convert matrix to sparse format (CSR) cusparseXcoo2csr(handle, dev_jacRowNew, nnzJac, jacSize, csrRowPtrJac, CUSPARSE_INDEX_BASE_ZERO); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"Converting COO to CSR: "<<elapsedTime<<" ms"<<endl; /*double eigen = 0.0; double *initVec = new double[jacSize]; double *dev_initVec, *dev_c; for (int i=0; i<jacSize; i++) initVec[i] = 1; cudaStat1 = cudaMalloc((void**)&dev_initVec, jacSize*sizeof(double)); cudaStat2 = cudaMalloc((void**)&dev_c, jacSize*sizeof(double)); if (cudaStat1 != cudaSuccess || cudaStat2 != cudaSuccess) { cout<<"Device memory allocation failed - BiCGStab variables."<<endl; return 1; } cudaMemcpy(dev_initVec, initVec, jacSize*sizeof(double), cudaMemcpyHostToDevice); powerMethod(csrRowPtrJac, dev_jacColNew, dev_jacNew, descr_A, nnzJac, initVec, dev_initVec, dev_c, handle, &eigen, jacSize); cout<<"Max eigenvalue of Jacobian matrix is: "<<eigen<<endl;*/ int* h_jacRowNew, *h_jacColNew, *h_csrJacRowPtr; 
double* h_jacNew; h_jacRowNew = new int[nnzJac]; h_jacColNew = new int[nnzJac]; h_jacNew = new double[nnzJac]; h_csrJacRowPtr = new int[jacSize+1]; cudaMemcpy(h_jacRowNew, dev_jacRowNew, nnzJac*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_jacColNew, dev_jacColNew, nnzJac*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_jacNew, dev_jacNew, nnzJac*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(h_csrJacRowPtr, csrRowPtrJac, (jacSize+1)*sizeof(int), cudaMemcpyDeviceToHost); output.open("new jacobian after elimination and sorting.txt"); for (int i=0; i<nnzJac; i++) output<<h_jacRowNew[i]<<"\t"<<h_jacColNew[i]<<"\t"<<h_jacNew[i]<<endl; for (int i=0; i<jacSize+1; i++) output<<h_csrJacRowPtr[i]<<endl; output.close(); //Setup cuBLAS cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cublasStatus cublas_status; cublas_status = cublasInit(); if (cublas_status!=CUBLAS_STATUS_SUCCESS) { cout<<"cuBLAS Initialization Error!"<<endl; return 1; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout<<"CUBLAS Initialization: "<<elapsedTime<<" ms"<<endl; //Vectors required in BiCGStab double *res, *r_tld, *p, *s, *t, *v, *p_hat, *s_hat; /*cudaStat1 = cudaMalloc((void**)&res, jacSizeFull*sizeof(double)); cudaStat2 = cudaMalloc((void**)&r_tld, jacSizeFull*sizeof(double)); cudaStat3 = cudaMalloc((void**)&p, jacSizeFull*sizeof(double)); cudaStat4 = cudaMalloc((void**)&p_hat, jacSizeFull*sizeof(double)); cudaStat5 = cudaMalloc((void**)&s, jacSizeFull*sizeof(double)); cudaStat6 = cudaMalloc((void**)&s_hat, jacSizeFull*sizeof(double)); cudaStat7 = cudaMalloc((void**)&v, jacSizeFull*sizeof(double)); cudaStat8 = cudaMalloc((void**)&t, jacSizeFull*sizeof(double));*/ cudaStat1 = cudaMalloc((void**)&res, jacSize*sizeof(double)); cudaStat2 = cudaMalloc((void**)&r_tld, jacSize*sizeof(double)); cudaStat3 = cudaMalloc((void**)&p, jacSize*sizeof(double)); cudaStat4 = cudaMalloc((void**)&p_hat, 
jacSize*sizeof(double)); cudaStat5 = cudaMalloc((void**)&s, jacSize*sizeof(double)); cudaStat6 = cudaMalloc((void**)&s_hat, jacSize*sizeof(double)); cudaStat7 = cudaMalloc((void**)&v, jacSize*sizeof(double)); cudaStat8 = cudaMalloc((void**)&t, jacSize*sizeof(double)); if (cudaStat1 != cudaSuccess || cudaStat2 != cudaSuccess || cudaStat3 != cudaSuccess || cudaStat4 != cudaSuccess || cudaStat5 != cudaSuccess || cudaStat6 != cudaSuccess || cudaStat7 != cudaSuccess || cudaStat8 != cudaSuccess) { cout<<"Device memory allocation failed - BiCGStab variables."<<endl; return 1; } cudaMemcpy(dev_stateVector, stateVector, jacSize*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_xi, stateVector, jacSize*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_powerMismatch, powerMismatch, jacSize*sizeof(double), cudaMemcpyHostToDevice); /*cudaMemcpy(dev_stateVector, stateVector, jacSizeFull*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_xi, stateVector, jacSizeFull*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_powerMismatch, powerMismatch, jacSizeFull*sizeof(double), cudaMemcpyHostToDevice);*/ ; //cudaEventCreate(&start); //cudaEventCreate(&stop); //cudaEventRecord(start, 0); biCGStab2(status, handle, descr_A, jacSize, jacSize, nnzJac, dev_jacColNew, csrRowPtrJac, dev_jacNew, dev_stateVector, res, r_tld, p, p_hat, s, s_hat, v, t, dev_powerMismatch, jacSize); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&elapsedTime, start, stop); //cout<<"PBiCG-Stab: "<<elapsedTime<<" ms"<<endl; cudaMemcpy(stateVector, dev_stateVector, jacSize*sizeof(double), cudaMemcpyDeviceToHost); output.open("output.txt"); for(int i=0; i<jacSize; i++) output<<stateVector[i]<<endl; output.close(); //updateX<<<((jacSize+(threads-1))/threads), threads>>>(jacSize, numberOfBuses, dev_PQindex, dev_Vmag, dev_theta, dev_stateVector, dev_xi); //powerEqn <<<dimGrid, dimBlock>>>(dev_Peq1, dev_Qeq1, dev_y, dev_Vmag, dev_theta, numberOfBuses); 
//updateMismatch<<<((jacSize+(threads-1))/threads), threads>>>(numberOfBuses, jacSize, dev_Peq1, dev_Qeq1, dev_PQindex, dev_PQcalc, dev_PQspec, dev_powerMismatch);
//createJacobian<<<dimGrid2, dimBlock>>>(numberOfBuses, jacSize, dev_Peq1, dev_Qeq1, dev_Vmag, dev_theta, dev_y, dev_jacobian, dev_PQindex);
//check convergence
//call Jacobian
//loop
// Shut down cuBLAS; exit with an error code if teardown fails.
cublas_status = cublasShutdown();
if (cublas_status != CUBLAS_STATUS_SUCCESS)
{
    cout<<"Shut down error"<<endl;
    return 1;
}
//-------------------------------------------------------------------------------------------------------------------------------------------------------------
// Report the dimensions of the power-flow problem that was set up above.
cout<<"\nThere will be "<< numberOfBuses-1<<" P equations and "<<N_p<<" Q equations to solve."<<endl;
cout<<"There will be a total of "<<numberOfBuses+N_p-1<<" equations for the system and "<< numberOfBuses+N_p-1<< " unknowns (V and delta) to be solved."<<endl;
cout<<"The Jacobian matrix will be of size "<<2*(numberOfBuses-1)-N_g<<" x "<<2*(numberOfBuses-1)-N_g<<endl;
//free CUDA memory
cudaFree(dev_fromBus);
cudaFree(dev_toBus);
cudaFree(dev_z);
cudaFree(dev_B);
cudaFree(dev_Pd);
cudaFree(dev_Qd);
cudaFree(yDev);
cudaFree(yRowDev);
cudaFree(yColDev);
cudaFree(dev_jac);
cudaFree(dev_jacRow);
// cudaFree(dev_jacRow2);
cudaFree(dev_jacCol);
cudaFree(dev_Vmag);
cudaFree(dev_theta);
cudaFree(dev_Peq);
cudaFree(dev_Qeq);
cudaFree(dev_powerMismatch);
cudaFree(dev_stateVector);
cudaFree(dev_PQindex);
// cudaFree(csrRowPtrJac);
//delete all dynamic arrays
delete[] busNum;
delete[] busType;
delete[] Pd;
delete[] Qd;
delete[] Vm;
delete[] Va;
delete[] Vmax;
delete[] Vmin;
delete[] fromBusArr;
delete[] toBusArr;
delete[] R;
delete[] X;
delete[] Z;
delete[] B;
delete[] P_eq;
delete[] Q_eq;
delete[] theta;
return 0;
}

/*
 * createYBusSparse
 * Builds the bus admittance matrix (Ybus) in COO form, one thread per line.
 * Storage layout of y/yrow/ycol:
 *   [0, numLines)           off-diagonal entries at (fromBus, toBus)
 *   [numLines, 2*numLines)  mirrored off-diagonal entries at (toBus, fromBus)
 *   [2*numLines, ...)       diagonal entries, one per bus
 * Each off-diagonal value is -1/Z for the line; each line atomically adds
 * 1/Z + B (series admittance plus the line's B term) into the diagonal slot
 * of both terminal buses via atomicAddComplex (defined outside this excerpt).
 * NOTE(review): the diagonal index initialization (index<numberOfBuses) is
 * nested inside (index<numLines); if numberOfBuses > numLines some diagonal
 * yrow/ycol slots are never written -- confirm numLines >= numberOfBuses
 * holds for every input network.
 */
__global__ void createYBusSparse(int numLines, int numberOfBuses, int *fromBus, int* toBus, cuDoubleComplex *Z, cuDoubleComplex *B, cuDoubleComplex *y, int *yrow, int *ycol)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<numLines)
    {
        int i = fromBus[index];
        int j = toBus[index];
        // Off-diagonal (i,j) and its mirror (j,i), both valued -1/Z.
        yrow[index] = i;
        ycol[index] = j;
        yrow[index+numLines] = j;
        ycol[index+numLines] = i;
        y[index] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        y[index+numLines] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        // Accumulate 1/Z + B into the diagonal of both terminal buses.
        cuDoubleComplex temp = cuCadd(cuCdiv(make_cuDoubleComplex(1,0),Z[index]),B[index]);
        atomicAddComplex(&y[i+(2*numLines)], temp);
        atomicAddComplex(&y[j+(2*numLines)], temp);
        // Diagonal coordinates (one per bus).
        if (index<numberOfBuses)
        {
            yrow[2*numLines+index] = index;
            ycol[2*numLines+index] = index;
        }
    }
}

/*
 * createYBusSparseConcise
 * Same as createYBusSparse but stores only ONE copy of each off-diagonal
 * entry (no mirrored (toBus, fromBus) entry), so the layout is:
 *   [0, numLines)   off-diagonal entries at (fromBus, toBus)
 *   [numLines, ...) diagonal entries, one per bus
 * Consumers must account for the implied symmetry (see powerEqnSparseConcise).
 * NOTE(review): same nested (index<numberOfBuses) caveat as createYBusSparse.
 */
__global__ void createYBusSparseConcise(int numLines, int numberOfBuses, int *fromBus, int* toBus, cuDoubleComplex *Z, cuDoubleComplex *B, cuDoubleComplex *y, int *yrow, int *ycol)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index<numLines)
    {
        int i = fromBus[index];
        int j = toBus[index];
        yrow[index] = i;
        ycol[index] = j;
        y[index] = cuCsub(make_cuDoubleComplex(0,0), cuCdiv(make_cuDoubleComplex(1,0),Z[index]));
        cuDoubleComplex temp = cuCadd(cuCdiv(make_cuDoubleComplex(1,0),Z[index]),B[index]);
        atomicAddComplex(&y[i+numLines], temp);
        atomicAddComplex(&y[j+numLines], temp);
        if (index<numberOfBuses)
        {
            yrow[numLines+index] = index;
            ycol[numLines+index] = index;
        }
    }
}

/*
 * powerEqnSparse
 * Accumulates calculated bus power injections from the COO Ybus, one thread
 * per nonzero entry:
 *   P[row] += Vm[row]*Vm[col]*(Re(y)*cos(th_row-th_col) + Im(y)*sin(th_row-th_col))
 *   Q[row] += Vm[row]*Vm[col]*(Re(y)*sin(th_row-th_col) - Im(y)*cos(th_row-th_col))
 * using atomicAdd2 (project-defined atomic add on double; defined outside
 * this excerpt). Assumes P and Q are zeroed before launch -- TODO confirm at
 * the call sites.
 */
__global__ void powerEqnSparse(double *P, double *Q, cuDoubleComplex* y, int* yrow, int* ycol, double *Vm, double *theta, int ynnz)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < ynnz)
    {
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        //atomicAdd2(&P[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])) + cuCimag(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
        //atomicAdd2(&Q[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])) - cuCimag(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])))));
    }
}

/*
 * powerEqnSparseConcise
 * Power-injection accumulation for the "concise" Ybus layout produced by
 * createYBusSparseConcise, where each off-diagonal is stored once:
 *   [0, numLines)    off-diagonal entries -- contribute to BOTH terminal
 *                    buses (the mirrored term is computed explicitly here)
 *   [numLines, ynnz) diagonal entries -- contribute to their own bus only
 * Uses atomicAdd2 for the double-precision accumulation into P and Q.
 */
__global__ void powerEqnSparseConcise(double *P, double *Q, cuDoubleComplex* y, int* yrow, int* ycol, double *Vm, double *theta, int ynnz, int numLines)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < numLines)
    {
        // Off-diagonal entry: add the (row,col) term and its (col,row) mirror.
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&P[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])) + cuCimag(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&Q[ycol[i]], (Vm[ycol[i]]*(Vm[yrow[i]]*((cuCreal(y[i])*sin(theta[ycol[i]] - theta[yrow[i]])) - cuCimag(y[i])*cos(theta[ycol[i]] - theta[yrow[i]])))));
    }
    if (i >= numLines && i < ynnz)
    {
        // Diagonal entry: single contribution to its own bus.
        atomicAdd2(&P[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])) + cuCimag(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])))));
        atomicAdd2(&Q[yrow[i]], (Vm[yrow[i]]*(Vm[ycol[i]]*((cuCreal(y[i])*sin(theta[yrow[i]] - theta[ycol[i]])) - cuCimag(y[i])*cos(theta[yrow[i]] - theta[ycol[i]])))));
    }
}

/*
 * countNnzJac
 * Counts, via global atomics, the Jacobian entries associated with PV buses
 * so the host can shrink nnzJac:
 *   - dev_J12count accumulates Ybus entries whose column (resp. row) is a PV
 *     bus while the row (resp. column) is not the slack bus;
 *   - boolCheck[i] marks those Ybus positions, and dev_J22count counts the
 *     marked positions (entries falling in J22).
 * The host zeroes dev_J12count/dev_J22count and boolCheck before launch.
 * NOTE(review): the thread index i is not bounds-checked before indexing
 * yrow/ycol/boolCheck; the equivalent commented-out host loop runs
 * i < numLines and boolCheck is allocated with numLines elements -- confirm
 * the launch configuration cannot spawn threads with i >= numLines, or add a
 * size parameter and guard.
 */
__global__ void countNnzJac(bool* boolCheck, int *yrow, int* ycol, cuDoubleComplex *y, int *PVbuses, int N_g, int slackBus, int* dev_J12count, int* dev_J22count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for (int j=0; j<N_g; j++)
    {
        if (yrow[i] != slackBus && ycol[i] == PVbuses[j])
        {
            atomicAdd(dev_J12count, 1); //J12count++
            boolCheck[i] = true;
        }
        if (ycol[i] != slackBus && yrow[i] == PVbuses[j])
        {
            atomicAdd(dev_J12count, 1); //J12count++
            boolCheck[i] = true;
        }
    }
    if (boolCheck[i] == true)
        atomicAdd(dev_J22count, 1); //J22count++
}

//full jacobian
/*
 * createJacobianSparse
 * Builds the FULL Jacobian (no PV-bus reduction) in COO form, one thread per
 * Ybus nonzero; entries in the slack bus row/column are skipped. Each
 * surviving Ybus entry produces four output entries, one per sub-block
 * (J11, J12, J21, J22), at slots i, i+jacCount, i+2*jacCount, i+3*jacCount,
 * where i comes from the global device counter `counter` (reset by the host
 * via cudaMemcpyToSymbol before launch).
 */
__global__ void createJacobianSparse(int ynnz, int jacCount, int
slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz)
    {
        // Skip Ybus entries in the slack bus row or column.
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            // Reserve one COO slot per sub-block from the global counter.
            int i = atomicAdd(&counter, 1);
            //J11
            jacRow[i] = yrow[yIdx] - 1;
            jacCol[i] = ycol[yIdx] - 1;
            //J12
            jacRow[i + jacCount] = yrow[yIdx] - 1;
            jacCol[i + jacCount] = ycol[yIdx] + numBus - 2;
            //J21
            jacRow[i + (2*jacCount)] = yrow[yIdx] + numBus - 2;
            jacCol[i + (2*jacCount)] = ycol[yIdx] - 1;
            //J22
            jacRow[i + (3*jacCount)] = yrow[yIdx] + numBus - 2;
            jacCol[i + (3*jacCount)] = ycol[yIdx] + numBus - 2;
            if (yrow[yIdx] == ycol[yIdx])
            {
                //J11 diagonal calculations
                jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
                //J12 diagonal calculations
                jac[i + jacCount] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                //J21 diagonal calculations
                jac[i + (2*jacCount)] = P[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                //J22 diagonal calculations
                jac[i + (3*jacCount)] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]);
            }
            else
            {
                //J11 off-diagonal calculations
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J12 off-diagonal calculations
                jac[i + jacCount] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J21 off-diagonal calculations
                jac[i + (2*jacCount)] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                //J22 off-diagonal calculations
                jac[i + (3*jacCount)] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            }
        }
    }
}

/*
 * createJacobianSparse3
 * 2-D variant that fills only the J12 and J21 Jacobian entries for PQ buses:
 * the x dimension covers Ybus nonzeros (yIdx), the y dimension covers PQ
 * buses (PQidx). For each Ybus entry outside the slack row/column whose
 * column (row) equals the PQ bus, one J12 (J21) COO entry is emitted at a
 * slot taken from the global atomic counter. boolCol/boolRow and
 * J22col/J22row record, per Ybus position, the data later consumed by
 * createJ22.
 * NOTE(review): each matching (yIdx, PQidx) pair performs its own atomicAdd
 * on `counter`, so the COO output order is non-deterministic (the host sorts
 * the entries afterwards).
 */
__global__ void createJacobianSparse3(int ynnz, int slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, int* PQbuses, int N_p, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int PQidx = blockIdx.y * blockDim.y + threadIdx.y;
    if (yIdx < ynnz)
    {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            /*if (PQidx==0) { int i = atomicAdd(&counter, 1); int j = atomicAdd(&counter2, 1); //J11 jacRow[i] = yrow[yIdx] - 1; jacCol[i] = ycol[yIdx] - 1; if (yrow[yIdx] == ycol[yIdx]) { //J11 diagonal calculations jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx])); } else { //J11 off-diagonal calculations jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]]))); } }*/
            if (PQidx<N_p)
            {
                if (ycol[yIdx] == PQbuses[PQidx])
                {
                    boolCol[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    // Row index shifts down by one for buses past the slack bus.
                    if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
                    else jacRow[i] = yrow[yIdx] - 1;
                    jacCol[i] = numBus + PQidx -1; //J12
                    J22col[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx])
                    {
                        //J12 diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                    }
                    else
                    {
                        //J12 off-diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                if (yrow[yIdx] == PQbuses[PQidx])
                {
                    boolRow[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    //int j = atomicAdd(&counter2, 1);
                    jacRow[i] = numBus + PQidx -1; //J21
                    if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
                    else jacCol[i] = ycol[yIdx] - 1;
                    J22row[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx])
                    {
                        //J21 diagonal calculations
                        jac[i] = P[yrow[yIdx]]
-Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                    }
                    else
                    {
                        //J21 off-diagonal calculations
                        jac[i] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                /*if (boolRow[yIdx] == true && boolCol[yIdx] == true) { int i = atomicAdd(&counter, 1); if (yrow[yIdx] == ycol[yIdx]) { jac[i] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]); } else { jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]]))); } }*/
            }
        }
    }
}

/*
 * createJ12_J21
 * 1-D replacement for createJacobianSparse3: one thread per Ybus nonzero,
 * looping over the PQ buses internally. Emits a J12 entry when the Ybus
 * entry's column is a PQ bus and a J21 entry when its row is a PQ bus
 * (slack row/column excluded), taking COO slots from the global atomic
 * counter. boolRow/boolCol and J22row/J22col are filled for the follow-up
 * createJ22 launch.
 */
__global__ void createJ12_J21(int ynnz, int slackBus, int numBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, int* PQbuses, int N_p, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz)
    {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            for (int PQidx=0; PQidx<N_p; PQidx++)
            {
                if (ycol[yIdx] == PQbuses[PQidx])
                {
                    boolCol[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    jacCol[i] = numBus + PQidx -1; //J12
                    // Row index shifts down by one for buses past the slack bus.
                    if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
                    else jacRow[i] = yrow[yIdx] - 1;
                    J22col[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx])
                    {
                        //J12 diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]) + P[yrow[yIdx]];
                    }
                    else
                    {
                        //J12 off-diagonal calculations
                        jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
                if (yrow[yIdx] == PQbuses[PQidx])
                {
                    boolRow[yIdx] = true;
                    int i = atomicAdd(&counter, 1);
                    jacRow[i] = numBus + PQidx -1; //J21
                    if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
                    else jacCol[i] = ycol[yIdx] - 1;
                    J22row[yIdx] = numBus + PQidx - 1;
                    if (yrow[yIdx] == ycol[yIdx])
                    {
                        //J21 diagonal calculations
                        jac[i] = P[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCreal(y[yIdx]);
                    }
                    else
                    {
                        //J21 off-diagonal calculations
                        jac[i] = -Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) + (cuCimag(y[yIdx]) * sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
                    }
                }
            }
        }
    }
}

/*
 * createJ22
 * Emits the J22 COO entries using the bookkeeping produced by
 * createJ12_J21 / createJacobianSparse3: a Ybus position contributes to J22
 * only when BOTH its row and column were flagged as PQ-bus positions
 * (boolRow && boolCol); the precomputed J22row/J22col give the Jacobian
 * coordinates, and the slot again comes from the global atomic counter.
 */
__global__ void createJ22(int ynnz, int numBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol, bool* boolRow, bool* boolCol, int* J22row, int* J22col)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx<ynnz)
    {
        if (boolRow[yIdx] == true && boolCol[yIdx] == true)
        {
            int i = atomicAdd(&counter, 1);
            jacRow[i] = J22row[yIdx];
            jacCol[i] = J22col[yIdx];
            if (yrow[yIdx] == ycol[yIdx])
            {
                // J22 diagonal
                jac[i] = Q[yrow[yIdx]] -Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]);
            }
            else
            {
                // J22 off-diagonal
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            }
        }
    }
}

/*
 * createJ11
 * Fills the J11 sub-block from the "concise" Ybus layout: entries in
 * [0, numLines) are off-diagonals (each one also mirrored into slot
 * i+offset), entries in [numLines, ynnz) are diagonals (written at slot
 * i+offset only). offset = numLines - (numSlackLines - 1)/2 partitions the
 * output arrays. Slots come from the global atomic counter.
 * NOTE(review): the identical expression is stored at jac[i] and
 * jac[i+offset]; for the mirrored (col,row) entry the angle difference
 * changes sign -- confirm this symmetric reuse is intended.
 */
__global__ void createJ11(int ynnz, int numLines, int numSlackLines, int slackBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = numLines - ((numSlackLines - 1)/2);
    if (yIdx < numLines)
    {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            int i = atomicAdd(&counter, 1);
            // Row/column indices shift down by one past the slack bus;
            // the mirrored entry swaps row and column.
            if (yrow[yIdx] < slackBus) { jacRow[i] = yrow[yIdx]; jacCol[i+offset] = yrow[yIdx]; }
            else { jacRow[i] = yrow[yIdx] - 1; jacCol[i+offset] = yrow[yIdx] - 1; }
            if (ycol[yIdx] < slackBus) { jacCol[i] = ycol[yIdx]; jacRow[i+offset] = ycol[yIdx]; }
            else { jacCol[i] = ycol[yIdx] - 1; jacRow[i+offset] = ycol[yIdx] - 1; }
            jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
            jac[i+offset] =
Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
        }
    }
    // Diagonal Ybus entries: a single J11 entry at slot i+offset.
    if (yIdx >= numLines && yIdx < ynnz)
    {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            int i = atomicAdd(&counter, 1);
            if (yrow[yIdx] < slackBus) jacRow[i+offset] = yrow[yIdx];
            else jacRow[i+offset] = yrow[yIdx] - 1;
            if (ycol[yIdx] < slackBus) jacCol[i+offset] = ycol[yIdx];
            else jacCol[i+offset] = ycol[yIdx] - 1;
            jac[i+offset] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
        }
    }
}

/*
 * createJ11Copy
 * Fills the J11 sub-block from the full (mirrored) Ybus COO arrays: one
 * thread per nonzero, slack row/column skipped, one Jacobian entry per Ybus
 * entry at a slot taken from the global atomic counter. Row/column indices
 * are shifted down by one for buses past the slack bus, removing the slack
 * row/column from the Jacobian numbering.
 */
__global__ void createJ11Copy(int ynnz, int slackBus, double* P, double *Q, double *Vmag, double *Vang, cuDoubleComplex *y, int *yrow, int *ycol, double *jac, int *jacRow, int *jacCol)
{
    int yIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (yIdx < ynnz)
    {
        if (yrow[yIdx] != slackBus && ycol[yIdx] !=slackBus)
        {
            int i = atomicAdd(&counter, 1);
            if (yrow[yIdx] < slackBus) jacRow[i] = yrow[yIdx];
            else jacRow[i] = yrow[yIdx] - 1;
            if (ycol[yIdx] < slackBus) jacCol[i] = ycol[yIdx];
            else jacCol[i] = ycol[yIdx] - 1;
            if (yrow[yIdx] == ycol[yIdx]) //J11 diagonal calculations
                jac[i] = -Q[yrow[yIdx]] - (Vmag[yrow[yIdx]]*Vmag[yrow[yIdx]]*cuCimag(y[yIdx]));
            else //J11 off-diagonal calculations
                jac[i] = Vmag[yrow[yIdx]]*Vmag[ycol[yIdx]] *((cuCreal(y[yIdx])*sin(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])) - (cuCimag(y[yIdx]) * cos(Vang[yrow[yIdx]] - Vang[ycol[yIdx]])));
        }
    }
}

// Superseded prototypes of the BiCGStab solver, kept for reference.
/*void biCGStab(cusparseDtatus_t status, cusparseHandle_t handle, cusparseMatDescr_t descr_A, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)*/
/*void biCGStab(cusparseDtatus_t status, cusparseHandle_t handle, cusparseMatDescr_t descr_A, cusparseMatDescr_t descr_M, int M, int N, int nnz, int nnzJacPre, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, int* csrColIndPre, int*
csrRowPtrPre, double* csrValPre, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)*/

// Preconditioned BiCGStab solver for the sparse system A*x = b (CSR matrix
// A of size M x N with nnz nonzeros), using an already-factorized ILU(0)
// preconditioner split into L/U triangular solves (descr_L/info_L and
// descr_U/info_U must have been analyzed by the caller).
// x holds the initial guess on entry and the solution on exit; r, r_tld, p,
// p_hat, s, s_hat, v, t are caller-allocated device work vectors.
// Tol and max_it are file-scope constants defined earlier in this file.
// Uses the legacy cuBLAS v1 API (cublasInit/cublasShutdown).
void biCGStab(cusparseStatus_t status, cusparseHandle_t handle, cusparseMatDescr_t descr_A, cusparseMatDescr_t descr_L, cusparseMatDescr_t descr_U, cusparseSolveAnalysisInfo_t info_L, cusparseSolveAnalysisInfo_t info_U, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, int* csrColIndPre, int* csrRowPtrPre, double* csrValPre, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b)
{
  double bnorm, snorm, err, alpha=1.0, beta, omega=1.0, rho=1.0, rho_1, resid=0; //BiCG scalars - previously global variables
  int flag, iter;
  //For cusparse csrmv function
  double d_one = 1.0;
  double dzero = 0.0;
  double temp=0, temp2=0;
  //Setup cuBLAS
  cublasStatus cublas_status;
  cublas_status = cublasInit();
  if (cublas_status!=CUBLAS_STATUS_SUCCESS) {
    cout<<"cuBLAS Initialization Error!"<<endl;
    return;
  }
  // r = b - A*x  (computed as r = A*x, r = -r, r = r + b)
  cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, x, &dzero, r);
  cublasDscal(M, -1.0, r, 1);
  cublasDaxpy(M, 1.0, b, 1, r, 1);
  cublasDcopy(N, r, 1, p, 1);
  cublasDcopy(N, r, 1, r_tld, 1);
  bnorm = cublasDnrm2(N, b, 1);
  //To find Error: error = ||r||/||b||
  err = cublasDnrm2(N, r, 1)/bnorm;
  if (err<Tol) {
    cout<<"Solution has already converged"<<endl;
    return;
  }
  for (iter=0; iter<max_it; iter++) {
    rho_1 = rho;
    rho = cublasDdot(N, r_tld, 1, r, 1);
    //cout<<"Rho: "<<rho<<endl;
    if (rho == 0) //check for breakdown
      break;
    //For every iteration after the first
    if (iter>0) {
      beta = (rho/rho_1)*(alpha/omega);
      //p = r+ beta(p-omega*v)
      cublasDaxpy(N, -omega, v, 1, p, 1);
      cublasDscal(N, beta, p, 1);
      cublasDaxpy(N, 1.0, r, 1, p, 1);
    }
    //Preconditioner to find v
    //If M=I, this implies p_hat = p => can use cudaMemcpyDeviceToDevice to transfer p to p_hat
    //p_hat = M\p
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrPre, csrColIndPre, info_L, p, t);
    //Here we are using t as a temporary vector - it is not needed in the algorithm at this point and saves memory of creating another vector
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrPre, csrColIndPre, info_U, t, p_hat); //p_hat = inv(M)*p
    //cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnzJacPre, &d_one, descr_M, csrValPre, csrRowPtrPre, csrColIndPre, p, &dzero, p_hat);
    //v = A*p_hat
    //cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p, &dzero, v);
    cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p_hat, &dzero, v);
    alpha = rho/cublasDdot(N, r_tld, 1, v, 1); //alpha = rho/(r_tld,v)
    cublasDaxpy(N, -alpha, v, 1, r, 1); //s=r - alpha*v
    cublasDcopy(N, r, 1, s, 1);
    //cublasDaxpy(N, alpha, p, 1, x, 1);
    cublasDaxpy(N, alpha, p_hat, 1, x, 1); //x = x+ alpha*p
    //Check for convergence
    snorm = cublasDnrm2(N, s, 1);
    if (snorm/bnorm < Tol) {
      resid = snorm/bnorm;
      break;
    }
    //Preconditioner to find t
    //M=I implies s = s_hat => t=As
    //s_hat = M\s
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrPre, csrColIndPre, info_L, r, t);
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrPre, csrColIndPre, info_U, t, s_hat); //s_hat = inv(M)*s
    //cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnzJacPre, &d_one, descr_M, csrValPre, csrRowPtrPre, csrColIndPre, s, &dzero, s_hat);
    //t=A*s_hat
    //cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s, &dzero, t);
    cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one,
descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s_hat, &dzero, t);
    temp = cublasDdot(N, t, 1, r, 1);
    temp2 = cublasDdot(N, t, 1, t, 1);
    omega = temp/temp2;
    //x = x+ omega*s
    //cublasDaxpy(N, omega, s, 1, x, 1);
    cublasDaxpy(N, omega, s_hat, 1, x, 1);
    //r = s-omega*t
    cublasDaxpy(N, -omega, t, 1, r, 1);
    err = cublasDnrm2(N, r, 1)/bnorm;
    if (err<=Tol) {
      resid = cublasDnrm2(N, s, 1)/bnorm;
      break;
    }
    if (omega == 0.0) break;
    //rho_1 = rho;
  }
  // Classify the exit condition and report.
  if (err <= Tol || snorm/bnorm < Tol) flag = 0;
  else if (omega == 0.0) flag = -2;
  else if (rho == 0) flag = -1;
  else flag = 1;
  if (!flag) cout<<"The solution converged with residual "<<resid<<" in "<<iter<<" iterations."<<endl;
  else cout<<"BiCGStab produced error "<<flag<<" after "<<iter<<" iterations."<<endl;
  //Shutdown cuBLAS
  cublas_status = cublasShutdown();
  if (cublas_status != CUBLAS_STATUS_SUCCESS) {
    cout<<"Shut down error"<<endl;
    return;
  }
}

//----------------------------------------------------------------------------------------------------------------------------------

// Applies the Newton step: indices [0, N-2] update bus angles (theta),
// indices [N-1, jacSize-1] update bus voltage magnitudes (Vmag), both
// indirected through PQindex.  stateVector is then advanced by x.
// NOTE(review): theta/Vmag add the *pre-update* stateVector while
// stateVector itself becomes x + stateVector - confirm this ordering is the
// intended accumulation scheme.
__global__ void updateX(int jacSize, int N, int *PQindex, double *Vmag, double *theta, double *stateVector, double *x) {
  int index = blockIdx.x*blockDim.x + threadIdx.x;
  if (index<(N-1)) {
    theta[PQindex[index]] = theta[PQindex[index]] + stateVector[index];
    stateVector[index] = x[index] + stateVector[index];
  } else if(index>=(N-1) && index<jacSize) {
    Vmag[PQindex[index]] = Vmag[PQindex[index]] + stateVector[index];
    stateVector[index] = x[index] + stateVector[index];
  }
}

// Recomputes the power mismatch vector: entries [0, N-2] are active-power
// (P) mismatches, entries [N-1, jacSize-1] reactive-power (Q) mismatches,
// gathered through PQindex.
__global__ void updateMismatch(int N, int jacSize, double *P_eq, double *Q_eq, int *PQindex, double* PQcalc, double* PQspec, double *powerMismatch) {
  int index = blockIdx.x*blockDim.x + threadIdx.x;
  if (index<(N-1)) {
    PQcalc[index] = P_eq[PQindex[index]];
  } else if(index>=(N-1) && index<jacSize) {
    PQcalc[index] = Q_eq[PQindex[index]];
  }
  // FIX: the final store was previously unguarded, so every thread in the
  // rounded-up grid with index >= jacSize performed an out-of-bounds read of
  // PQspec/PQcalc and an out-of-bounds write to powerMismatch.
  if (index < jacSize)
    powerMismatch[index] = PQspec[index] - PQcalc[index];
}

// Builds the inverse of the Jacobi (diagonal) preconditioner as a dense
// matrix: precInv = diag(1/a_ii), zeros elsewhere.  2D launch, one thread
// per matrix element.
__global__ void jacobiPrecond(int jacSize, double *jacobian, double *precInv) {
  int rowIdx =
blockDim.y*blockIdx.y + threadIdx.y;
  int colIdx = blockDim.x*blockIdx.x + threadIdx.x;
  int index = rowIdx*jacSize + colIdx; //row major order - modify for column major
  precInv[index] = 0;
  if (rowIdx == colIdx) precInv[index] = 1/jacobian[index]; //inverse of strictly diagonal matrix is 1/a_ii
}

// Converts radians to degrees (57.29577951 = 180/pi).
__device__ double radToDeg(double a) {
  return 57.29577951*a;
}

// Atomically adds complex b into *a by treating the cuDoubleComplex as two
// adjacent doubles (real then imaginary).  The two halves are added with
// separate atomics, so the pair is not updated as a single atomic unit.
// NOTE(review): calls atomicAdd2, which is defined below - assumes a
// forward declaration exists earlier in this file; confirm.
__device__ void atomicAddComplex(cuDoubleComplex *a, cuDoubleComplex b) {
  double *x = (double*)a;
  double *y = x+1; //imaginary part follows the real part in memory
  atomicAdd2(x, cuCreal(b));
  atomicAdd2(y, cuCimag(b));
}

// Double-precision atomicAdd emulated via a compare-and-swap loop on the
// 64-bit integer image of the double (standard pre-SM60 pattern).
// Returns the previous value at *address.
__device__ double atomicAdd2(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old); //retry if another thread updated *address meanwhile
  return __longlong_as_double(old);
}

// Self-contained ILU(0)-preconditioned BiCGStab: unlike biCGStab above, this
// variant builds the preconditioner itself (copies A's values, runs cuSPARSE
// analysis and csrilu0 in-place) and times the setup, factorization and
// iteration phases with CUDA events.  x is the initial guess on entry and
// the solution on exit.  Tol and max_it are file-scope constants.
void biCGStab2(cusparseStatus_t status, cusparseHandle_t handle, cusparseMatDescr_t descr_A, int M, int N, int nnz, int* csrColIndAdev, int* csrRowPtrAdev, double* csrValAdev, double* x, double* r, double* r_tld, double* p, double *p_hat, double* s, double *s_hat, double* v, double* t, double* b, int jacSize)
{
  double* csrValPre;
  cudaEvent_t start, stop;
  float elapsedTime;
  ofstream triSolve1("Tri solve 1.txt");
  ofstream triSolve2("Tri solve 2.txt");
  ofstream spMV1("SpMV 1.txt");
  ofstream spMV2("SpMV 2.txt");
  ofstream ilu("ILU matrix.txt");
  // The ILU factors overwrite a copy of A's values (same sparsity pattern).
  cudaMalloc((void**)&csrValPre, nnz*sizeof(double));
  cudaMemcpy(csrValPre, csrValAdev, nnz*sizeof(double), cudaMemcpyDeviceToDevice);
  cusparseMatDescr_t descr_M = 0;
  cusparseMatDescr_t descr_L = 0;
  cusparseMatDescr_t descr_U = 0;
  cusparseSolveAnalysisInfo_t info_M;
  cusparseSolveAnalysisInfo_t info_L;
  cusparseSolveAnalysisInfo_t info_U;
  //Create and setup matrix descriptors for ILU preconditioner
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  cusparseCreateMatDescr(&descr_M);
  cusparseSetMatType(descr_M, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descr_M, CUSPARSE_INDEX_BASE_ZERO);
  cusparseCreateMatDescr(&descr_L);
  cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO);
  cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER);
  cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_UNIT);
  cusparseCreateMatDescr(&descr_U);
  cusparseSetMatType(descr_U, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descr_U, CUSPARSE_INDEX_BASE_ZERO);
  cusparseSetMatFillMode(descr_U, CUSPARSE_FILL_MODE_UPPER);
  cusparseSetMatDiagType(descr_U, CUSPARSE_DIAG_TYPE_NON_UNIT);
  cusparseCreateSolveAnalysisInfo(&info_M);
  cusparseCreateSolveAnalysisInfo(&info_L);
  cusparseCreateSolveAnalysisInfo(&info_U);
  //Perform Analysis before calling ilu0(); NB analysis can be done on L and U at this point since sparsity pattern is the same
  cusparseDcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_M, csrValPre, csrRowPtrAdev, csrColIndAdev, info_M);
  cusparseDcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L);
  cusparseDcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, nnz, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  cout<<"ILU setup: "<<elapsedTime<<" ms"<<endl;
  //Perform ILU0 factorization
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  cusparseDcsrilu0(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, descr_M, csrValPre, csrRowPtrAdev, csrColIndAdev, info_M);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  cout<<"ILU formation: "<<elapsedTime<<" ms"<<endl;
  //double *csrValPreHost = new double[nnz];
  //cudaMemcpy(csrValPreHost, csrValPre, nnz*sizeof(double), cudaMemcpyDeviceToHost);
  //for (int i=0; i<nnz; i++) {
  //  ilu<<csrValPreHost[i]<<endl;
  //}
  //ilu.close();
  double bnorm, snorm=0, err, alpha=1.0, beta, omega=1.0, rho=1.0, rho_1, resid=0; //BiCG scalars - previously global variables
  int flag, iter;
  //For cusparse csrmv function
  double d_one = 1.0;
  double dzero = 0.0;
  double temp=0, temp2=0;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  // r = b - A*x
  cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, x, &dzero, r);
  cublasDscal(M, -1.0, r, 1);
  cublasDaxpy(M, 1.0, b, 1, r, 1);
  cublasDcopy(N, r, 1, p, 1);
  cublasDcopy(N, r, 1, r_tld, 1);
  bnorm = cublasDnrm2(N, b, 1);
  //To find Error: error = ||r||/||b||
  err = cublasDnrm2(N, r, 1)/bnorm;
  if (err<Tol) {
    cout<<"Solution has already converged"<<endl;
    return;
  }
  for (iter=0; iter<max_it; iter++) {
    rho_1 = rho;
    rho = cublasDdot(N, r_tld, 1, r, 1);
    //cout<<"Rho: "<<rho<<endl;
    if (rho == 0) //check for breakdown
      break;
    //For every iteration after the first
    if (iter>0) {
      beta = (rho/rho_1)*(alpha/omega);
      //p = r+ beta(p-omega*v)
      cublasDaxpy(N, -omega, v, 1, p, 1);
      cublasDscal(N, beta, p, 1);
      cublasDaxpy(N, 1.0, r, 1, p, 1);
    }
    // p_hat = inv(M)*p via the L then U triangular solves (t is scratch).
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L, p, t);
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U, t, p_hat);
    //v = A*p_hat
    //cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p, &dzero, v);
    cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, p_hat, &dzero, v);
    /*double *vhost = new double[jacSize];
    cudaMemcpy(vhost, v, jacSize*sizeof(double), cudaMemcpyDeviceToHost);
    ofstream phat("v.txt");
    for (int i=0; i<jacSize; i++) phat<<vhost[i]<<endl;
    phat.close();*/
    alpha = rho/cublasDdot(N, r_tld, 1, v, 1); //alpha = rho/(r_tld,v)
    cublasDaxpy(N, -alpha, v, 1, r, 1); //s=r - alpha*v
    cublasDcopy(N, r, 1, s, 1);
    //cublasDaxpy(N, alpha, p, 1, x, 1);
    cublasDaxpy(N, alpha, p_hat, 1, x, 1); //x = x+ alpha*p
    //Check for convergence
    snorm = cublasDnrm2(N, s, 1);
    if (snorm/bnorm < Tol) {
      resid = snorm/bnorm;
      // NOTE(review): the early exit is deliberately disabled here (compare
      // biCGStab above, which breaks at this point) - confirm intended.
      //break;
    }
    //Preconditioner to find t
    //M=I implies s = s_hat => t=As
    //s_hat = M\s
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_L, csrValPre, csrRowPtrAdev, csrColIndAdev, info_L, r, t);
    cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, &d_one, descr_U, csrValPre, csrRowPtrAdev, csrColIndAdev, info_U, t, s_hat);
    //t=A*s_hat
    //cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s, &dzero, t);
    cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, M, N, nnz, &d_one, descr_A, csrValAdev, csrRowPtrAdev, csrColIndAdev, s_hat, &dzero, t);
    temp = cublasDdot(N, t, 1, r, 1);
    temp2 = cublasDdot(N, t, 1, t, 1);
    omega = temp/temp2;
    //x = x+ omega*s
    //cublasSaxpy(N, omega, s, 1, x, 1);
    cublasDaxpy(N, omega, s_hat, 1, x, 1);
    //r = s-omega*t
    cublasDaxpy(N, -omega, t, 1, r, 1);
    err = cublasDnrm2(N, r, 1)/bnorm;
    if (err<=Tol) {
      resid = cublasDnrm2(N, s, 1)/bnorm;
      break;
    }
    if (omega == 0.0) break;
  }
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  cout<<"BiCG Stab: "<<elapsedTime<<" ms"<<endl;
  // Classify the exit condition and report.
  if (err <= Tol || snorm/bnorm < Tol) flag = 0;
  else if (omega == 0.0) flag = -2;
  else if (rho == 0) flag = -1;
  else flag = 1;
  if (!flag) cout<<"The solution converged with residual "<<resid<<" in "<<iter<<" iterations."<<endl;
  else cout<<"BiCGStab produced error "<<flag<<" after "<<iter<<" iterations."<<endl;
}

// Power iteration for the dominant eigenvalue of CSR matrix A (n x n, nnz
// nonzeros).  dev_x holds the starting vector (device), x is an n-element
// host buffer used to read back iterates; *eigen returns the eigenvalue
// estimate.  Iterates until successive estimates differ by <= 1e-5.
// NOTE(review): *eigen is read before first assignment via temp - assumes
// the caller initializes it; confirm.
void powerMethod(int *csrRowPtrA, int *csrColIndA, double *csrValA, cusparseMatDescr_t descr_A, int nnz, double *x, double *dev_x, double *dev_c, cusparseHandle_t handle, double* eigen, int n)
{
  double temp, invEigen;
  int d, i=0;
  double numOne = 1.0;
  double numZero = 0.0;
  do {
    i++;
    //c = Ax ... MV mult
    //cublasSgemv('n', n, n, 1, dev_a, n, dev_x, 1, 0, dev_c, 1);
    cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz, &numOne, descr_A, csrValA, csrRowPtrA, csrColIndA, dev_x, &numZero, dev_c);
    //copy c[] to x[]
    cublasDcopy(n, dev_c, 1, dev_x, 1);
    temp = *eigen;
    //get max value in result x[]
    d = cublasIdamax(n, dev_x, 1); //1-based index of the max-magnitude entry
    cudaMemcpy(x, dev_x, n*sizeof(double), cudaMemcpyDeviceToHost);
    *eigen = x[d-1];
    //factorize largest value out, obtain next vector
    invEigen = 1.0/(*eigen);
    cublasDscal(n, invEigen, dev_x, 1);
  } while (fabs((*eigen)-temp)>0.00001);
}
a94c98f6d3cb20ace7b61682e7114f9794f75c71.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include "../../common/polybenchUtilFuncts.h"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.7
#define NX 8192
#define NY 8192
#define GPU_DEVICE 1
#ifndef M_PI
#define M_PI 3.14159
#endif
typedef float DATA_TYPE;

// Fills A (NX x NY, row-major), r (NX) and p (NY) with deterministic values.
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
  int i, j;
  for (i = 0; i < NX; i++) {
    r [i] = i * M_PI;
    for (j = 0; j < NY; j++) {
      A [i * NY + j] = ((DATA_TYPE)i * j) / NX;
    }
  }
  for (i = 0; i < NY; i++) {
    p [i] = i * M_PI;
  }
}

// Counts elements of q and s whose CPU/GPU percent difference exceeds the
// threshold and reports the total.
void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
  int i, fail;
  fail = 0;
  for (i = 0; i < NX; i++) {
    if (percentDiff(q [i], q_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }
  for (i = 0; i < NY; i++) {
    if (percentDiff(s [i], s_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

// Reference CPU implementation: q = A*p and s = A^T * r.
void CPU__bicg(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
  int i, j;
  for (i = 0; i < NY; i++) {
    s [i] = 0.0;
  }
  for (i = 0; i < NX; i++) {
    q [i] = 0.0;
    for (j = 0; j < NY; j++) {
      q [i] = q [i] + A [i * NY + j] * p [j];
    }
  }
  for (j = 0; j < NX; j++) {
    for (i = 0; i < NY; i++) {
      s [j] = s [j] + r [i] * A [i * NY + j];
    }
  }
}

__global__ void __generated_kernel_region_0(DATA_TYPE * s);
__global__ void __generated_kernel_region_1(DATA_TYPE * A,DATA_TYPE * s,DATA_TYPE * r);
__global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * q,DATA_TYPE * p);

// IPMACC-generated OpenACC-to-HIP driver: stages the buffers, launches the
// three generated kernels (zero s, s = A^T r, q = A p) and copies back.
// FIX: the generated element counts were (8191+0) and (67108863+0), one
// element short of the 8192 / 8192*8192 actually indexed by the kernels,
// causing an out-of-bounds access on the last element of each device buffer;
// corrected to (8191+1) and (67108863+1).
void GPU__bicg(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
  int i, j;
  ipmacc_prompt((char*)"IPMACC: memory allocation s\n");
  acc_present_or_create((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin s\n");
  acc_pcopyin((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [0, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);
      hipLaunchKernelGGL(( __generated_kernel_region_0), dim3((((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)s));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      hipError_t err=hipDeviceSynchronize();
      if(err!=hipSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout s\n");
  acc_copyout_and_keep((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
  acc_present_or_create((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation r\n");
  acc_present_or_create((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation s\n");
  acc_present_or_create((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
  acc_pcopyin((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin r\n");
  acc_pcopyin((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin s\n");
  acc_pcopyin((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [1, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);
      hipLaunchKernelGGL(( __generated_kernel_region_1), dim3((((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)s), (DATA_TYPE *)acc_deviceptr((void*)r));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      hipError_t err=hipDeviceSynchronize();
      if(err!=hipSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
  acc_copyout_and_keep((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout r\n");
  acc_copyout_and_keep((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout s\n");
  acc_copyout_and_keep((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
  acc_present_or_create((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation p\n");
  acc_present_or_create((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation q\n");
  acc_present_or_create((void*)q,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
  acc_pcopyin((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin p\n");
  acc_pcopyin((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin q\n");
  acc_pcopyin((void*)q,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [2, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256);
      hipLaunchKernelGGL(( __generated_kernel_region_2), dim3((((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)q), (DATA_TYPE *)acc_deviceptr((void*)p));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      hipError_t err=hipDeviceSynchronize();
      if(err!=hipSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
  acc_copyout_and_keep((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout p\n");
  acc_copyout_and_keep((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout q\n");
  acc_copyout_and_keep((void*)q,(8191+1)*sizeof(DATA_TYPE ));
}

// Runs the GPU and CPU BiCG sub-kernels on identical inputs, times both and
// compares the outputs element-wise.
int main(int argc, char** argv)
{
  double t_start, t_end;
  DATA_TYPE* A;
  DATA_TYPE* r;
  DATA_TYPE* s;
  DATA_TYPE* p;
  DATA_TYPE* q;
  DATA_TYPE* s_GPU;
  DATA_TYPE* q_GPU;
  A = (DATA_TYPE*)malloc(NX * NY * sizeof(DATA_TYPE));
  r = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  s = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  p = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  q = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  s_GPU = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  q_GPU = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n");
  init_array(A, p, r);
  t_start = rtclock();
  GPU__bicg(A, r, s_GPU, p, q_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
  t_start = rtclock();
  CPU__bicg(A, r, s, p, q);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  compareResults(s, s_GPU, q, q_GPU);
  free(A);
  free(r);
  free(s);
  free(p);
  free(q);
  free(s_GPU);
  free(q_GPU);
  return 0;
}

// Generated kernel 0: s[i] = 0 for i in [0, NY).
__global__ void __generated_kernel_region_0(DATA_TYPE * s){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  {
    {
      {
        i=0+(__kernel_getuid_x);
        if( i < NY)
        {
          s [i] = 0.0;
        }
      }
    }
  }
  //append writeback of scalar variables
}

// Generated kernel 1: s[j] += sum_i r[i]*A[i][j]  (s = A^T r), one thread
// per column j.
__global__ void __generated_kernel_region_1(DATA_TYPE * A,DATA_TYPE * s,DATA_TYPE * r){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  int j;
  {
    {
      {
        j=0+(__kernel_getuid_x);
        if( j < NY)
        {
          for(i = 0; i < NX; i++)
          {
            s [j] = s [j] + r [i] * A [i * NY + j];
          }
        }
      }
    }
  }
  //append writeback of scalar variables
}

// Generated kernel 2: q[i] = sum_j A[i][j]*p[j]  (q = A p), one thread per
// row i.
__global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * q,DATA_TYPE * p){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  int j;
  {
    {
      {
        i=0+(__kernel_getuid_x);
        if( i < NX)
        {
          q [i] = 0.0;
          for(j = 0; j < NY; j++)
          {
            q [i] = q [i] + A [i * NY + j] * p [j];
          }
        }
      }
    }
  }
  //append writeback of scalar variables
}
a94c98f6d3cb20ace7b61682e7114f9794f75c71.cu
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <openacc.h>
#define IPMACC_MAX1(A) (A)
#define IPMACC_MAX2(A,B) (A>B?A:B)
#define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B))
#ifdef __cplusplus
#include "openacc_container.h"
#endif
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include "../../common/polybenchUtilFuncts.h"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.7
#define NX 8192
#define NY 8192
#define GPU_DEVICE 1
#ifndef M_PI
#define M_PI 3.14159
#endif
typedef float DATA_TYPE;

// Fills A (NX x NY, row-major), r (NX) and p (NY) with deterministic values.
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r)
{
  int i, j;
  for (i = 0; i < NX; i++) {
    r [i] = i * M_PI;
    for (j = 0; j < NY; j++) {
      A [i * NY + j] = ((DATA_TYPE)i * j) / NX;
    }
  }
  for (i = 0; i < NY; i++) {
    p [i] = i * M_PI;
  }
}

// Counts elements of q and s whose CPU/GPU percent difference exceeds the
// threshold and reports the total.
void compareResults(DATA_TYPE* s, DATA_TYPE* s_outputFromGpu, DATA_TYPE* q, DATA_TYPE* q_outputFromGpu)
{
  int i, fail;
  fail = 0;
  for (i = 0; i < NX; i++) {
    if (percentDiff(q [i], q_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }
  for (i = 0; i < NY; i++) {
    if (percentDiff(s [i], s_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

// Reference CPU implementation: q = A*p and s = A^T * r.
void CPU__bicg(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
  int i, j;
  for (i = 0; i < NY; i++) {
    s [i] = 0.0;
  }
  for (i = 0; i < NX; i++) {
    q [i] = 0.0;
    for (j = 0; j < NY; j++) {
      q [i] = q [i] + A [i * NY + j] * p [j];
    }
  }
  for (j = 0; j < NX; j++) {
    for (i = 0; i < NY; i++) {
      s [j] = s [j] + r [i] * A [i * NY + j];
    }
  }
}

__global__ void __generated_kernel_region_0(DATA_TYPE * s);
__global__ void __generated_kernel_region_1(DATA_TYPE * A,DATA_TYPE * s,DATA_TYPE * r);
__global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * q,DATA_TYPE * p);

// IPMACC-generated OpenACC-to-CUDA driver: stages the buffers, launches the
// three generated kernels (zero s, s = A^T r, q = A p) and copies back.
// FIX: the generated element counts were (8191+0) and (67108863+0), one
// element short of the 8192 / 8192*8192 actually indexed by the kernels,
// causing an out-of-bounds access on the last element of each device buffer;
// corrected to (8191+1) and (67108863+1).
void GPU__bicg(DATA_TYPE* A, DATA_TYPE* r, DATA_TYPE* s, DATA_TYPE* p, DATA_TYPE* q)
{
  int i, j;
  ipmacc_prompt((char*)"IPMACC: memory allocation s\n");
  acc_present_or_create((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin s\n");
  acc_pcopyin((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [0, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);
      __generated_kernel_region_0<<<(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)s));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      cudaError err=cudaDeviceSynchronize();
      if(err!=cudaSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout s\n");
  acc_copyout_and_keep((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
  acc_present_or_create((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation r\n");
  acc_present_or_create((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation s\n");
  acc_present_or_create((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
  acc_pcopyin((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin r\n");
  acc_pcopyin((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin s\n");
  acc_pcopyin((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [1, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);
      __generated_kernel_region_1<<<(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)s), (DATA_TYPE *)acc_deviceptr((void*)r));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      cudaError err=cudaDeviceSynchronize();
      if(err!=cudaSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
  acc_copyout_and_keep((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout r\n");
  acc_copyout_and_keep((void*)r,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout s\n");
  acc_copyout_and_keep((void*)s,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation A\n");
  acc_present_or_create((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation p\n");
  acc_present_or_create((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory allocation q\n");
  acc_present_or_create((void*)q,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin A\n");
  acc_pcopyin((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin p\n");
  acc_pcopyin((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyin q\n");
  acc_pcopyin((void*)q,(8191+1)*sizeof(DATA_TYPE ));
  {
    /* kernel call statement [2, -1]*/
    {
      if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256);
      __generated_kernel_region_2<<<(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)q), (DATA_TYPE *)acc_deviceptr((void*)p));
    }
    /* kernel call statement*/
    if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n");
    {
      cudaError err=cudaDeviceSynchronize();
      if(err!=cudaSuccess){
        printf("Kernel Launch Error! error code (%d)\n",err);
        assert(0&&"Launch Failure!\n");
      }
    }
  }
  ipmacc_prompt((char*)"IPMACC: memory copyout A\n");
  acc_copyout_and_keep((void*)A,(67108863+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout p\n");
  acc_copyout_and_keep((void*)p,(8191+1)*sizeof(DATA_TYPE ));
  ipmacc_prompt((char*)"IPMACC: memory copyout q\n");
  acc_copyout_and_keep((void*)q,(8191+1)*sizeof(DATA_TYPE ));
}

// Runs the GPU and CPU BiCG sub-kernels on identical inputs, times both and
// compares the outputs element-wise.
int main(int argc, char** argv)
{
  double t_start, t_end;
  DATA_TYPE* A;
  DATA_TYPE* r;
  DATA_TYPE* s;
  DATA_TYPE* p;
  DATA_TYPE* q;
  DATA_TYPE* s_GPU;
  DATA_TYPE* q_GPU;
  A = (DATA_TYPE*)malloc(NX * NY * sizeof(DATA_TYPE));
  r = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  s = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  p = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  q = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  s_GPU = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE));
  q_GPU = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE));
  fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n");
  init_array(A, p, r);
  t_start = rtclock();
  GPU__bicg(A, r, s_GPU, p, q_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
  t_start = rtclock();
  CPU__bicg(A, r, s, p, q);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  compareResults(s, s_GPU, q, q_GPU);
  free(A);
  free(r);
  free(s);
  free(p);
  free(q);
  free(s_GPU);
  free(q_GPU);
  return 0;
}

// Generated kernel 0: s[i] = 0 for i in [0, NY).
__global__ void __generated_kernel_region_0(DATA_TYPE * s){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  {
    {
      {
        i=0+(__kernel_getuid_x);
        if( i < NY)
        {
          s [i] = 0.0;
        }
      }
    }
  }
  //append writeback of scalar variables
}

// Generated kernel 1: s[j] += sum_i r[i]*A[i][j]  (s = A^T r), one thread
// per column j.
__global__ void __generated_kernel_region_1(DATA_TYPE * A,DATA_TYPE * s,DATA_TYPE * r){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  int j;
  {
    {
      {
        j=0+(__kernel_getuid_x);
        if( j < NY)
        {
          for(i = 0; i < NX; i++)
          {
            s [j] = s [j] + r [i] * A [i * NY + j];
          }
        }
      }
    }
  }
  //append writeback of scalar variables
}

// Generated kernel 2: q[i] = sum_j A[i][j]*p[j]  (q = A p), one thread per
// row i.
__global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * q,DATA_TYPE * p){
  int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x;
  int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y;
  int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z;
  int i;
  int j;
  {
    {
      {
        i=0+(__kernel_getuid_x);
        if( i < NX)
        {
          q [i] = 0.0;
          for(j = 0; j < NY; j++)
          {
            q [i] = q [i] + A [i * NY + j] * p [j];
          }
        }
      }
    }
  }
  //append writeback of scalar variables
}