hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
6a8bc6f338f93e310b1159478dc38e7602f3337a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Ising model: Hamiltonian H = -J * sum_<ij> sigma_i * sigma_j
 *
 * Author's TODO list, kept from the original:
 *  1. Calculate the energy in the program
 *  2. Calculate the heat capacity in the program
 *  3. Add more inputs to adjust the length of lattice
 *  4. A matlab code to plot data (rows are "position.x position.y spin(-1,1)")
 *  5. Compare the numerical value with the analytic value
 *  6. Move to 3D
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>  /* time() used to seed the device RNG */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

/*
 * LATTICE_LENGTH  - side length of the square lattice
 * LATTICE_2       - number of sites in the lattice
 * BOLTZMANN_CONST - Boltzmann constant (natural units, set to 1)
 * WARM_STEP       - default number of warm-up sweeps
 * MEAS_STEP       - default number of measurement sweeps
 * WARP            - default sweeps between two measurements
 */
#define LATTICE_LENGTH 1024
#define LATTICE_2 (LATTICE_LENGTH * LATTICE_LENGTH)
#define BOLTZMANN_CONST 1
#define N LATTICE_LENGTH
#define WARM_STEP 1e3
#define MEAS_STEP 1e3
#define WARP 1e1
#define NUM_THREAD_X 32
#define NUM_THREAD_Y 32
#define TEMPERATURE 4.0

/* Abort with a diagnostic if a HIP API call fails. */
#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,    \
                    hipGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__device__ int energy(int up, int down, int left, int right, int center);
__global__ void update(int *lattice, double beta, double *E_d, double *M_d,
                       double *E2_d, double *M2_d, int tag,
                       hiprandState_t *global_state);
__global__ void printstate(int *lattice);
__global__ void init_rand(hiprandState_t *global_state, unsigned long seed);

/*
 * Seed one hiprand stream per lattice site.
 * Expects a 2D launch with exactly one thread per site of the N x N lattice.
 */
__global__ void init_rand(hiprandState_t *global_state, unsigned long seed){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    hiprand_init(seed, idx + idy * N, 0, &global_state[idx + idy * N]);
}

/*
 * One Metropolis sweep over the lattice using a checkerboard update.
 *
 * Each block loads its (NUM_THREAD_X+2) x (NUM_THREAD_Y+2) tile — interior
 * plus halo, with periodic wrap at the lattice edges — into shared memory,
 * updates even-parity sites, barriers, then updates odd-parity sites.
 * Metropolis rule per site: flip is always accepted when it lowers the
 * energy, otherwise accepted with probability e^(-beta * deltaE).
 *
 * When tag == 1, the per-site energy/magnetization (and squares) are
 * accumulated into E_d/M_d/E2_d/M2_d for host-side averaging.
 *
 * NOTE(review): halo cells are not re-read between the even and odd
 * half-sweeps, so odd sites on a block edge can see pre-update neighbor
 * spins from adjacent blocks; also the accumulated E is the site energy
 * computed before a possible flip. Both behaviors are kept as in the
 * original — confirm whether they are intended approximations.
 */
__global__ void update(int *lattice, double beta, double *E_d, double *M_d,
                       double *E2_d, double *M2_d, int tag,
                       hiprandState_t *global_state){
    const int itx = threadIdx.x;
    const int ity = threadIdx.y;
    const int idx = blockIdx.x * blockDim.x + itx;
    const int idy = blockIdx.y * blockDim.y + ity;
    const int index = idx * N + idy;   /* row idx, column idy */

    __shared__ int lat[NUM_THREAD_X + 2][NUM_THREAD_Y + 2];

    lat[itx + 1][ity + 1] = lattice[index];

    /* Load the four halo edges; lattice edges wrap around (periodic BC). */
    if (idx == 0) {
        lat[itx][ity + 1] = lattice[index + (N - 1) * N];
    } else if (itx == 0) {
        lat[itx][ity + 1] = lattice[index - N];
    }
    if (idx == N - 1) {
        lat[itx + 2][ity + 1] = lattice[index - (N - 1) * N];
    } else if (itx == NUM_THREAD_X - 1) {
        /* BUGFIX: the site one row below is index + N; the original read
         * index + N - 1, the site diagonally below-left (off by one). */
        lat[itx + 2][ity + 1] = lattice[index + N];
    }
    if (idy == 0) {
        lat[itx + 1][ity] = lattice[index + N - 1];
    } else if (ity == 0) {
        lat[itx + 1][ity] = lattice[index - 1];
    }
    if (idy == N - 1) {
        lat[itx + 1][ity + 2] = lattice[index - (N - 1)];
    } else if (ity == NUM_THREAD_Y - 1) {
        /* was NUM_THREAD_X - 1; same value here, but Y is the right macro */
        lat[itx + 1][ity + 2] = lattice[index + 1];
    }

    /* One uniform random number per site per sweep. */
    hiprandState_t local_state = global_state[index];
    double pro_rand = hiprand_uniform(&local_state);
    global_state[index] = local_state;
    __syncthreads();   /* tile + halo fully loaded before anyone reads it */

    double E = 0.0;    /* site energy, recorded for measurement */

    /* Even-parity half-sweep. */
    if ((idx + idy) % 2 == 0) {
        int up     = lat[itx][ity + 1];
        int down   = lat[itx + 2][ity + 1];
        int left   = lat[itx + 1][ity];
        int right  = lat[itx + 1][ity + 2];
        int center = lat[itx + 1][ity + 1];
        E = energy(up, down, left, right, center);
        double deltaE = -2.0 * E;   /* energy change if center flips */
        if (deltaE < 0 || pro_rand <= exp(-beta * deltaE)) {
            lat[itx + 1][ity + 1] = -center;
        }
    }
    __syncthreads();   /* even half-sweep done before odd sites read */

    /* Odd-parity half-sweep. */
    if ((idx + idy) % 2 == 1) {
        int up     = lat[itx][ity + 1];
        int down   = lat[itx + 2][ity + 1];
        int left   = lat[itx + 1][ity];
        int right  = lat[itx + 1][ity + 2];
        int center = lat[itx + 1][ity + 1];
        E = energy(up, down, left, right, center);
        double deltaE = -2.0 * E;
        if (deltaE < 0 || pro_rand <= exp(-beta * deltaE)) {
            lat[itx + 1][ity + 1] = -center;
        }
    }
    __syncthreads();   /* all updates done before write-back */

    lattice[index] = lat[itx + 1][ity + 1];

    if (tag == 1) {
        int spin = lat[itx + 1][ity + 1];
        E_d[index] += E;
        M_d[index] += spin;
        E2_d[index] += E * E;
        M2_d[index] += (double)spin * spin;
    }
}

/*
 * Print every site as "x, y, spin" (debugging only; serialized and slow).
 * NOTE(review): this indexes the lattice as idx + idy * N while update()
 * uses idx * N + idy, so the printed picture is the transpose of
 * update()'s layout — confirm before using for plots.
 */
__global__ void printstate(int *lattice) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx < N && idy < N) {
        printf("%d, %d, %d\n", idx, idy, lattice[idx + idy * N]);
    }
}

/*
 * Nearest-neighbor energy of `center`: -center * (up + down + left + right).
 * Spins are +/-1, so the result is an integer in [-4, 4].
 * (The original computed this in a double and truncated it to int.)
 */
__device__ int energy(int up, int down, int left, int right, int center){
    return -center * (up + down + left + right);
}

/*
 * Usage: ./ising [T] [warm_steps] [measure_steps] [measure_interval]
 * T is usually in (1, 6); the critical temperature is around 2.27.
 * Prints one line: "T <E> <|M|> heat_capacity magnetic_susceptibility".
 */
int main (int argc, char *argv[]){
    double T = TEMPERATURE;
    int warmsteps = WARM_STEP;
    int nout = MEAS_STEP;
    int warp = WARP;

    if (argc > 1) T = atof(argv[1]);
    if (argc > 2) warmsteps = atoi(argv[2]);
    if (argc > 3) nout = atoi(argv[3]);
    if (argc > 4) warp = atoi(argv[4]);
    if (warp <= 0) warp = 1;   /* guard: warp is a modulus below */

    /* Sizes of the spin lattice (int) and the accumulators (double). */
    const size_t bytes_lattice = LATTICE_2 * sizeof(int);
    const size_t bytes_E = LATTICE_2 * sizeof(double);
    const size_t bytes_M = LATTICE_2 * sizeof(double);

    int *lattice = (int*)malloc(bytes_lattice);
    double *E  = (double*)malloc(bytes_E);
    double *M  = (double*)malloc(bytes_M);
    double *E2 = (double*)malloc(bytes_E);
    double *M2 = (double*)malloc(bytes_M);
    if (!lattice || !E || !M || !E2 || !M2) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    /* Random +/-1 initial spins; accumulators start at zero. */
    for (int i = 0; i < LATTICE_2; i++){
        lattice[i] = 2 * (rand() % 2) - 1;
        E[i] = M[i] = E2[i] = M2[i] = 0.0;
    }

    dim3 grid(LATTICE_LENGTH / NUM_THREAD_X, LATTICE_LENGTH / NUM_THREAD_Y, 1);
    dim3 thread(NUM_THREAD_X, NUM_THREAD_Y, 1);

    /* Per-site RNG states on the device. */
    hiprandState_t *global_state;
    HIP_CHECK(hipMalloc(&global_state, LATTICE_2 * sizeof(hiprandState_t)));
    hipLaunchKernelGGL(init_rand, grid, thread, 0, 0,
                       global_state, (unsigned long)time(NULL));
    HIP_CHECK(hipGetLastError());

    /* beta = 1 / (k_B * T), the inverse temperature. */
    double beta = 1.0 / (BOLTZMANN_CONST * T);

    int *d_lattice;
    double *E_d, *M_d, *E2_d, *M2_d;
    HIP_CHECK(hipMalloc((void **)&d_lattice, bytes_lattice));
    HIP_CHECK(hipMalloc((void **)&E_d, bytes_E));
    HIP_CHECK(hipMalloc((void **)&M_d, bytes_M));
    HIP_CHECK(hipMalloc((void **)&E2_d, bytes_E));
    HIP_CHECK(hipMalloc((void **)&M2_d, bytes_M));
    HIP_CHECK(hipMemcpy(d_lattice, lattice, bytes_lattice, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(E_d, E, bytes_E, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(M_d, M, bytes_M, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(E2_d, E2, bytes_E, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(M2_d, M2, bytes_M, hipMemcpyHostToDevice));

    /* Enlarge the printf FIFO so printstate() could dump every site. */
    HIP_CHECK(hipDeviceSetLimit(hipLimitPrintfFifoSize, N * N * sizeof(int)));

    /* Warm-up sweeps (tag = 0: no measurement). Kernels on the same
     * stream serialize, so one sync after the loop suffices. */
    for (int iter = 0; iter < warmsteps; iter++){
        hipLaunchKernelGGL(update, grid, thread, 0, 0, d_lattice, beta,
                           E_d, M_d, E2_d, M2_d, 0, global_state);
    }
    HIP_CHECK(hipGetLastError());
    HIP_CHECK(hipDeviceSynchronize());

    /* Measurement sweeps: accumulate every `warp`-th sweep. */
    int cnt2 = 0;
    for (int nstep = 0; nstep < nout; nstep++){
        int tag = (nstep % warp == 0) ? 1 : 0;
        cnt2 += tag;
        hipLaunchKernelGGL(update, grid, thread, 0, 0, d_lattice, beta,
                           E_d, M_d, E2_d, M2_d, tag, global_state);
    }
    HIP_CHECK(hipGetLastError());
    HIP_CHECK(hipDeviceSynchronize());

    /* BUGFIX: the original copied bytes_E (LATTICE_2 doubles) into
     * `lattice`, a LATTICE_2-int buffer — a 2x heap overflow. */
    HIP_CHECK(hipMemcpy(lattice, d_lattice, bytes_lattice, hipMemcpyDeviceToHost));
    HIP_CHECK(hipMemcpy(E, E_d, bytes_E, hipMemcpyDeviceToHost));
    HIP_CHECK(hipMemcpy(M, M_d, bytes_M, hipMemcpyDeviceToHost));
    HIP_CHECK(hipMemcpy(E2, E2_d, bytes_E, hipMemcpyDeviceToHost));
    HIP_CHECK(hipMemcpy(M2, M2_d, bytes_M, hipMemcpyDeviceToHost));

    double energy_sum = 0.0, magnetization = 0.0;
    double energy2 = 0.0, magnetization2 = 0.0;
    for (int i = 0; i < LATTICE_2; i++){
        energy_sum     += E[i];
        magnetization  += M[i];
        energy2        += E2[i];
        magnetization2 += M2[i];
    }

    if (cnt2 == 0) cnt2 = 1;   /* guard: nout == 0 means nothing measured */
    /* /2 and /4: each bond is counted twice (once per endpoint). */
    double avg_E  = energy_sum / cnt2 / (double)LATTICE_2 / 2.0;
    double avg_M  = fabs(magnetization / cnt2 / (double)LATTICE_2);
    double avg_E2 = energy2 / cnt2 / (double)LATTICE_2 / 4.0;
    double avg_M2 = magnetization2 / cnt2 / (double)LATTICE_2;
    double heat_cap = (avg_E2 - avg_E * avg_E) / T / T;
    double mag_sus  = (avg_M2 - avg_M * avg_M) / T;

    printf("%5f %5f %5f %5f %5f\n", T, avg_E, avg_M, heat_cap, mag_sus);

    free(lattice); free(E); free(M); free(E2); free(M2);
    HIP_CHECK(hipFree(d_lattice));
    HIP_CHECK(hipFree(E_d));
    HIP_CHECK(hipFree(M_d));
    HIP_CHECK(hipFree(E2_d));
    HIP_CHECK(hipFree(M2_d));
    HIP_CHECK(hipFree(global_state));   /* BUGFIX: was leaked */
    return 0;
}
6a8bc6f338f93e310b1159478dc38e7602f3337a.cu
/*
 * Ising model: Hamiltonian H = -J * sum_<ij> sigma_i * sigma_j
 *
 * Author's TODO list, kept from the original:
 *  1. Calculate the energy in the program
 *  2. Calculate the heat capacity in the program
 *  3. Add more inputs to adjust the length of lattice
 *  4. A matlab code to plot data (rows are "position.x position.y spin(-1,1)")
 *  5. Compare the numerical value with the analytic value
 *  6. Move to 3D
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>  /* time() used to seed the device RNG */
#include <curand.h>
#include <curand_kernel.h>

/*
 * LATTICE_LENGTH  - side length of the square lattice
 * LATTICE_2       - number of sites in the lattice
 * BOLTZMANN_CONST - Boltzmann constant (natural units, set to 1)
 * WARM_STEP       - default number of warm-up sweeps
 * MEAS_STEP       - default number of measurement sweeps
 * WARP            - default sweeps between two measurements
 */
#define LATTICE_LENGTH 1024
#define LATTICE_2 (LATTICE_LENGTH * LATTICE_LENGTH)
#define BOLTZMANN_CONST 1
#define N LATTICE_LENGTH
#define WARM_STEP 1e3
#define MEAS_STEP 1e3
#define WARP 1e1
#define NUM_THREAD_X 32
#define NUM_THREAD_Y 32
#define TEMPERATURE 4.0

/* Abort with a diagnostic if a CUDA API call fails. */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__device__ int energy(int up, int down, int left, int right, int center);
__global__ void update(int *lattice, double beta, double *E_d, double *M_d,
                       double *E2_d, double *M2_d, int tag,
                       curandState *global_state);
__global__ void printstate(int *lattice);
__global__ void init_rand(curandState *global_state, unsigned long seed);

/*
 * Seed one curand stream per lattice site.
 * Expects a 2D launch with exactly one thread per site of the N x N lattice.
 */
__global__ void init_rand(curandState *global_state, unsigned long seed){
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    curand_init(seed, idx + idy * N, 0, &global_state[idx + idy * N]);
}

/*
 * One Metropolis sweep over the lattice using a checkerboard update.
 *
 * Each block loads its (NUM_THREAD_X+2) x (NUM_THREAD_Y+2) tile — interior
 * plus halo, with periodic wrap at the lattice edges — into shared memory,
 * updates even-parity sites, barriers, then updates odd-parity sites.
 * Metropolis rule per site: flip is always accepted when it lowers the
 * energy, otherwise accepted with probability e^(-beta * deltaE).
 *
 * When tag == 1, the per-site energy/magnetization (and squares) are
 * accumulated into E_d/M_d/E2_d/M2_d for host-side averaging.
 *
 * NOTE(review): halo cells are not re-read between the even and odd
 * half-sweeps, so odd sites on a block edge can see pre-update neighbor
 * spins from adjacent blocks; also the accumulated E is the site energy
 * computed before a possible flip. Both behaviors are kept as in the
 * original — confirm whether they are intended approximations.
 */
__global__ void update(int *lattice, double beta, double *E_d, double *M_d,
                       double *E2_d, double *M2_d, int tag,
                       curandState *global_state){
    const int itx = threadIdx.x;
    const int ity = threadIdx.y;
    const int idx = blockIdx.x * blockDim.x + itx;
    const int idy = blockIdx.y * blockDim.y + ity;
    const int index = idx * N + idy;   /* row idx, column idy */

    __shared__ int lat[NUM_THREAD_X + 2][NUM_THREAD_Y + 2];

    lat[itx + 1][ity + 1] = lattice[index];

    /* Load the four halo edges; lattice edges wrap around (periodic BC). */
    if (idx == 0) {
        lat[itx][ity + 1] = lattice[index + (N - 1) * N];
    } else if (itx == 0) {
        lat[itx][ity + 1] = lattice[index - N];
    }
    if (idx == N - 1) {
        lat[itx + 2][ity + 1] = lattice[index - (N - 1) * N];
    } else if (itx == NUM_THREAD_X - 1) {
        /* BUGFIX: the site one row below is index + N; the original read
         * index + N - 1, the site diagonally below-left (off by one). */
        lat[itx + 2][ity + 1] = lattice[index + N];
    }
    if (idy == 0) {
        lat[itx + 1][ity] = lattice[index + N - 1];
    } else if (ity == 0) {
        lat[itx + 1][ity] = lattice[index - 1];
    }
    if (idy == N - 1) {
        lat[itx + 1][ity + 2] = lattice[index - (N - 1)];
    } else if (ity == NUM_THREAD_Y - 1) {
        /* was NUM_THREAD_X - 1; same value here, but Y is the right macro */
        lat[itx + 1][ity + 2] = lattice[index + 1];
    }

    /* One uniform random number per site per sweep. */
    curandState local_state = global_state[index];
    double pro_rand = curand_uniform(&local_state);
    global_state[index] = local_state;
    __syncthreads();   /* tile + halo fully loaded before anyone reads it */

    double E = 0.0;    /* site energy, recorded for measurement */

    /* Even-parity half-sweep. */
    if ((idx + idy) % 2 == 0) {
        int up     = lat[itx][ity + 1];
        int down   = lat[itx + 2][ity + 1];
        int left   = lat[itx + 1][ity];
        int right  = lat[itx + 1][ity + 2];
        int center = lat[itx + 1][ity + 1];
        E = energy(up, down, left, right, center);
        double deltaE = -2.0 * E;   /* energy change if center flips */
        if (deltaE < 0 || pro_rand <= exp(-beta * deltaE)) {
            lat[itx + 1][ity + 1] = -center;
        }
    }
    __syncthreads();   /* even half-sweep done before odd sites read */

    /* Odd-parity half-sweep. */
    if ((idx + idy) % 2 == 1) {
        int up     = lat[itx][ity + 1];
        int down   = lat[itx + 2][ity + 1];
        int left   = lat[itx + 1][ity];
        int right  = lat[itx + 1][ity + 2];
        int center = lat[itx + 1][ity + 1];
        E = energy(up, down, left, right, center);
        double deltaE = -2.0 * E;
        if (deltaE < 0 || pro_rand <= exp(-beta * deltaE)) {
            lat[itx + 1][ity + 1] = -center;
        }
    }
    __syncthreads();   /* all updates done before write-back */

    lattice[index] = lat[itx + 1][ity + 1];

    if (tag == 1) {
        int spin = lat[itx + 1][ity + 1];
        E_d[index] += E;
        M_d[index] += spin;
        E2_d[index] += E * E;
        M2_d[index] += (double)spin * spin;
    }
}

/*
 * Print every site as "x, y, spin" (debugging only; serialized and slow).
 * NOTE(review): this indexes the lattice as idx + idy * N while update()
 * uses idx * N + idy, so the printed picture is the transpose of
 * update()'s layout — confirm before using for plots.
 */
__global__ void printstate(int *lattice) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx < N && idy < N) {
        printf("%d, %d, %d\n", idx, idy, lattice[idx + idy * N]);
    }
}

/*
 * Nearest-neighbor energy of `center`: -center * (up + down + left + right).
 * Spins are +/-1, so the result is an integer in [-4, 4].
 * (The original computed this in a double and truncated it to int.)
 */
__device__ int energy(int up, int down, int left, int right, int center){
    return -center * (up + down + left + right);
}

/*
 * Usage: ./ising [T] [warm_steps] [measure_steps] [measure_interval]
 * T is usually in (1, 6); the critical temperature is around 2.27.
 * Prints one line: "T <E> <|M|> heat_capacity magnetic_susceptibility".
 */
int main (int argc, char *argv[]){
    double T = TEMPERATURE;
    int warmsteps = WARM_STEP;
    int nout = MEAS_STEP;
    int warp = WARP;

    if (argc > 1) T = atof(argv[1]);
    if (argc > 2) warmsteps = atoi(argv[2]);
    if (argc > 3) nout = atoi(argv[3]);
    if (argc > 4) warp = atoi(argv[4]);
    if (warp <= 0) warp = 1;   /* guard: warp is a modulus below */

    /* Sizes of the spin lattice (int) and the accumulators (double). */
    const size_t bytes_lattice = LATTICE_2 * sizeof(int);
    const size_t bytes_E = LATTICE_2 * sizeof(double);
    const size_t bytes_M = LATTICE_2 * sizeof(double);

    int *lattice = (int*)malloc(bytes_lattice);
    double *E  = (double*)malloc(bytes_E);
    double *M  = (double*)malloc(bytes_M);
    double *E2 = (double*)malloc(bytes_E);
    double *M2 = (double*)malloc(bytes_M);
    if (!lattice || !E || !M || !E2 || !M2) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }

    /* Random +/-1 initial spins; accumulators start at zero. */
    for (int i = 0; i < LATTICE_2; i++){
        lattice[i] = 2 * (rand() % 2) - 1;
        E[i] = M[i] = E2[i] = M2[i] = 0.0;
    }

    dim3 grid(LATTICE_LENGTH / NUM_THREAD_X, LATTICE_LENGTH / NUM_THREAD_Y, 1);
    dim3 thread(NUM_THREAD_X, NUM_THREAD_Y, 1);

    /* Per-site RNG states on the device. */
    curandState *global_state;
    CUDA_CHECK(cudaMalloc(&global_state, LATTICE_2 * sizeof(curandState)));
    init_rand<<<grid, thread>>>(global_state, (unsigned long)time(NULL));
    CUDA_CHECK(cudaGetLastError());

    /* beta = 1 / (k_B * T), the inverse temperature. */
    double beta = 1.0 / (BOLTZMANN_CONST * T);

    int *d_lattice;
    double *E_d, *M_d, *E2_d, *M2_d;
    CUDA_CHECK(cudaMalloc((void **)&d_lattice, bytes_lattice));
    CUDA_CHECK(cudaMalloc((void **)&E_d, bytes_E));
    CUDA_CHECK(cudaMalloc((void **)&M_d, bytes_M));
    CUDA_CHECK(cudaMalloc((void **)&E2_d, bytes_E));
    CUDA_CHECK(cudaMalloc((void **)&M2_d, bytes_M));
    CUDA_CHECK(cudaMemcpy(d_lattice, lattice, bytes_lattice, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(E_d, E, bytes_E, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(M_d, M, bytes_M, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(E2_d, E2, bytes_E, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(M2_d, M2, bytes_M, cudaMemcpyHostToDevice));

    /* Enlarge the printf FIFO so printstate() could dump every site. */
    CUDA_CHECK(cudaDeviceSetLimit(cudaLimitPrintfFifoSize, N * N * sizeof(int)));

    /* Warm-up sweeps (tag = 0: no measurement). Kernels on the same
     * stream serialize, so one sync after the loop suffices. */
    for (int iter = 0; iter < warmsteps; iter++){
        update<<<grid, thread>>>(d_lattice, beta, E_d, M_d, E2_d, M2_d, 0,
                                 global_state);
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    /* Measurement sweeps: accumulate every `warp`-th sweep. */
    int cnt2 = 0;
    for (int nstep = 0; nstep < nout; nstep++){
        int tag = (nstep % warp == 0) ? 1 : 0;
        cnt2 += tag;
        update<<<grid, thread>>>(d_lattice, beta, E_d, M_d, E2_d, M2_d, tag,
                                 global_state);
    }
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    /* BUGFIX: the original copied bytes_E (LATTICE_2 doubles) into
     * `lattice`, a LATTICE_2-int buffer — a 2x heap overflow. */
    CUDA_CHECK(cudaMemcpy(lattice, d_lattice, bytes_lattice, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(E, E_d, bytes_E, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(M, M_d, bytes_M, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(E2, E2_d, bytes_E, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(M2, M2_d, bytes_M, cudaMemcpyDeviceToHost));

    double energy_sum = 0.0, magnetization = 0.0;
    double energy2 = 0.0, magnetization2 = 0.0;
    for (int i = 0; i < LATTICE_2; i++){
        energy_sum     += E[i];
        magnetization  += M[i];
        energy2        += E2[i];
        magnetization2 += M2[i];
    }

    if (cnt2 == 0) cnt2 = 1;   /* guard: nout == 0 means nothing measured */
    /* /2 and /4: each bond is counted twice (once per endpoint). */
    double avg_E  = energy_sum / cnt2 / (double)LATTICE_2 / 2.0;
    double avg_M  = fabs(magnetization / cnt2 / (double)LATTICE_2);
    double avg_E2 = energy2 / cnt2 / (double)LATTICE_2 / 4.0;
    double avg_M2 = magnetization2 / cnt2 / (double)LATTICE_2;
    double heat_cap = (avg_E2 - avg_E * avg_E) / T / T;
    double mag_sus  = (avg_M2 - avg_M * avg_M) / T;

    printf("%5f %5f %5f %5f %5f\n", T, avg_E, avg_M, heat_cap, mag_sus);

    free(lattice); free(E); free(M); free(E2); free(M2);
    CUDA_CHECK(cudaFree(d_lattice));
    CUDA_CHECK(cudaFree(E_d));
    CUDA_CHECK(cudaFree(M_d));
    CUDA_CHECK(cudaFree(E2_d));
    CUDA_CHECK(cudaFree(M2_d));
    CUDA_CHECK(cudaFree(global_state));   /* BUGFIX: was leaked */
    return 0;
}
87933baf515d9ee67ec4374cceda833445f4e3b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** C++ client example using sockets */ #include<iostream> //cout #include<stdio.h> //printf #include<string.h> //strlen #include<string> //string #include<cstring> #include<sys/socket.h> //socket #include<arpa/inet.h> //inet_addr #include<netdb.h> //hostent #include<stdlib.h> #include "allConstant.h" #include <unistd.h> #include <thread> #include <chrono> #include "safeQueue.h" #include "quartic.h" #include <map> #include <sstream> #include <iterator> #include <unordered_set> #include <vector> #include <math.h> #include <future> #include <complex> #include <algorithm> #include "Point.h" #include "CycleTrial.h" #include "HyperTrial.h" #include "generalFunction.h" #include "userClass.h" #include "EllipseTrial.h" using namespace std; /** TCP Client class */ class tcp_client { private: int sock; std::string address; int port; struct sockaddr_in server; public: tcp_client(); Queue<string> stringQueue; Queue<string> computingQueue; bool conn(string, int); void listenString(); void repeatSend(); void computationQueueHandle(); bool send_data(string data); void addData(string data); void setupTAS(string tmpID,int epoch, string requestID,int totalStroke); void receive(char buf[]); }; map<string,int> indexMap; User allUser[userSize]; string idList[userSize]; int userNow=-1; int getUser(string tmpID); void setupUser(string tmpID,int epoch,int nodeNum,int anchorNum,int radioRange); void configNodes(string tmpID,int epoch,int nodeID,int X,int Y); void addTrajectory(string tmpID,int epoch,int traInd,int X,int Y); tcp_client::tcp_client() { sock = -1; port = 0; address = ""; } __global__ void goOver3(int n, ellipseTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = 
data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate3 = data[k].rate3; data[k].grAr=rate3*total; data[k].acAr = total; } } __global__ void bestEllipse(int n, ellipseTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate = total / (data[k].tArea +0.1); float rate3 = rate*rate*rate; if ( rate3<1.1){ data[k].grAr=rate3*total; data[k].acAr = total; data[k].rate3 = rate3; } } } __global__ void goOver2(int n, hyperTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = 
sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate3 = data[k].rate3; data[k].grAr=rate3*total; data[k].acAr = total; } } __global__ void bestHyper(int n, hyperTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate = total / (data[k].tArea +0.1); float rate3 = rate*rate*rate; if ( rate3<1.1){ data[k].grAr=rate3*total; data[k].acAr = total; data[k].rate3 = rate3; } } } __global__ void bestTwoCycle(int n, twoCycleTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x1=data[k].c1X; float y1=data[k].c1Y; float x2=data[k].c2X; float y2=data[k].c2Y; float r1=data[k].h1*data[k].d; float r2=data[k].h2*data[k].d; float r3=r1-data[k].d; float rr3=r3*r3; float rr1=r1*r1; float rr2=r2*r2; float total=0.0; for(int l=0;l<m;){ float i=area[l++]; float j=area[l++]; float di1=x1-i; float dj1=y1-j; float di2=x2-i; float dj2=y2-j; if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2) total+=1.0; } float rate=total / (data[k].tArea+0.1); float rate3=rate*rate*rate; if (rate3<1.1){ data[k].grAr=rate3*total ; data[k].acAr=total; data[k].rate3=rate3; } } } __global__ void goOver1(int n, 
twoCycleTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x1=data[k].c1X; float y1=data[k].c1Y; float x2=data[k].c2X; float y2=data[k].c2Y; float r1=data[k].h1*data[k].d; float r2=data[k].h2*data[k].d; float r3=r1-data[k].d; float rr3=r3*r3; float rr1=r1*r1; float rr2=r2*r2; float total=0.0; for(int l=0;l<m;){ float i=area[l++]; float j=area[l++]; float di1=x1-i; float dj1=y1-j; float di2=x2-i; float dj2=y2-j; if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2) total+=1.0; } float rate3=data[k].rate3; data[k].grAr=rate3*total ; data[k].acAr=total; } } int getUser(string tmpID){ if(indexMap.find(tmpID)==indexMap.end()){ User curUser=User(tmpID); userNow=(userNow+1)%userSize; cout<<"Setting up a new User :"<<tmpID<<" at: "<<userNow<<endl; if(idList[userNow].length()>0)indexMap.erase(idList[userNow]); idList[userNow]=tmpID; indexMap.insert(make_pair(tmpID,userNow)); allUser[userNow]=curUser; return userNow; } else{ return indexMap[tmpID]; } } void setupUser(string tmpID,int epoch,int nodeNum,int anchorNum,int radioRange) { User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch()) { curUser.setNodes(nodeNum); curUser.setAnchor(anchorNum); curUser.setEpoch(epoch); curUser.setRange(radioRange); cout<<"setting up user: "<< tmpID <<" in: "<<getUser(tmpID)<<endl; } } void addTrajectory(string tmpID,int epoch,int traInd, int X,int Y) { User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch()) { curUser.setEpoch(epoch); curUser.setTraj(X,Y,traInd); } } void tcp_client::setupTAS(string tmpID,int epoch, string requestID, int totalStroke) { //cout<<"setting TAS"<<endl; User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch() && 
curUser.resultMap.find(requestID)==curUser.resultMap.end()) { //cout<<"push to computing Queue for : " <<tmpID+" "+requestID<<endl; curUser.resultMap.insert(make_pair(requestID,"")); curUser.genTAS(totalStroke); computingQueue.push(tmpID+" "+requestID); //cout<<"push to computing Queue for : " <<tmpID+" "+requestID<<endl; //curUser.printArea(); } else { cout<<"The computation requestion already in processing"<<endl; } } void configNodes(string tmpID,int epoch,int nodeID,int X,int Y) { User& curUser = allUser[getUser(tmpID)]; if(epoch >= curUser.getEpoch()) { curUser.setX(nodeID,X); curUser.setY(nodeID,Y); } } string getRoutingMSG2(string userRequest) { string res=""; stringstream ur(userRequest); string tmpID; string requestID; ur>>tmpID; ur>>requestID; User& curUser = allUser[getUser(tmpID)]; float *TAS,*d_TAS; int tasSize=curUser.TAS.size(); TAS=(float*)malloc(sizeof(float)*tasSize*2); hipMalloc((void**)&d_TAS, sizeof(float) *tasSize*2); int counter=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[counter++]=x; TAS[counter++]=y; } hipMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, hipMemcpyHostToDevice); //cout<<"I'm OK Here"<<endl; short hv[nodeSize][anchorSize]; curUser.getHopInfo(hv); vector<ellipseTrial> ellipseTrials=curUser.findEllipseTrial(hv); counter=0; ellipseTrial *eTri; ellipseTrial *d_eTri; if(!ellipseTrials.empty()) { eTri=(ellipseTrial*)malloc(sizeof(ellipseTrial)*ellipseTrials.size()); //cout<<"I'm OK after here"<<endl; for(ellipseTrial et: ellipseTrials) { eTri[counter++]=et; } hipMalloc((void**)&d_eTri, sizeof(ellipseTrial) * ellipseTrials.size()); hipMemcpy(d_eTri,eTri,sizeof(ellipseTrial) * ellipseTrials.size(),hipMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<ellipseTrials.size()<<endl; hipLaunchKernelGGL(( bestEllipse), dim3(2048),dim3(256), 0, 0, ellipseTrials.size(),d_eTri,d_TAS,tasSize*2); //hipFree(d_cTri); } vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv); //cout<<"number of hyperTrial: 
"<<hyperTrials.size()<<endl; hyperTrial *hTri; hyperTrial *d_hTri; int counter2=0; if(!hyperTrials.empty()) { hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size()); //cout<<"I'm OK after here"<<endl; for(hyperTrial ht: hyperTrials) { hTri[counter2++]=ht; } hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size()); hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<hyperTrials.size()<<endl; cout<<"********TAS size is: "<<tasSize<<"******"<<endl; hipLaunchKernelGGL(( bestHyper), dim3(2048),dim3(256), 0, 0, hyperTrials.size(),d_hTri,d_TAS,tasSize*2); //hipDeviceSynchronize(); //hipFree(d_hTri); } if(ellipseTrials.empty() || hyperTrials.empty())return "No result"; hipDeviceSynchronize(); hipMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *ellipseTrials.size(),hipMemcpyDeviceToHost); hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyDeviceToHost); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl; res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS); sort(eTri, eTri+counter, sortEllipseTrial); sort(hTri, hTri+counter2, sortHyperTrial); counter = 1000000 < counter ? 1000000 : counter; counter2 = 5000000 < counter2 ? 
5000000 : counter2; //hipMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter); hipMemcpy(d_eTri,eTri,sizeof(ellipseTrial) *counter, hipMemcpyHostToDevice); //hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2); hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, hipMemcpyHostToDevice); int newSize = 0; do { newSize=curUser.TAS.size(); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl; int tasInd=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[tasInd++]=x; TAS[tasInd++]=y; } //hipFree(d_TAS); //hipMalloc((void**)&d_TAS, sizeof(float) *newSize*2); hipMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, hipMemcpyHostToDevice); hipLaunchKernelGGL(( goOver3), dim3(2048),dim3(256), 0, 0, counter, d_eTri, d_TAS, newSize*2); hipLaunchKernelGGL(( goOver2), dim3(2048),dim3(256), 0, 0, counter2, d_hTri, d_TAS, newSize*2); hipDeviceSynchronize(); //free(cTri); //free(hTri); //cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter); //hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2); hipMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *counter,hipMemcpyDeviceToHost); hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,hipMemcpyDeviceToHost); res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS); }while(newSize>0.15*tasSize); hipFree(d_eTri); hipFree(d_hTri); hipFree(d_TAS); free(TAS); free(eTri); free(hTri); return res; } string getRoutingMSG(string userRequest) { string res=""; stringstream ur(userRequest); string tmpID; string requestID; ur>>tmpID; ur>>requestID; User& curUser = allUser[getUser(tmpID)]; float *TAS,*d_TAS; int tasSize=curUser.TAS.size(); TAS=(float*)malloc(sizeof(float)*tasSize*2); hipMalloc((void**)&d_TAS, sizeof(float) *tasSize*2); int counter=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[counter++]=x; TAS[counter++]=y; } hipMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, hipMemcpyHostToDevice); //cout<<"I'm OK Here"<<endl; short hv[nodeSize][anchorSize]; 
curUser.getHopInfo(hv); vector<twoCycleTrial> cycleTrials=curUser.findTwoCycleTrial(hv); counter=0; twoCycleTrial *cTri; twoCycleTrial *d_cTri; if(!cycleTrials.empty()) { cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*cycleTrials.size()); //cout<<"I'm OK after here"<<endl; for(twoCycleTrial ct: cycleTrials) { cTri[counter++]=ct; } hipMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *cycleTrials.size()); hipMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *cycleTrials.size(),hipMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<cycleTrials.size()<<endl; hipLaunchKernelGGL(( bestTwoCycle), dim3(2048),dim3(256), 0, 0, cycleTrials.size(),d_cTri,d_TAS,tasSize*2); //hipFree(d_cTri); } vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv); //cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl; hyperTrial *hTri; hyperTrial *d_hTri; int counter2=0; if(!hyperTrials.empty()) { hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size()); //cout<<"I'm OK after here"<<endl; for(hyperTrial ht: hyperTrials) { hTri[counter2++]=ht; } hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size()); hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<hyperTrials.size()<<endl; cout<<"********TAS size is: "<<tasSize<<"******"<<endl; hipLaunchKernelGGL(( bestHyper), dim3(2048),dim3(256), 0, 0, hyperTrials.size(),d_hTri,d_TAS,tasSize*2); //hipDeviceSynchronize(); //hipFree(d_hTri); } if(cycleTrials.empty() || hyperTrials.empty())return "No result"; hipDeviceSynchronize(); hipMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *cycleTrials.size(),hipMemcpyDeviceToHost); hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),hipMemcpyDeviceToHost); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl; res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS); sort(cTri, cTri+counter, sortCycleTrial); sort(hTri, hTri+counter2, sortHyperTrial); counter = 1000000 < counter ? 
1000000 : counter; counter2 = 5000000 < counter2 ? 5000000 : counter2; //hipMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter); hipMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *counter, hipMemcpyHostToDevice); //hipMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2); hipMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, hipMemcpyHostToDevice); int newSize = 0; do { newSize=curUser.TAS.size(); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl; int tasInd=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[tasInd++]=x; TAS[tasInd++]=y; } //hipFree(d_TAS); //hipMalloc((void**)&d_TAS, sizeof(float) *newSize*2); hipMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, hipMemcpyHostToDevice); hipLaunchKernelGGL(( goOver1), dim3(2048),dim3(256), 0, 0, counter, d_cTri, d_TAS, newSize*2); hipLaunchKernelGGL(( goOver2), dim3(2048),dim3(256), 0, 0, counter2, d_hTri, d_TAS, newSize*2); hipDeviceSynchronize(); //free(cTri); //free(hTri); //cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter); //hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2); hipMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *counter,hipMemcpyDeviceToHost); hipMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,hipMemcpyDeviceToHost); res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS); }while(newSize>0.15*tasSize); hipFree(d_cTri); hipFree(d_hTri); hipFree(d_TAS); free(TAS); free(cTri); free(hTri); return res; } void tcp_client::addData(string data) { int begin=data.find("*,"); int end=data.find(",*",begin+1); //cout<<data<<endl; if(begin>=0&&end>begin) { string realData=data.substr(begin+2,end-begin-2); int length=realData.length(); int comma=0; for(int i=0;i<length;i++){ if(realData[i]==',')comma++; } //cout<<realData<<endl; if(comma==5){ int first=realData.find(","); string tmpID=realData.substr(0,first); //cout<<tmpID.length()<<endl; if(tmpID.length()!=8 || tmpID.find("0.") == string::npos )return; int second=realData.find(",",first+1); int 
third=realData.find(",",second+1); int fourth=realData.find(",",third+1); int fifth=realData.find(",",fourth+1); int epoch=myStoi(realData.substr(first+1,second-first)); int mod=myStoi(realData.substr(second+1,third-second)); //cout<<mod<<endl; if(mod==0){ int nodeNum=myStoi(realData.substr(third+1,fourth-third)); int anchorNum=myStoi(realData.substr(fourth+1,fifth-fourth)); int radioRange=myStoi(realData.substr(fifth+1,length-fourth)); //cout<<"0: "<<tmpID<<":"<<epoch<<endl; if(nodeNum<nodeSize&&anchorNum<anchorSize&&radioRange<=200&&nodeNum>0&&anchorNum>0&&radioRange>0) setupUser(tmpID,epoch,nodeNum,anchorNum,radioRange); } if(mod==1){ int nodeID=myStoi(realData.substr(third+1,fourth-third)); int X=myStoi(realData.substr(fourth+1,fifth-fourth)); int Y=myStoi(realData.substr(fifth+1,length-fourth)); if(nodeID<nodeSize&&nodeID>=0) configNodes(tmpID,epoch,nodeID,X,Y); } if(mod==3){ int traInd=myStoi(realData.substr(third+1,fourth-third)); int X=myStoi(realData.substr(fourth+1,fifth-fourth)); int Y=myStoi(realData.substr(fifth+1,length-fourth)); if(traInd<strokeSize&&traInd>=0) addTrajectory(tmpID,epoch,traInd,X,Y); } if(mod==4){ //cout<<"third: "<<third<<"fourth: "<<fourth<<endl; string requestID=realData.substr(third+1,fourth-third-1); int totalStroke=myStoi(realData.substr(fourth+1,fifth-fourth)); if(totalStroke<strokeSize) { cout<<"request realDATA "<<realData<<endl; cout<<"here is the requestID "<<requestID<<endl; setupTAS(tmpID,epoch,requestID, totalStroke); } } } } else { //cout<<"here is nonesense: "<<data<<endl; } } /** Connect to a host on a certain port number */ bool tcp_client::conn(string address , int port) { //create socket if it is not already created if(sock == -1) { //Create socket sock = socket(AF_INET , SOCK_STREAM , 0); if (sock == -1) { perror("Could not create socket"); } cout<<"Socket created\n"; } else { /* OK , nothing */ } //setup address structure if(inet_addr(address.c_str()) == -1) { struct hostent *he; struct in_addr **addr_list; 
//resolve the hostname, its not an ip address if ( (he = gethostbyname( address.c_str() ) ) == NULL) { //gethostbyname failed herror("gethostbyname"); cout<<"Failed to resolve hostname\n"; return false; } //Cast the h_addr_list to in_addr , since h_addr_list also has the ip address in long format only addr_list = (struct in_addr **) he->h_addr_list; for(int i = 0; addr_list[i] != NULL; i++) { //strcpy(ip , inet_ntoa(*addr_list[i]) ); server.sin_addr = *addr_list[i]; cout<<address<<" resolved to "<<inet_ntoa(*addr_list[i])<<endl; break; } } //plain ip address else { server.sin_addr.s_addr = inet_addr( address.c_str() ); } server.sin_family = AF_INET; server.sin_port = htons( port ); //Connect to remote server if (connect(sock , (struct sockaddr *)&server , sizeof(server)) < 0) { perror("connect failed. Error"); return 1; } cout<<"Connected\n"; return true; } /** Send data to the connected host */ bool tcp_client::send_data(string data) { //Send some data if( send(sock , data.c_str() , strlen( data.c_str() ) , 0) < 0) { perror("Send failed : "); return false; } return true; } /** Receive data from the connected host */ void tcp_client::receive(char buffer[]) { buffer[0]=' '; //Receive a reply from the server if( recv(sock , buffer , 1024 , 0) < 0) { puts("recv failed"); } string to; stringstream ss(buffer); if(buffer!=NULL) { while(getline(ss,to,'\n')){ stringQueue.push(to); } } receive(buffer); } void tcp_client::repeatSend() { unsigned long counter=0; while(1) { send_data(to_string(counter++)); this_thread::sleep_for(chrono::milliseconds(5000)); } } void tcp_client::listenString() { while(1) { if(!stringQueue.isEmpty()) { string popString=stringQueue.pop(); addData(popString); } this_thread::sleep_for(chrono::microseconds(100)); } } void tcp_client::computationQueueHandle() { while(1) { if(!computingQueue.isEmpty()) { string popString=computingQueue.pop(); if(popString.size()> 10){ cout<<"computing "<<popString<<endl; string res=getRoutingMSG2(popString); 
cout<<"======Result: "<<res<<endl; send_data(popString+res); send_data(popString+res); } } this_thread::sleep_for(chrono::microseconds(100)); } } int main(int argc , char *argv[]) { srand(time(NULL)); ///* tcp_client c ; string host="131.151.90.6"; //connect to host c.conn(host , 6267); thread stringListeningThread(&tcp_client::listenString, &c); thread hh(&tcp_client::repeatSend,&c); thread cq(&tcp_client::computationQueueHandle,&c); //receive and echo reply char buffer[4096]; c.receive(buffer); //*/ return 0; }
87933baf515d9ee67ec4374cceda833445f4e3b8.cu
/** C++ client example using sockets */ #include<iostream> //cout #include<stdio.h> //printf #include<string.h> //strlen #include<string> //string #include<cstring> #include<sys/socket.h> //socket #include<arpa/inet.h> //inet_addr #include<netdb.h> //hostent #include<stdlib.h> #include "allConstant.h" #include <unistd.h> #include <thread> #include <chrono> #include "safeQueue.h" #include "quartic.h" #include <map> #include <sstream> #include <iterator> #include <unordered_set> #include <vector> #include <math.h> #include <future> #include <complex> #include <algorithm> #include "Point.h" #include "CycleTrial.h" #include "HyperTrial.h" #include "generalFunction.h" #include "userClass.h" #include "EllipseTrial.h" using namespace std; /** TCP Client class */ class tcp_client { private: int sock; std::string address; int port; struct sockaddr_in server; public: tcp_client(); Queue<string> stringQueue; Queue<string> computingQueue; bool conn(string, int); void listenString(); void repeatSend(); void computationQueueHandle(); bool send_data(string data); void addData(string data); void setupTAS(string tmpID,int epoch, string requestID,int totalStroke); void receive(char buf[]); }; map<string,int> indexMap; User allUser[userSize]; string idList[userSize]; int userNow=-1; int getUser(string tmpID); void setupUser(string tmpID,int epoch,int nodeNum,int anchorNum,int radioRange); void configNodes(string tmpID,int epoch,int nodeID,int X,int Y); void addTrajectory(string tmpID,int epoch,int traInd,int X,int Y); tcp_client::tcp_client() { sock = -1; port = 0; address = ""; } __global__ void goOver3(int n, ellipseTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float 
rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate3 = data[k].rate3; data[k].grAr=rate3*total; data[k].acAr = total; } } __global__ void bestEllipse(int n, ellipseTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))+sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate = total / (data[k].tArea +0.1); float rate3 = rate*rate*rate; if ( rate3<1.1){ data[k].grAr=rate3*total; data[k].acAr = total; data[k].rate3 = rate3; } } } __global__ void goOver2(int n, hyperTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj 
* dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate3 = data[k].rate3; data[k].grAr=rate3*total; data[k].acAr = total; } } __global__ void bestHyper(int n, hyperTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x = data[k].c3X; float y = data[k].c3Y; float r = data[k].h3 * data[k].avgD2; float a = data[k].ah * data[k].avgD1; float h1x = data[k].c1X; float h1y = data[k].c1Y; float h2x = data[k].c2X; float h2y = data[k].c2Y; float rr = r*r; float a2 = (data[k].ah-1.0)*data[k].avgD1; float total =0.0; for(int l = 0; l<m;){ float i = area[l++]; float j = area[l++]; float tt = sqrtf((i-h1x)*(i-h1x)+(j-h1y)*(j-h1y))-sqrtf((i-h2x)*(i-h2x)+(j-h2y)*(j-h2y)); float di = x-i; float dj = y-j; if(di * di + dj * dj <= rr && tt <= 2*a && tt >= 2* a2) total+=1.0; } float rate = total / (data[k].tArea +0.1); float rate3 = rate*rate*rate; if ( rate3<1.1){ data[k].grAr=rate3*total; data[k].acAr = total; data[k].rate3 = rate3; } } } __global__ void bestTwoCycle(int n, twoCycleTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int k=index;k<n;k+=stride){ float x1=data[k].c1X; float y1=data[k].c1Y; float x2=data[k].c2X; float y2=data[k].c2Y; float r1=data[k].h1*data[k].d; float r2=data[k].h2*data[k].d; float r3=r1-data[k].d; float rr3=r3*r3; float rr1=r1*r1; float rr2=r2*r2; float total=0.0; for(int l=0;l<m;){ float i=area[l++]; float j=area[l++]; float di1=x1-i; float dj1=y1-j; float di2=x2-i; float dj2=y2-j; if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2) total+=1.0; } float rate=total / (data[k].tArea+0.1); float rate3=rate*rate*rate; if (rate3<1.1){ data[k].grAr=rate3*total ; data[k].acAr=total; data[k].rate3=rate3; } } } __global__ void goOver1(int n, twoCycleTrial *data, float *area,int m){ int index = threadIdx.x+blockIdx.x*blockDim.x; int stride=blockDim.x*gridDim.x; for(int 
k=index;k<n;k+=stride){ float x1=data[k].c1X; float y1=data[k].c1Y; float x2=data[k].c2X; float y2=data[k].c2Y; float r1=data[k].h1*data[k].d; float r2=data[k].h2*data[k].d; float r3=r1-data[k].d; float rr3=r3*r3; float rr1=r1*r1; float rr2=r2*r2; float total=0.0; for(int l=0;l<m;){ float i=area[l++]; float j=area[l++]; float di1=x1-i; float dj1=y1-j; float di2=x2-i; float dj2=y2-j; if (di1*di1+dj1*dj1<=rr1 && di1*di1+dj1*dj1>rr3 && di2*di2+dj2*dj2<=rr2) total+=1.0; } float rate3=data[k].rate3; data[k].grAr=rate3*total ; data[k].acAr=total; } } int getUser(string tmpID){ if(indexMap.find(tmpID)==indexMap.end()){ User curUser=User(tmpID); userNow=(userNow+1)%userSize; cout<<"Setting up a new User :"<<tmpID<<" at: "<<userNow<<endl; if(idList[userNow].length()>0)indexMap.erase(idList[userNow]); idList[userNow]=tmpID; indexMap.insert(make_pair(tmpID,userNow)); allUser[userNow]=curUser; return userNow; } else{ return indexMap[tmpID]; } } void setupUser(string tmpID,int epoch,int nodeNum,int anchorNum,int radioRange) { User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch()) { curUser.setNodes(nodeNum); curUser.setAnchor(anchorNum); curUser.setEpoch(epoch); curUser.setRange(radioRange); cout<<"setting up user: "<< tmpID <<" in: "<<getUser(tmpID)<<endl; } } void addTrajectory(string tmpID,int epoch,int traInd, int X,int Y) { User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch()) { curUser.setEpoch(epoch); curUser.setTraj(X,Y,traInd); } } void tcp_client::setupTAS(string tmpID,int epoch, string requestID, int totalStroke) { //cout<<"setting TAS"<<endl; User& curUser = allUser[getUser(tmpID)]; //cout<<epoch<<" : "<<curUser.getEpoch()<<endl; if(epoch >= curUser.getEpoch() && curUser.resultMap.find(requestID)==curUser.resultMap.end()) { //cout<<"push to computing Queue for : " <<tmpID+" "+requestID<<endl; 
curUser.resultMap.insert(make_pair(requestID,"")); curUser.genTAS(totalStroke); computingQueue.push(tmpID+" "+requestID); //cout<<"push to computing Queue for : " <<tmpID+" "+requestID<<endl; //curUser.printArea(); } else { cout<<"The computation requestion already in processing"<<endl; } } void configNodes(string tmpID,int epoch,int nodeID,int X,int Y) { User& curUser = allUser[getUser(tmpID)]; if(epoch >= curUser.getEpoch()) { curUser.setX(nodeID,X); curUser.setY(nodeID,Y); } } string getRoutingMSG2(string userRequest) { string res=""; stringstream ur(userRequest); string tmpID; string requestID; ur>>tmpID; ur>>requestID; User& curUser = allUser[getUser(tmpID)]; float *TAS,*d_TAS; int tasSize=curUser.TAS.size(); TAS=(float*)malloc(sizeof(float)*tasSize*2); cudaMalloc((void**)&d_TAS, sizeof(float) *tasSize*2); int counter=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[counter++]=x; TAS[counter++]=y; } cudaMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, cudaMemcpyHostToDevice); //cout<<"I'm OK Here"<<endl; short hv[nodeSize][anchorSize]; curUser.getHopInfo(hv); vector<ellipseTrial> ellipseTrials=curUser.findEllipseTrial(hv); counter=0; ellipseTrial *eTri; ellipseTrial *d_eTri; if(!ellipseTrials.empty()) { eTri=(ellipseTrial*)malloc(sizeof(ellipseTrial)*ellipseTrials.size()); //cout<<"I'm OK after here"<<endl; for(ellipseTrial et: ellipseTrials) { eTri[counter++]=et; } cudaMalloc((void**)&d_eTri, sizeof(ellipseTrial) * ellipseTrials.size()); cudaMemcpy(d_eTri,eTri,sizeof(ellipseTrial) * ellipseTrials.size(),cudaMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<ellipseTrials.size()<<endl; bestEllipse<<<2048,256>>>(ellipseTrials.size(),d_eTri,d_TAS,tasSize*2); //cudaFree(d_cTri); } vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv); //cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl; hyperTrial *hTri; hyperTrial *d_hTri; int counter2=0; if(!hyperTrials.empty()) { 
hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size()); //cout<<"I'm OK after here"<<endl; for(hyperTrial ht: hyperTrials) { hTri[counter2++]=ht; } cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size()); cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<hyperTrials.size()<<endl; cout<<"********TAS size is: "<<tasSize<<"******"<<endl; bestHyper<<<2048,256>>>(hyperTrials.size(),d_hTri,d_TAS,tasSize*2); //cudaDeviceSynchronize(); //cudaFree(d_hTri); } if(ellipseTrials.empty() || hyperTrials.empty())return "No result"; cudaDeviceSynchronize(); cudaMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *ellipseTrials.size(),cudaMemcpyDeviceToHost); cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyDeviceToHost); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl; res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS); sort(eTri, eTri+counter, sortEllipseTrial); sort(hTri, hTri+counter2, sortHyperTrial); counter = 1000000 < counter ? 1000000 : counter; counter2 = 5000000 < counter2 ? 
5000000 : counter2; //cudaMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter); cudaMemcpy(d_eTri,eTri,sizeof(ellipseTrial) *counter, cudaMemcpyHostToDevice); //cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2); cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, cudaMemcpyHostToDevice); int newSize = 0; do { newSize=curUser.TAS.size(); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl; int tasInd=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[tasInd++]=x; TAS[tasInd++]=y; } //cudaFree(d_TAS); //cudaMalloc((void**)&d_TAS, sizeof(float) *newSize*2); cudaMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, cudaMemcpyHostToDevice); goOver3<<<2048,256>>>(counter, d_eTri, d_TAS, newSize*2); goOver2<<<2048,256>>>(counter2, d_hTri, d_TAS, newSize*2); cudaDeviceSynchronize(); //free(cTri); //free(hTri); //cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter); //hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2); cudaMemcpy(eTri,d_eTri,sizeof(ellipseTrial) *counter,cudaMemcpyDeviceToHost); cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,cudaMemcpyDeviceToHost); res+=findBestTry2(eTri,hTri, counter, counter2, curUser.TAS); }while(newSize>0.15*tasSize); cudaFree(d_eTri); cudaFree(d_hTri); cudaFree(d_TAS); free(TAS); free(eTri); free(hTri); return res; } string getRoutingMSG(string userRequest) { string res=""; stringstream ur(userRequest); string tmpID; string requestID; ur>>tmpID; ur>>requestID; User& curUser = allUser[getUser(tmpID)]; float *TAS,*d_TAS; int tasSize=curUser.TAS.size(); TAS=(float*)malloc(sizeof(float)*tasSize*2); cudaMalloc((void**)&d_TAS, sizeof(float) *tasSize*2); int counter=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[counter++]=x; TAS[counter++]=y; } cudaMemcpy(d_TAS, TAS, sizeof(float) *tasSize*2, cudaMemcpyHostToDevice); //cout<<"I'm OK Here"<<endl; short hv[nodeSize][anchorSize]; curUser.getHopInfo(hv); vector<twoCycleTrial> 
cycleTrials=curUser.findTwoCycleTrial(hv); counter=0; twoCycleTrial *cTri; twoCycleTrial *d_cTri; if(!cycleTrials.empty()) { cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*cycleTrials.size()); //cout<<"I'm OK after here"<<endl; for(twoCycleTrial ct: cycleTrials) { cTri[counter++]=ct; } cudaMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *cycleTrials.size()); cudaMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *cycleTrials.size(),cudaMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<cycleTrials.size()<<endl; bestTwoCycle<<<2048,256>>>(cycleTrials.size(),d_cTri,d_TAS,tasSize*2); //cudaFree(d_cTri); } vector<hyperTrial> hyperTrials=curUser.findHyperTrial(hv); //cout<<"number of hyperTrial: "<<hyperTrials.size()<<endl; hyperTrial *hTri; hyperTrial *d_hTri; int counter2=0; if(!hyperTrials.empty()) { hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*hyperTrials.size()); //cout<<"I'm OK after here"<<endl; for(hyperTrial ht: hyperTrials) { hTri[counter2++]=ht; } cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *hyperTrials.size()); cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyHostToDevice); cout<<"finish copy totoal trial: "<<hyperTrials.size()<<endl; cout<<"********TAS size is: "<<tasSize<<"******"<<endl; bestHyper<<<2048,256>>>(hyperTrials.size(),d_hTri,d_TAS,tasSize*2); //cudaDeviceSynchronize(); //cudaFree(d_hTri); } if(cycleTrials.empty() || hyperTrials.empty())return "No result"; cudaDeviceSynchronize(); cudaMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *cycleTrials.size(),cudaMemcpyDeviceToHost); cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *hyperTrials.size(),cudaMemcpyDeviceToHost); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<tasSize<<endl; res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS); sort(cTri, cTri+counter, sortCycleTrial); sort(hTri, hTri+counter2, sortHyperTrial); counter = 1000000 < counter ? 1000000 : counter; counter2 = 5000000 < counter2 ? 
5000000 : counter2; //cudaMalloc((void**)&d_cTri, sizeof(twoCycleTrial) *counter); cudaMemcpy(d_cTri,cTri,sizeof(twoCycleTrial) *counter, cudaMemcpyHostToDevice); //cudaMalloc((void**)&d_hTri, sizeof(hyperTrial) *counter2); cudaMemcpy(d_hTri,hTri,sizeof(hyperTrial) *counter2, cudaMemcpyHostToDevice); int newSize = 0; do { newSize=curUser.TAS.size(); cout<<counter<<" ||||||| "<<counter2<<" |||||||||| "<<newSize<<endl; int tasInd=0; for(string t:curUser.TAS) { stringstream tt(t); float x,y; tt>>x; tt>>y; TAS[tasInd++]=x; TAS[tasInd++]=y; } //cudaFree(d_TAS); //cudaMalloc((void**)&d_TAS, sizeof(float) *newSize*2); cudaMemcpy(d_TAS, TAS, sizeof(float) *newSize*2, cudaMemcpyHostToDevice); goOver1<<<2048,256>>>(counter, d_cTri, d_TAS, newSize*2); goOver2<<<2048,256>>>(counter2, d_hTri, d_TAS, newSize*2); cudaDeviceSynchronize(); //free(cTri); //free(hTri); //cTri=(twoCycleTrial*)malloc(sizeof(twoCycleTrial)*counter); //hTri=(hyperTrial*)malloc(sizeof(hyperTrial)*counter2); cudaMemcpy(cTri,d_cTri,sizeof(twoCycleTrial) *counter,cudaMemcpyDeviceToHost); cudaMemcpy(hTri,d_hTri,sizeof(hyperTrial) *counter2,cudaMemcpyDeviceToHost); res+=findBestTry(cTri,hTri, counter, counter2, curUser.TAS); }while(newSize>0.15*tasSize); cudaFree(d_cTri); cudaFree(d_hTri); cudaFree(d_TAS); free(TAS); free(cTri); free(hTri); return res; } void tcp_client::addData(string data) { int begin=data.find("*,"); int end=data.find(",*",begin+1); //cout<<data<<endl; if(begin>=0&&end>begin) { string realData=data.substr(begin+2,end-begin-2); int length=realData.length(); int comma=0; for(int i=0;i<length;i++){ if(realData[i]==',')comma++; } //cout<<realData<<endl; if(comma==5){ int first=realData.find(","); string tmpID=realData.substr(0,first); //cout<<tmpID.length()<<endl; if(tmpID.length()!=8 || tmpID.find("0.") == string::npos )return; int second=realData.find(",",first+1); int third=realData.find(",",second+1); int fourth=realData.find(",",third+1); int fifth=realData.find(",",fourth+1); int 
epoch=myStoi(realData.substr(first+1,second-first)); int mod=myStoi(realData.substr(second+1,third-second)); //cout<<mod<<endl; if(mod==0){ int nodeNum=myStoi(realData.substr(third+1,fourth-third)); int anchorNum=myStoi(realData.substr(fourth+1,fifth-fourth)); int radioRange=myStoi(realData.substr(fifth+1,length-fourth)); //cout<<"0: "<<tmpID<<":"<<epoch<<endl; if(nodeNum<nodeSize&&anchorNum<anchorSize&&radioRange<=200&&nodeNum>0&&anchorNum>0&&radioRange>0) setupUser(tmpID,epoch,nodeNum,anchorNum,radioRange); } if(mod==1){ int nodeID=myStoi(realData.substr(third+1,fourth-third)); int X=myStoi(realData.substr(fourth+1,fifth-fourth)); int Y=myStoi(realData.substr(fifth+1,length-fourth)); if(nodeID<nodeSize&&nodeID>=0) configNodes(tmpID,epoch,nodeID,X,Y); } if(mod==3){ int traInd=myStoi(realData.substr(third+1,fourth-third)); int X=myStoi(realData.substr(fourth+1,fifth-fourth)); int Y=myStoi(realData.substr(fifth+1,length-fourth)); if(traInd<strokeSize&&traInd>=0) addTrajectory(tmpID,epoch,traInd,X,Y); } if(mod==4){ //cout<<"third: "<<third<<"fourth: "<<fourth<<endl; string requestID=realData.substr(third+1,fourth-third-1); int totalStroke=myStoi(realData.substr(fourth+1,fifth-fourth)); if(totalStroke<strokeSize) { cout<<"request realDATA "<<realData<<endl; cout<<"here is the requestID "<<requestID<<endl; setupTAS(tmpID,epoch,requestID, totalStroke); } } } } else { //cout<<"here is nonesense: "<<data<<endl; } } /** Connect to a host on a certain port number */ bool tcp_client::conn(string address , int port) { //create socket if it is not already created if(sock == -1) { //Create socket sock = socket(AF_INET , SOCK_STREAM , 0); if (sock == -1) { perror("Could not create socket"); } cout<<"Socket created\n"; } else { /* OK , nothing */ } //setup address structure if(inet_addr(address.c_str()) == -1) { struct hostent *he; struct in_addr **addr_list; //resolve the hostname, its not an ip address if ( (he = gethostbyname( address.c_str() ) ) == NULL) { //gethostbyname 
failed herror("gethostbyname"); cout<<"Failed to resolve hostname\n"; return false; } //Cast the h_addr_list to in_addr , since h_addr_list also has the ip address in long format only addr_list = (struct in_addr **) he->h_addr_list; for(int i = 0; addr_list[i] != NULL; i++) { //strcpy(ip , inet_ntoa(*addr_list[i]) ); server.sin_addr = *addr_list[i]; cout<<address<<" resolved to "<<inet_ntoa(*addr_list[i])<<endl; break; } } //plain ip address else { server.sin_addr.s_addr = inet_addr( address.c_str() ); } server.sin_family = AF_INET; server.sin_port = htons( port ); //Connect to remote server if (connect(sock , (struct sockaddr *)&server , sizeof(server)) < 0) { perror("connect failed. Error"); return 1; } cout<<"Connected\n"; return true; } /** Send data to the connected host */ bool tcp_client::send_data(string data) { //Send some data if( send(sock , data.c_str() , strlen( data.c_str() ) , 0) < 0) { perror("Send failed : "); return false; } return true; } /** Receive data from the connected host */ void tcp_client::receive(char buffer[]) { buffer[0]=' '; //Receive a reply from the server if( recv(sock , buffer , 1024 , 0) < 0) { puts("recv failed"); } string to; stringstream ss(buffer); if(buffer!=NULL) { while(getline(ss,to,'\n')){ stringQueue.push(to); } } receive(buffer); } void tcp_client::repeatSend() { unsigned long counter=0; while(1) { send_data(to_string(counter++)); this_thread::sleep_for(chrono::milliseconds(5000)); } } void tcp_client::listenString() { while(1) { if(!stringQueue.isEmpty()) { string popString=stringQueue.pop(); addData(popString); } this_thread::sleep_for(chrono::microseconds(100)); } } void tcp_client::computationQueueHandle() { while(1) { if(!computingQueue.isEmpty()) { string popString=computingQueue.pop(); if(popString.size()> 10){ cout<<"computing "<<popString<<endl; string res=getRoutingMSG2(popString); cout<<"======Result: "<<res<<endl; send_data(popString+res); send_data(popString+res); } } 
this_thread::sleep_for(chrono::microseconds(100)); } } int main(int argc , char *argv[]) { srand(time(NULL)); ///* tcp_client c ; string host="131.151.90.6"; //connect to host c.conn(host , 6267); thread stringListeningThread(&tcp_client::listenString, &c); thread hh(&tcp_client::repeatSend,&c); thread cq(&tcp_client::computationQueueHandle,&c); //receive and echo reply char buffer[4096]; c.receive(buffer); //*/ return 0; }
342db1d09852b7ad99db84604986f621e2b37306.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _NBODY_KERNEL_H_ #define _NBODY_KERNEL_H_ #include "config.h" #define prime 120247 typedef unsigned int uint; //! Cuda random functions from "Kurs: procesory graficzne w obliczeniach rwnolegych (CUDA)" 2012 __device__ uint TausStep(uint &z, int S1, int S2, int S3, uint M) { uint b=(((z << S1) ^ z) >> S2); return z = (((z & M) << S3) ^ b); } __device__ uint LCGStep(uint &z, uint A, uint C) { return z=(A*z+C); } __device__ uint HybridTausInt(uint &z1, uint &z2, uint &z3, uint &z4) { return ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ uint funct(uint id) { return HybridTausInt(id,id,id,id); } __device__ inline float square(float a) { return a*a; } __device__ inline float distSquare2(float4 posA, float4 posB) { return square((posA.x-posB.x) + square(posA.y-posB.y) + square(posA.z-posB.z)); } __device__ inline float distSquare1(float4 posA) { float f = (square(posA.x*distanceMultiplier) + square(posA.y*distanceMultiplier) + square(posA.z*distanceMultiplier)); if(f < 1.0f) f = 1.0f; return f; } __device__ inline float dist1(float4 posA) { return sqrt(distSquare1(posA)); } __device__ inline float4 sub(float4 a, float4 b){ float4 r; r.x = a.x-b.x; r.y = a.y-b.y; r.z = a.z-b.z; return r; } __device__ inline float randPos(int seed){ return float((seed%100001))/100000.0f; } __device__ inline float4 normalize(float4 a) { float s = dist1(a); a.x /= s; a.y /= s; a.z /= s; return a; } __device__ inline void move(int x, float4 *buffer, float4 *Positions, float4 *VelocityVector) { float4 pos = Positions[x]; float4 vect = VelocityVector[x]; pos.x += vect.x/10000.0f; pos.y += vect.y/10000.0f; pos.z += vect.z/10000.0f; buffer[x] = pos; Positions[x] = pos; } __device__ inline float4 tengent(float4 v){ float4 u = make_float4(0.0f, 0.0f, whirlSpeed, 1.0f); float4 result = make_float4(u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y 
* v.x, 1.0f); float s = distSquare1(result); return result; } // kernels __global__ void randomStatic(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); VelocityVector[x] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); seed = funct(seed); Mass[x] = 1.0f; } __global__ void randomMoving(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed)*randomFactor; seed = funct(seed); posY = randPos(seed)*randomFactor; seed = funct(seed); posZ = randPos(seed)*randomFactor; VelocityVector[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void explosion(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = 0.0f; float posY = 0.0f; float posZ = 0.0f; Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed); seed = funct(seed); posY = randPos(seed); seed = funct(seed); posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); v = normalize(v); v.x *= explosionFactor; v.y *= explosionFactor; v.z *= explosionFactor; VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void explosion2(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = 
blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = 0.0f; float posY = 0.0f; float posZ = 0.0f; Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed); seed = funct(seed); posY = randPos(seed); seed = funct(seed); posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); v = normalize(v); v.x *= explosion2Factor; v.y *= explosion2Factor; v.z *= explosion2Factor; VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void heavyMiddle(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; if(x == 0){ Positions[x] = VelocityVector[x] = buffer[x] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); Mass[x] = centerMass; return; } seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); Positions[x] = buffer[x] = v; v = tengent(v); VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void simpleGravity(float4 *Positions, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; float4 force = make_float4(0.0f, 0.0f, 0.0f, 1.0f); float4 p = Positions[x]; for(int i = 0; i < bodies; i++){ if(i == x) continue; float4 forceVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f); forceVector = sub(Positions[i], p); float d = distSquare1(forceVector); forceVector = normalize(forceVector); float c = Mass[i]/d; force.x += forceVector.x * c; force.y += forceVector.y * c; force.z += forceVector.z * c; } float4 v = VelocityVector[x]; v.x += force.x; v.y += force.y; v.z += force.z; VelocityVector[x] = v; } __global__ void improvedGravity(float4 *Positions, float4 *VelocityVector, float *Mass) { __shared__ float4 tab[threads]; __shared__ float mass[threads]; int x = blockIdx.x*blockDim.x + 
threadIdx.x; float3 force = make_float3(0.0f, 0.0f, 0.0f); __syncthreads(); float4 p = Positions[x]; for(int i = 0; i < bodies; i+=threads){ __syncthreads(); tab[threadIdx.x] = Positions[threadIdx.x + i]; __syncthreads(); mass[threadIdx.x] = Mass[threadIdx.x + i]; __syncthreads(); for(int j = 0; j < blockDim.x; j++){ if(j+i == x) continue; float4 forceVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f); forceVector = sub(tab[j], p); float d = distSquare1(forceVector); forceVector = normalize(forceVector); float c = mass[j]/d; force.x += forceVector.x * c; force.y += forceVector.y * c; force.z += forceVector.z * c; } } float4 v = VelocityVector[x]; v.x += force.x; v.y += force.y; v.z += force.z; VelocityVector[x] = v; } __global__ void bounding(float4 *Positions, float *Borders, float* temp) { int x = blockIdx.x*blockDim.x + threadIdx.x; float4 p = Positions[x]; temp[x] = p.x; temp[bodies+x] = p.x; temp[bodies*2 + x] = p.y; temp[bodies*3 + x] = p.y; temp[bodies*4 + x] = p.z; temp[bodies*5 + x] = p.z; for(int i = 1; i < bodies; i*= 2){ if(!(x%i)){ if(temp[x] < temp[x+ i]) temp[x] = temp[x+ i]; if(temp[x+bodies] > temp[x+i + bodies]) temp[x+bodies] = temp[x+i +bodies]; if(temp[x+bodies*2] < temp[x+ i+bodies*2]) temp[x+bodies*2] = temp[x+ i +bodies*2]; if(temp[x+bodies*3] > temp[x+ i+bodies*3]) temp[x+bodies*3] = temp[x+ i +bodies*3]; if(temp[x+bodies*4] < temp[x+ i+bodies*4]) temp[x+bodies*4] = temp[x+ i +bodies*4]; if(temp[x+bodies*5] > temp[x+ i+bodies*5]) temp[x+bodies*5] = temp[x+ i +bodies*5]; } } #pragma unroll for(int i = 0; i < 6; i++){ Borders[i] = temp[bodies*i]; } } __global__ void shrink(float4 *buffer, float4 *Positions) { int x = blockIdx.x*blockDim.x + threadIdx.x; Positions[x].x *= 0.9992f; Positions[x].y *= 0.9992f; Positions[x].z *= 0.9992f; buffer[x] = Positions[x]; } __global__ void simpleMove(float4 *buffer, float4 *Positions, float4 *VelocityVector) { int x = blockIdx.x*blockDim.x + threadIdx.x; move(x, buffer, Positions, VelocityVector); } #endif
342db1d09852b7ad99db84604986f621e2b37306.cu
#ifndef _NBODY_KERNEL_H_ #define _NBODY_KERNEL_H_ #include "config.h" #define prime 120247 typedef unsigned int uint; //! Cuda random functions from "Kurs: procesory graficzne w obliczeniach równoległych (CUDA)" 2012 __device__ uint TausStep(uint &z, int S1, int S2, int S3, uint M) { uint b=(((z << S1) ^ z) >> S2); return z = (((z & M) << S3) ^ b); } __device__ uint LCGStep(uint &z, uint A, uint C) { return z=(A*z+C); } __device__ uint HybridTausInt(uint &z1, uint &z2, uint &z3, uint &z4) { return ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ uint funct(uint id) { return HybridTausInt(id,id,id,id); } __device__ inline float square(float a) { return a*a; } __device__ inline float distSquare2(float4 posA, float4 posB) { return square((posA.x-posB.x) + square(posA.y-posB.y) + square(posA.z-posB.z)); } __device__ inline float distSquare1(float4 posA) { float f = (square(posA.x*distanceMultiplier) + square(posA.y*distanceMultiplier) + square(posA.z*distanceMultiplier)); if(f < 1.0f) f = 1.0f; return f; } __device__ inline float dist1(float4 posA) { return sqrt(distSquare1(posA)); } __device__ inline float4 sub(float4 a, float4 b){ float4 r; r.x = a.x-b.x; r.y = a.y-b.y; r.z = a.z-b.z; return r; } __device__ inline float randPos(int seed){ return float((seed%100001))/100000.0f; } __device__ inline float4 normalize(float4 a) { float s = dist1(a); a.x /= s; a.y /= s; a.z /= s; return a; } __device__ inline void move(int x, float4 *buffer, float4 *Positions, float4 *VelocityVector) { float4 pos = Positions[x]; float4 vect = VelocityVector[x]; pos.x += vect.x/10000.0f; pos.y += vect.y/10000.0f; pos.z += vect.z/10000.0f; buffer[x] = pos; Positions[x] = pos; } __device__ inline float4 tengent(float4 v){ float4 u = make_float4(0.0f, 0.0f, whirlSpeed, 1.0f); float4 result = make_float4(u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y * v.x, 1.0f); float s = distSquare1(result); return result; } // kernels __global__ 
void randomStatic(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); VelocityVector[x] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); seed = funct(seed); Mass[x] = 1.0f; } __global__ void randomMoving(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed)*randomFactor; seed = funct(seed); posY = randPos(seed)*randomFactor; seed = funct(seed); posZ = randPos(seed)*randomFactor; VelocityVector[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void explosion(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = 0.0f; float posY = 0.0f; float posZ = 0.0f; Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed); seed = funct(seed); posY = randPos(seed); seed = funct(seed); posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); v = normalize(v); v.x *= explosionFactor; v.y *= explosionFactor; v.z *= explosionFactor; VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void explosion2(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; seed = funct(seed * x * prime); float posX = 0.0f; float posY = 
0.0f; float posZ = 0.0f; Positions[x] = buffer[x] = make_float4(posX, posY, posZ, 1.0f); seed = funct(seed); posX = randPos(seed); seed = funct(seed); posY = randPos(seed); seed = funct(seed); posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); v = normalize(v); v.x *= explosion2Factor; v.y *= explosion2Factor; v.z *= explosion2Factor; VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void heavyMiddle(float4 *buffer, float4 *Positions, int seed, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; if(x == 0){ Positions[x] = VelocityVector[x] = buffer[x] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); Mass[x] = centerMass; return; } seed = funct(seed * x * prime); float posX = randPos(seed); seed = funct(seed); float posY = randPos(seed); seed = funct(seed); float posZ = randPos(seed); float4 v = make_float4(posX, posY, posZ, 1.0f); Positions[x] = buffer[x] = v; v = tengent(v); VelocityVector[x] = v; seed = funct(seed); Mass[x] = float(seed%401+800)/800.0f; } __global__ void simpleGravity(float4 *Positions, float4 *VelocityVector, float *Mass) { int x = blockIdx.x*blockDim.x + threadIdx.x; float4 force = make_float4(0.0f, 0.0f, 0.0f, 1.0f); float4 p = Positions[x]; for(int i = 0; i < bodies; i++){ if(i == x) continue; float4 forceVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f); forceVector = sub(Positions[i], p); float d = distSquare1(forceVector); forceVector = normalize(forceVector); float c = Mass[i]/d; force.x += forceVector.x * c; force.y += forceVector.y * c; force.z += forceVector.z * c; } float4 v = VelocityVector[x]; v.x += force.x; v.y += force.y; v.z += force.z; VelocityVector[x] = v; } __global__ void improvedGravity(float4 *Positions, float4 *VelocityVector, float *Mass) { __shared__ float4 tab[threads]; __shared__ float mass[threads]; int x = blockIdx.x*blockDim.x + threadIdx.x; float3 force = make_float3(0.0f, 0.0f, 0.0f); __syncthreads(); float4 p = Positions[x]; for(int 
i = 0; i < bodies; i+=threads){ __syncthreads(); tab[threadIdx.x] = Positions[threadIdx.x + i]; __syncthreads(); mass[threadIdx.x] = Mass[threadIdx.x + i]; __syncthreads(); for(int j = 0; j < blockDim.x; j++){ if(j+i == x) continue; float4 forceVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f); forceVector = sub(tab[j], p); float d = distSquare1(forceVector); forceVector = normalize(forceVector); float c = mass[j]/d; force.x += forceVector.x * c; force.y += forceVector.y * c; force.z += forceVector.z * c; } } float4 v = VelocityVector[x]; v.x += force.x; v.y += force.y; v.z += force.z; VelocityVector[x] = v; } __global__ void bounding(float4 *Positions, float *Borders, float* temp) { int x = blockIdx.x*blockDim.x + threadIdx.x; float4 p = Positions[x]; temp[x] = p.x; temp[bodies+x] = p.x; temp[bodies*2 + x] = p.y; temp[bodies*3 + x] = p.y; temp[bodies*4 + x] = p.z; temp[bodies*5 + x] = p.z; for(int i = 1; i < bodies; i*= 2){ if(!(x%i)){ if(temp[x] < temp[x+ i]) temp[x] = temp[x+ i]; if(temp[x+bodies] > temp[x+i + bodies]) temp[x+bodies] = temp[x+i +bodies]; if(temp[x+bodies*2] < temp[x+ i+bodies*2]) temp[x+bodies*2] = temp[x+ i +bodies*2]; if(temp[x+bodies*3] > temp[x+ i+bodies*3]) temp[x+bodies*3] = temp[x+ i +bodies*3]; if(temp[x+bodies*4] < temp[x+ i+bodies*4]) temp[x+bodies*4] = temp[x+ i +bodies*4]; if(temp[x+bodies*5] > temp[x+ i+bodies*5]) temp[x+bodies*5] = temp[x+ i +bodies*5]; } } #pragma unroll for(int i = 0; i < 6; i++){ Borders[i] = temp[bodies*i]; } } __global__ void shrink(float4 *buffer, float4 *Positions) { int x = blockIdx.x*blockDim.x + threadIdx.x; Positions[x].x *= 0.9992f; Positions[x].y *= 0.9992f; Positions[x].z *= 0.9992f; buffer[x] = Positions[x]; } __global__ void simpleMove(float4 *buffer, float4 *Positions, float4 *VelocityVector) { int x = blockIdx.x*blockDim.x + threadIdx.x; move(x, buffer, Positions, VelocityVector); } #endif
3a9f69b583ea18a1a82c13da6761a2a6be54de90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator, typename Predicate, typename Iterator2> __global__ void is_partitioned_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::is_partitioned(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestIsPartitionedDevice(ExecutionPolicy exec) { size_t n = 1000; n = thrust::max<size_t>(n, 2); thrust::device_vector<int> v = unittest::random_integers<int>(n); thrust::device_vector<bool> result(1); v[0] = 1; v[1] = 0; hipLaunchKernelGGL(( is_partitioned_kernel), dim3(1),dim3(1), 0, 0, exec, v.begin(), v.end(), is_even<int>(), result.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } ASSERT_EQUAL(false, result[0]); thrust::partition(v.begin(), v.end(), is_even<int>()); hipLaunchKernelGGL(( is_partitioned_kernel), dim3(1),dim3(1), 0, 0, exec, v.begin(), v.end(), is_even<int>(), result.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } ASSERT_EQUAL(true, result[0]); } void TestIsPartitionedDeviceSeq() { TestIsPartitionedDevice(thrust::seq); } DECLARE_UNITTEST(TestIsPartitionedDeviceSeq); void TestIsPartitionedDeviceDevice() { TestIsPartitionedDevice(thrust::device); } DECLARE_UNITTEST(TestIsPartitionedDeviceDevice); #endif void TestIsPartitionedCudaStreams() { thrust::device_vector<int> v(4); v[0] = 1; v[1] = 1; v[2] = 1; v[3] = 0; hipStream_t s; hipStreamCreate(&s); // empty partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::hip::par.on(s), v.begin(), v.begin(), thrust::identity<int>())); // 
one element true partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::hip::par.on(s), v.begin(), v.begin() + 1, thrust::identity<int>())); // just true partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::hip::par.on(s), v.begin(), v.begin() + 2, thrust::identity<int>())); // both true & false partitions ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::hip::par.on(s), v.begin(), v.end(), thrust::identity<int>())); // one element false partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::hip::par.on(s), v.begin() + 3, v.end(), thrust::identity<int>())); v[0] = 1; v[1] = 0; v[2] = 1; v[3] = 1; // not partitioned ASSERT_EQUAL_QUIET(false, thrust::is_partitioned(thrust::hip::par.on(s), v.begin(), v.end(), thrust::identity<int>())); hipStreamDestroy(s); } DECLARE_UNITTEST(TestIsPartitionedCudaStreams);
3a9f69b583ea18a1a82c13da6761a2a6be54de90.cu
#include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator, typename Predicate, typename Iterator2> __global__ void is_partitioned_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::is_partitioned(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestIsPartitionedDevice(ExecutionPolicy exec) { size_t n = 1000; n = thrust::max<size_t>(n, 2); thrust::device_vector<int> v = unittest::random_integers<int>(n); thrust::device_vector<bool> result(1); v[0] = 1; v[1] = 0; is_partitioned_kernel<<<1,1>>>(exec, v.begin(), v.end(), is_even<int>(), result.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } ASSERT_EQUAL(false, result[0]); thrust::partition(v.begin(), v.end(), is_even<int>()); is_partitioned_kernel<<<1,1>>>(exec, v.begin(), v.end(), is_even<int>(), result.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } ASSERT_EQUAL(true, result[0]); } void TestIsPartitionedDeviceSeq() { TestIsPartitionedDevice(thrust::seq); } DECLARE_UNITTEST(TestIsPartitionedDeviceSeq); void TestIsPartitionedDeviceDevice() { TestIsPartitionedDevice(thrust::device); } DECLARE_UNITTEST(TestIsPartitionedDeviceDevice); #endif void TestIsPartitionedCudaStreams() { thrust::device_vector<int> v(4); v[0] = 1; v[1] = 1; v[2] = 1; v[3] = 0; cudaStream_t s; cudaStreamCreate(&s); // empty partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin(), v.begin(), thrust::identity<int>())); // one element true partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin(), v.begin() + 1, thrust::identity<int>())); 
// just true partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin(), v.begin() + 2, thrust::identity<int>())); // both true & false partitions ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin(), v.end(), thrust::identity<int>())); // one element false partition ASSERT_EQUAL_QUIET(true, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin() + 3, v.end(), thrust::identity<int>())); v[0] = 1; v[1] = 0; v[2] = 1; v[3] = 1; // not partitioned ASSERT_EQUAL_QUIET(false, thrust::is_partitioned(thrust::cuda::par.on(s), v.begin(), v.end(), thrust::identity<int>())); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestIsPartitionedCudaStreams);
1a5e289435d8a607b75160b4ceac9cb3f1dfff24.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixFunc.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *F = NULL; hipMalloc(&F, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixFunc), dim3(gridBlock),dim3(threadBlock), 0, 0, F,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixFunc), dim3(gridBlock),dim3(threadBlock), 0, 0, F,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixFunc), dim3(gridBlock),dim3(threadBlock), 0, 0, F,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1a5e289435d8a607b75160b4ceac9cb3f1dfff24.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixFunc.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *F = NULL; cudaMalloc(&F, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixFunc<<<gridBlock,threadBlock>>>(F,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixFunc<<<gridBlock,threadBlock>>>(F,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixFunc<<<gridBlock,threadBlock>>>(F,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a9f7111f2c1bf38c259df4f34e1c1bd45a75e761.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/images2neibs/kernel.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/images2neibs/kernel.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" #include <cstdio> namespace megdnn { namespace cuda { namespace images2neibs { #define grid_y_max 512 template <typename T> __global__ void forward_kernel(const T *src, T *dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int WH, int WW) { int NC = N * C; int WP = WH*WW; for (int wp = threadIdx.x; wp < WP; wp += blockDim.x) { int nc = blockIdx.y; while (nc < NC) { int wh = wp / WW; int ww = wp % WW; int op = threadIdx.y + blockIdx.x * blockDim.y; if (op < OH * OW) { int oh = op / OW; int ow = op % OW; int ih = -ph + sh * oh + wh; int iw = -pw + sw * ow + ww; int dst_pos = nc * OH * OW * WH * WW + op * WH * WW + wp; int src_pos = nc * IH * IW + ih * IW + iw; dst[dst_pos] = (ih >= 0 && ih < IH && iw >= 0 && iw < IW) ? 
src[src_pos] : 0.0f; } nc += grid_y_max; } } } template <typename T> void forward(const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int wh, int ww, hipStream_t stream) { int spatial_size = OH * OW; int kernel_size = wh * ww; int tx = min(NR_THREADS, kernel_size); int ty = NR_THREADS / tx; megdnn_assert(ty > 0); int bx = DIVUP(spatial_size, ty); int by = N * C; hipLaunchKernelGGL(( forward_kernel), dim3(dim3(bx, ::min(grid_y_max, by))), dim3(dim3(tx, ty)), 0, stream, src, dst, N, C, IH, IW, OH, OW, ph, pw, sh, sw, wh, ww); after_kernel_launch(); } #undef grid_y_max template <typename T> __global__ void backward_kernel(const T *diff, T *grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int WH, int WW) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N*C*IH*IW) { int nc = id / (IH*IW); int ih = id % (IH*IW) / IW; int iw = id % (IH*IW) % IW; grad[nc*IH*IW + ih*IW + iw] = 0.0f; int oh_max = min((ih+ph) / sh, OH-1); int oh_min = max((ih+ph-(WH-1)+sh-1) / sh, 0); int ow_max = min((iw+pw) / sw, OW-1); int ow_min = max((iw+pw-(WW-1)+sw-1) / sw, 0); for (int oh = oh_min; oh <= oh_max; ++oh) for (int ow = ow_min; ow <= ow_max; ++ow) { int wh = ih+ph - sh*oh; int ww = iw+pw - sw*ow; grad[nc*IH*IW + ih*IW + iw] += diff[nc*OH*OW*WH*WW + oh*OW*WH*WW + ow*WH*WW + wh*WW + ww]; } } } template <typename T> void backward(const T *diff, T *grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int wh, int ww, hipStream_t stream) { int threads = NR_THREADS; int blocks = DIVUP(N*C*IH*IW, threads); hipLaunchKernelGGL(( backward_kernel), dim3(blocks), dim3(threads), 0, stream, diff, grad, N, C, IH, IW, OH, OW, ph, pw, sh, sw, wh, ww); after_kernel_launch(); } #define INST(T) \ template void forward<T>(const T *, T *, int, int, int, int, int, int, \ int, int, int, int, int, int, \ hipStream_t); \ template void backward<T>(const T *, T *, int, int, int, int, int, int, \ 
int, int, int, int, int, int, \ hipStream_t); #define cb(DType) \ INST(DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace images2neibs } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
a9f7111f2c1bf38c259df4f34e1c1bd45a75e761.cu
/** * \file dnn/src/cuda/images2neibs/kernel.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/images2neibs/kernel.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" #include <cstdio> namespace megdnn { namespace cuda { namespace images2neibs { #define grid_y_max 512 template <typename T> __global__ void forward_kernel(const T *src, T *dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int WH, int WW) { int NC = N * C; int WP = WH*WW; for (int wp = threadIdx.x; wp < WP; wp += blockDim.x) { int nc = blockIdx.y; while (nc < NC) { int wh = wp / WW; int ww = wp % WW; int op = threadIdx.y + blockIdx.x * blockDim.y; if (op < OH * OW) { int oh = op / OW; int ow = op % OW; int ih = -ph + sh * oh + wh; int iw = -pw + sw * ow + ww; int dst_pos = nc * OH * OW * WH * WW + op * WH * WW + wp; int src_pos = nc * IH * IW + ih * IW + iw; dst[dst_pos] = (ih >= 0 && ih < IH && iw >= 0 && iw < IW) ? 
src[src_pos] : 0.0f; } nc += grid_y_max; } } } template <typename T> void forward(const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int wh, int ww, cudaStream_t stream) { int spatial_size = OH * OW; int kernel_size = wh * ww; int tx = min(NR_THREADS, kernel_size); int ty = NR_THREADS / tx; megdnn_assert(ty > 0); int bx = DIVUP(spatial_size, ty); int by = N * C; forward_kernel<<<dim3(bx, std::min(grid_y_max, by)), dim3(tx, ty), 0, stream>>>(src, dst, N, C, IH, IW, OH, OW, ph, pw, sh, sw, wh, ww); after_kernel_launch(); } #undef grid_y_max template <typename T> __global__ void backward_kernel(const T *diff, T *grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int WH, int WW) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N*C*IH*IW) { int nc = id / (IH*IW); int ih = id % (IH*IW) / IW; int iw = id % (IH*IW) % IW; grad[nc*IH*IW + ih*IW + iw] = 0.0f; int oh_max = min((ih+ph) / sh, OH-1); int oh_min = max((ih+ph-(WH-1)+sh-1) / sh, 0); int ow_max = min((iw+pw) / sw, OW-1); int ow_min = max((iw+pw-(WW-1)+sw-1) / sw, 0); for (int oh = oh_min; oh <= oh_max; ++oh) for (int ow = ow_min; ow <= ow_max; ++ow) { int wh = ih+ph - sh*oh; int ww = iw+pw - sw*ow; grad[nc*IH*IW + ih*IW + iw] += diff[nc*OH*OW*WH*WW + oh*OW*WH*WW + ow*WH*WW + wh*WW + ww]; } } } template <typename T> void backward(const T *diff, T *grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int wh, int ww, cudaStream_t stream) { int threads = NR_THREADS; int blocks = DIVUP(N*C*IH*IW, threads); backward_kernel<<<blocks, threads, 0, stream>>>(diff, grad, N, C, IH, IW, OH, OW, ph, pw, sh, sw, wh, ww); after_kernel_launch(); } #define INST(T) \ template void forward<T>(const T *, T *, int, int, int, int, int, int, \ int, int, int, int, int, int, \ cudaStream_t); \ template void backward<T>(const T *, T *, int, int, int, int, int, int, \ int, int, int, int, int, int, \ cudaStream_t); #define 
cb(DType) \ INST(DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace images2neibs } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
226df785eb3fd742b1c0521e703ca1983fa260f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "arg.h" #include "cuda_help.h" #include "proj.h" using namespace Legion; namespace legate { namespace numpy { template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_1d(const AccessorWO<int64_t, 1> out, const AccessorRO<Argval<T>, 1> in, const Point<1> origin, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; out[x] = in[x].arg; } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_2d(const AccessorWO<int64_t, 2> out, const AccessorRO<Argval<T>, 2> in, const Point<2> origin, const Point<1> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; out[x][y] = in[x][y].arg; } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_3d(const AccessorWO<int64_t, 3> out, const AccessorRO<Argval<T>, 3> in, const Point<3> origin, const Point<2> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; 
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; out[x][y][z] = in[x][y][z].arg; } template<typename T> /*static*/ void GetargTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const int extra_dim = derez.unpack_dimension(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 1> out = derez.unpack_accessor_WO<int64_t, 1>(regions[0], rect); const AccessorRO<Argval<T>, 1> in = (extra_dim >= 0) ? derez.unpack_accessor_RO<Argval<T>, 1>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 1>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( legate_get_arg_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 2> out = derez.unpack_accessor_WO<int64_t, 2>(regions[0], rect); const AccessorRO<Argval<T>, 2> in = (extra_dim >= 0) ? 
derez.unpack_accessor_RO<Argval<T>, 2>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 2>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; hipLaunchKernelGGL(( legate_get_arg_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, Point<1>(pitch), volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 3> out = derez.unpack_accessor_WO<int64_t, 3>(regions[0], rect); const AccessorRO<Argval<T>, 3> in = (extra_dim >= 0) ? derez.unpack_accessor_RO<Argval<T>, 3>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 3>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; hipLaunchKernelGGL(( legate_get_arg_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, Point<2>(pitch), volume); break; } default: assert(false); } } INSTANTIATE_TASK_VARIANT(GetargTask, gpu_variant) } // namespace numpy } // namespace legate
226df785eb3fd742b1c0521e703ca1983fa260f7.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "arg.h" #include "cuda_help.h" #include "proj.h" using namespace Legion; namespace legate { namespace numpy { template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_1d(const AccessorWO<int64_t, 1> out, const AccessorRO<Argval<T>, 1> in, const Point<1> origin, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; out[x] = in[x].arg; } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_2d(const AccessorWO<int64_t, 2> out, const AccessorRO<Argval<T>, 2> in, const Point<2> origin, const Point<1> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; out[x][y] = in[x][y].arg; } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_get_arg_3d(const AccessorWO<int64_t, 3> out, const AccessorRO<Argval<T>, 3> in, const Point<3> origin, const Point<2> pitch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] 
+ (offset % pitch[0]) % pitch[1]; out[x][y][z] = in[x][y][z].arg; } template<typename T> /*static*/ void GetargTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const int extra_dim = derez.unpack_dimension(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 1> out = derez.unpack_accessor_WO<int64_t, 1>(regions[0], rect); const AccessorRO<Argval<T>, 1> in = (extra_dim >= 0) ? derez.unpack_accessor_RO<Argval<T>, 1>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 1>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; legate_get_arg_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 2> out = derez.unpack_accessor_WO<int64_t, 2>(regions[0], rect); const AccessorRO<Argval<T>, 2> in = (extra_dim >= 0) ? derez.unpack_accessor_RO<Argval<T>, 2>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 2>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; legate_get_arg_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, Point<1>(pitch), volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const AccessorWO<int64_t, 3> out = derez.unpack_accessor_WO<int64_t, 3>(regions[0], rect); const AccessorRO<Argval<T>, 3> in = (extra_dim >= 0) ? 
derez.unpack_accessor_RO<Argval<T>, 3>(regions[1], rect, extra_dim, 0) : derez.unpack_accessor_RO<Argval<T>, 3>(regions[1], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; legate_get_arg_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, Point<2>(pitch), volume); break; } default: assert(false); } } INSTANTIATE_TASK_VARIANT(GetargTask, gpu_variant) } // namespace numpy } // namespace legate
5309107d3a95bfb6b24939a59ff25deaee0e4ba1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/fill_diagonal_grad_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/common_shape.h" namespace phi { template <typename T> __global__ void fill_constant_kernel(const int64_t featuresize, T* in_data, int64_t strides, int offset, T fillvar, int dims) { for (int64_t idx = blockIdx.x * featuresize + threadIdx.x; idx * strides + offset < (blockIdx.x + 1) * featuresize; idx += blockDim.x) { // to check if the new position with offset is still in the same line; // this modify should not affect across lines. 
// out_dims[1] is also work for tensor with dim>2, for which the dims must // be the same number if ((idx * strides) % dims + offset < dims && (idx * strides) % dims + offset >= 0) { in_data[idx * strides + offset] = fillvar; } } } template <typename T, typename Context> void FillDiagonalGradKernel(const Context& ctx, const DenseTensor& out_grad, float value, int offset, bool wrap, DenseTensor* x_grad) { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto* in_data = ctx.template Alloc<T>(x_grad); phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad); auto size = x_grad->numel(); auto out_dims = x_grad->dims(); auto strides = funcs::CalStride(out_dims); auto wrapsize = ::min(size, out_dims[1] * out_dims[1]); // The wrap mode supported only the dims equels to 2; In wrap mode, the // value will be filled in cycles if (wrap) { wrapsize = size; } int64_t kBlockDim = ::min(int64_t(size), kMaxBlockDim); hipLaunchKernelGGL(( fill_constant_kernel<T>), dim3(1), dim3(kBlockDim), 0, 0, wrapsize, in_data, strides, offset, T(0), out_dims[1]); } } // namespace phi PD_REGISTER_KERNEL(fill_diagonal_grad, GPU, ALL_LAYOUT, phi::FillDiagonalGradKernel, float, double, int64_t, int, phi::dtype::float16, bool) {}
5309107d3a95bfb6b24939a59ff25deaee0e4ba1.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/fill_diagonal_grad_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/common_shape.h" namespace phi { template <typename T> __global__ void fill_constant_kernel(const int64_t featuresize, T* in_data, int64_t strides, int offset, T fillvar, int dims) { for (int64_t idx = blockIdx.x * featuresize + threadIdx.x; idx * strides + offset < (blockIdx.x + 1) * featuresize; idx += blockDim.x) { // to check if the new position with offset is still in the same line; // this modify should not affect across lines. 
// out_dims[1] is also work for tensor with dim>2, for which the dims must // be the same number if ((idx * strides) % dims + offset < dims && (idx * strides) % dims + offset >= 0) { in_data[idx * strides + offset] = fillvar; } } } template <typename T, typename Context> void FillDiagonalGradKernel(const Context& ctx, const DenseTensor& out_grad, float value, int offset, bool wrap, DenseTensor* x_grad) { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto* in_data = ctx.template Alloc<T>(x_grad); phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad); auto size = x_grad->numel(); auto out_dims = x_grad->dims(); auto strides = funcs::CalStride(out_dims); auto wrapsize = std::min(size, out_dims[1] * out_dims[1]); // The wrap mode supported only the dims equels to 2; In wrap mode, the // value will be filled in cycles if (wrap) { wrapsize = size; } int64_t kBlockDim = std::min(int64_t(size), kMaxBlockDim); fill_constant_kernel<T><<<1, kBlockDim, 0>>>( wrapsize, in_data, strides, offset, T(0), out_dims[1]); } } // namespace phi PD_REGISTER_KERNEL(fill_diagonal_grad, GPU, ALL_LAYOUT, phi::FillDiagonalGradKernel, float, double, int64_t, int, phi::dtype::float16, bool) {}
9532e799979ba27f780969583c4bdd178ada51cb.hip
// !!! This is a file automatically generated by hipify!!! #include "vec3.cuh" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/random.h> namespace crt { __host__ __device__ vec3::vec3() : x(0) , y(0) , z(0){}; __host__ __device__ vec3::vec3(float _x, float _y, float _z) : x(_x) , y(_y) , z(_z){}; vec3::vec3(JSONArray json_vector) : x(json_vector[0]->AsNumber()) , y(json_vector[1]->AsNumber()) , z(json_vector[2]->AsNumber()){}; vec3::vec3(JSONValue* json_vector) { JSONArray json_array = json_vector->AsArray(); x = json_array[0]->AsNumber(); y = json_array[1]->AsNumber(); z = json_array[2]->AsNumber(); }; __host__ __device__ vec3::vec3(const vec3& othre) : x(othre.x) , y(othre.y) , z(othre.z){}; __host__ __device__ vec3 vec3::operator=(const vec3& rhs) { x = rhs.x; y = rhs.y; z = rhs.z; return (*this); }; __host__ __device__ vec3::~vec3(){}; __host__ __device__ vec3 vec3::operator+(const vec3& rhs) const { return vec3(x + rhs.x, y + rhs.y, z + rhs.z); }; __host__ __device__ vec3 vec3::operator+=(const vec3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }; __host__ __device__ vec3 vec3::operator-(const vec3& rhs) const { return vec3(x - rhs.x, y - rhs.y, z - rhs.z); }; __host__ __device__ vec3 vec3::operator-=(const vec3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }; __host__ __device__ vec3 vec3::operator-() const { return vec3(-x, -y, -z); }; __host__ __device__ vec3 vec3::operator*(float rhs) const { return vec3(x * rhs, y * rhs, z * rhs); }; __host__ __device__ vec3 vec3::operator*=(float rhs) { x *= rhs; y *= rhs; z *= rhs; return *this; }; __host__ __device__ vec3::operator float() const { return sqrtf(x * x + y * y + z * z); }; __host__ __device__ float vec3::lengthSquared() const { return x * x + y * y + z * z; }; __host__ __device__ vec3 vec3::normalize() { float lenght = (float)(*this); x /= lenght; y /= lenght; z /= lenght; return *this; }; __host__ __device__ vec3 vec3::normalized() const { float lenght = 
lengthSquared(); if (lenght == 1) { return vec3(*this); } lenght = sqrtf(lenght); return vec3(x / lenght, y / lenght, z / lenght); }; __host__ __device__ float vec3::dot(const vec3& lhs, const vec3& rhs) { return lhs.x * rhs.x + lhs.y * rhs.y + lhs.z * rhs.z; }; __host__ __device__ vec3 vec3::cross(const vec3& lhs, const vec3& rhs) { return vec3(lhs.y * rhs.z - lhs.z * rhs.y, lhs.z * rhs.x - lhs.x * rhs.z, lhs.x * rhs.y - lhs.y * rhs.x); }; __host__ __device__ vec3 vec3::reflect(const vec3& rhs, const vec3& normal) { return rhs - normal * (2 * vec3::dot(rhs, normal)); }; __host__ __device__ vec3 vec3::refract(const vec3& rhs, const vec3& normal, float refractionIndex) { // TODO return vec3(); }; __host__ __device__ vec3 vec3::random() { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0, 1); float phi = acosf(2 * uniDist(randEng) - 1); float lambda = uniDist(randEng) * 2 * PI; return vec3(cosf(lambda) * cosf(phi), sinf(phi), sinf(lambda) * cosf(phi)); }; __host__ __device__ vec3 vec3::random(const vec3& normal) { vec3 random = vec3::random(); if (vec3::dot(random, normal) < 0) { random = -random; } return random; }; std::ostream& operator<<(std::ostream& os, const vec3& vec) { os << "vec3: x=" << std::to_string(vec.x) << " y=" << std::to_string(vec.y) << " z=" << std::to_string(vec.z); return os; } };
9532e799979ba27f780969583c4bdd178ada51cb.cu
#include "vec3.cuh" #include <cuda.h> #include <cuda_runtime.h> #include <thrust/random.h> namespace crt { __host__ __device__ vec3::vec3() : x(0) , y(0) , z(0){}; __host__ __device__ vec3::vec3(float _x, float _y, float _z) : x(_x) , y(_y) , z(_z){}; vec3::vec3(JSONArray json_vector) : x(json_vector[0]->AsNumber()) , y(json_vector[1]->AsNumber()) , z(json_vector[2]->AsNumber()){}; vec3::vec3(JSONValue* json_vector) { JSONArray json_array = json_vector->AsArray(); x = json_array[0]->AsNumber(); y = json_array[1]->AsNumber(); z = json_array[2]->AsNumber(); }; __host__ __device__ vec3::vec3(const vec3& othre) : x(othre.x) , y(othre.y) , z(othre.z){}; __host__ __device__ vec3 vec3::operator=(const vec3& rhs) { x = rhs.x; y = rhs.y; z = rhs.z; return (*this); }; __host__ __device__ vec3::~vec3(){}; __host__ __device__ vec3 vec3::operator+(const vec3& rhs) const { return vec3(x + rhs.x, y + rhs.y, z + rhs.z); }; __host__ __device__ vec3 vec3::operator+=(const vec3& rhs) { x += rhs.x; y += rhs.y; z += rhs.z; return *this; }; __host__ __device__ vec3 vec3::operator-(const vec3& rhs) const { return vec3(x - rhs.x, y - rhs.y, z - rhs.z); }; __host__ __device__ vec3 vec3::operator-=(const vec3& rhs) { x -= rhs.x; y -= rhs.y; z -= rhs.z; return *this; }; __host__ __device__ vec3 vec3::operator-() const { return vec3(-x, -y, -z); }; __host__ __device__ vec3 vec3::operator*(float rhs) const { return vec3(x * rhs, y * rhs, z * rhs); }; __host__ __device__ vec3 vec3::operator*=(float rhs) { x *= rhs; y *= rhs; z *= rhs; return *this; }; __host__ __device__ vec3::operator float() const { return sqrtf(x * x + y * y + z * z); }; __host__ __device__ float vec3::lengthSquared() const { return x * x + y * y + z * z; }; __host__ __device__ vec3 vec3::normalize() { float lenght = (float)(*this); x /= lenght; y /= lenght; z /= lenght; return *this; }; __host__ __device__ vec3 vec3::normalized() const { float lenght = lengthSquared(); if (lenght == 1) { return vec3(*this); } lenght = 
sqrtf(lenght); return vec3(x / lenght, y / lenght, z / lenght); }; __host__ __device__ float vec3::dot(const vec3& lhs, const vec3& rhs) { return lhs.x * rhs.x + lhs.y * rhs.y + lhs.z * rhs.z; }; __host__ __device__ vec3 vec3::cross(const vec3& lhs, const vec3& rhs) { return vec3(lhs.y * rhs.z - lhs.z * rhs.y, lhs.z * rhs.x - lhs.x * rhs.z, lhs.x * rhs.y - lhs.y * rhs.x); }; __host__ __device__ vec3 vec3::reflect(const vec3& rhs, const vec3& normal) { return rhs - normal * (2 * vec3::dot(rhs, normal)); }; __host__ __device__ vec3 vec3::refract(const vec3& rhs, const vec3& normal, float refractionIndex) { // TODO return vec3(); }; __host__ __device__ vec3 vec3::random() { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0, 1); float phi = acosf(2 * uniDist(randEng) - 1); float lambda = uniDist(randEng) * 2 * PI; return vec3(cosf(lambda) * cosf(phi), sinf(phi), sinf(lambda) * cosf(phi)); }; __host__ __device__ vec3 vec3::random(const vec3& normal) { vec3 random = vec3::random(); if (vec3::dot(random, normal) < 0) { random = -random; } return random; }; std::ostream& operator<<(std::ostream& os, const vec3& vec) { os << "vec3: x=" << std::to_string(vec.x) << " y=" << std::to_string(vec.y) << " z=" << std::to_string(vec.z); return os; } };
86ba9796beb69267d97a14736617282225b6025e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/top_k_grad_kernel.h" namespace phi { namespace ops = paddle::operators; template <typename T, typename Context> void TopkGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& indices, const DenseTensor& out_grad, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* x_grad) { const auto& in_dims = x.dims(); const auto& out_dims = indices.dims(); int k = k_scalar.to<int>(); // get the real the axis and the k if (axis < 0) { axis += in_dims.size(); } const int& raw_height = in_dims[axis]; // allocate the cuda memory for the x_grad T* x_grad_data = dev_ctx.template Alloc<T>(x_grad); const T* out_grad_data = out_grad.data<T>(); const int64_t* indices_data = indices.data<int64_t>(); int pre, n, post; ops::GetDims(in_dims, axis, &pre, &n, &post); // calcluate the block and grid num auto ComputeBlockSize = [](int col) { if (col > 512) return 1024; else if (col > 256 && col <= 512) return 512; else if (col > 128 && col <= 256) return 256; else if (col > 64 && col <= 128) return 128; else return 64; }; int 
block_size = ComputeBlockSize(post * k); int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(((max_threads - 1) / block_size + 1), 1); int grid_size = ::min(max_blocks, pre); // lanuch the cuda kernel to assign the grad hipLaunchKernelGGL(( ops::AssignGradWithAxis<T>) , dim3(grid_size), dim3(block_size), 64 * 4, dev_ctx.stream(), out_grad_data, indices_data, x_grad_data, pre, post, n, k); } } // namespace phi PD_REGISTER_KERNEL(top_k_grad, GPU, ALL_LAYOUT, phi::TopkGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
86ba9796beb69267d97a14736617282225b6025e.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/top_k_grad_kernel.h" namespace phi { namespace ops = paddle::operators; template <typename T, typename Context> void TopkGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& indices, const DenseTensor& out_grad, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* x_grad) { const auto& in_dims = x.dims(); const auto& out_dims = indices.dims(); int k = k_scalar.to<int>(); // get the real the axis and the k if (axis < 0) { axis += in_dims.size(); } const int& raw_height = in_dims[axis]; // allocate the cuda memory for the x_grad T* x_grad_data = dev_ctx.template Alloc<T>(x_grad); const T* out_grad_data = out_grad.data<T>(); const int64_t* indices_data = indices.data<int64_t>(); int pre, n, post; ops::GetDims(in_dims, axis, &pre, &n, &post); // calcluate the block and grid num auto ComputeBlockSize = [](int col) { if (col > 512) return 1024; else if (col > 256 && col <= 512) return 512; else if (col > 128 && col <= 256) return 256; else if (col > 64 && col <= 128) return 128; else return 64; }; int block_size = ComputeBlockSize(post * k); int max_threads = 
dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(((max_threads - 1) / block_size + 1), 1); int grid_size = std::min(max_blocks, pre); // lanuch the cuda kernel to assign the grad ops::AssignGradWithAxis<T> <<<grid_size, block_size, 64 * 4, dev_ctx.stream()>>>( out_grad_data, indices_data, x_grad_data, pre, post, n, k); } } // namespace phi PD_REGISTER_KERNEL(top_k_grad, GPU, ALL_LAYOUT, phi::TopkGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
4d8bcce559d5ef30726848774e588e912f03085a.hip
// !!! This is a file automatically generated by hipify!!! // Joshua Donnoe, Kyle Evens, and Dominik Haeflinger #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #define NUM_THREADS 256
4d8bcce559d5ef30726848774e588e912f03085a.cu
// Joshua Donnoe, Kyle Evens, and Dominik Haeflinger #include <stdlib.h> #include <stdio.h> #include <cuda.h> #define NUM_THREADS 256
feca3bf6ad74604857450c8453647fbd3d277ac2.hip
// !!! This is a file automatically generated by hipify!!! // ************************************************************************* // // PARALUTION www.paralution.com // // Copyright (C) 2012-2014 Dimitar Lukarski // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************* // PARALUTION version 0.7.0 #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_dia.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_matrix_bcsr.hpp" #include "gpu_matrix_dense.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_csr.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_csr.hpp" #include "cuda_kernels_vector.hpp" #include "cusparse_csr.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include <assert.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> 
GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::GPUAcceleratorMatrixCSR()", "constructor with local_backend"); this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->set_backend(local_backend); this->L_mat_descr_ = 0; this->U_mat_descr_ = 0; this->L_mat_info_ = 0; this->U_mat_info_ = 0; this->mat_descr_ = 0; this->tmp_vec_ = NULL; CHECK_CUDA_ERROR(__FILE__, __LINE__); hipsparseStatus_t stat_t; stat_t = hipsparseCreateMatDescr(&this->mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseSetMatIndexBase(this->mat_descr_, HIPSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixCSR<ValueType>::~GPUAcceleratorMatrixCSR() { LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::~GPUAcceleratorMatrixCSR()", "destructor"); this->Clear(); hipsparseStatus_t stat_t; stat_t = hipsparseDestroyMatDescr(this->mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixCSR<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::AllocateCSR(const int nnz, const int nrow, const int ncol) { assert(nnz >= 0); assert(ncol >= 0); assert(nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (nnz > 0) { allocate_gpu(nrow+1, &this->mat_.row_offset); allocate_gpu(nnz, &this->mat_.col); allocate_gpu(nnz, &this->mat_.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nrow+1, mat_.row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, 
this->local_backend_.GPU_max_threads, nnz, mat_.val); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SetDataPtrCSR(int **row_offset, int **col, ValueType **val, const int nnz, const int nrow, const int ncol) { assert(*row_offset != NULL); assert(*col != NULL); assert(*val != NULL); assert(nnz > 0); assert(nrow > 0); assert(ncol > 0); this->Clear(); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; hipDeviceSynchronize(); this->mat_.row_offset = *row_offset; this->mat_.col = *col; this->mat_.val = *val; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::LeaveDataPtrCSR(int **row_offset, int **col, ValueType **val) { assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); assert(this->get_nnz() > 0); hipDeviceSynchronize(); // see free_host function for details *row_offset = this->mat_.row_offset; *col = this->mat_.col; *val = this->mat_.val; this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.row_offset); free_gpu(&this->mat_.col); free_gpu(&this->mat_.val); this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; this->LUAnalyseClear(); this->LLAnalyseClear(); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::Zeros() { if (this->get_nnz() > 0) set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, this->get_nnz(), mat_.val); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 
0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { hipMemcpyAsync(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error 
unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { hipMemcpyAsync(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src 
(this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, 
__LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateCSR(dst->get_nnz(), 
dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); 
CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_csr); return true; } /* const GPUAcceleratorMatrixCOO<ValueType> *cast_mat_coo; if ((cast_mat_coo = dynamic_cast<const GPUAcceleratorMatrixCOO<ValueType>*> (&mat)) != NULL) { this->Clear(); TODO Allocate copy colmn copy val hipsparseStatus_t hipsparseXcoo2csr(hipsparseHandle_t handle, const int *cooRowInd, int nnz, int m, int *csrRowPtr, hipsparseIndexBase_t idxBase); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_coo->get_nrow(); this->ncol_ = cast_mat_coo->get_ncol(); this->nnz_ = cast_mat_coo->get_nnz(); return true; } */ /* const GPUAcceleratorMatrixDENSE<ValueType> *cast_mat_dense; if ((cast_mat_dense = dynamic_cast<const GPUAcceleratorMatrixDENSE<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_dense->get_nrow(); this->ncol_ = cast_mat_dense->get_ncol(); this->nnz_ = nnz; return true; } */ /* const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia; if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_dia->get_nrow(); this->ncol_ = 
cast_mat_dia->get_ncol(); this->nnz_ = nnz ; return true; } */ /* const GPUAcceleratorMatrixELL<ValueType> *cast_mat_ell; if ((cast_mat_ell = dynamic_cast<const GPUAcceleratorMatrixELL<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_ell->get_nrow(); this->ncol_ = cast_mat_ell->get_ncol(); this->nnz_ = nnz ; return true; } */ /* const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr; if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_mcsr->get_nrow(); this->ncol_ = cast_mat_mcsr->get_ncol(); this->nnz_ = cast_mat_mcsr->get_nnz(); return true; } */ /* const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); int nnz = 0; this->nrow_ = cast_mat_hyb->get_nrow(); this->ncol_ = cast_mat_hyb->get_ncol(); this->nnz_ = nnz; return true; } */ return false; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Permute( const BaseVector<int> &permutation){ assert(&permutation != NULL); assert(permutation.get_size() == this->get_nrow()); assert(permutation.get_size() == this->get_ncol()); if (this->get_nnz() > 0) { int *d_nnzr = NULL; int *d_nnzrPerm = NULL; int *d_nnzPerm = NULL; int *d_offset = NULL; ValueType *d_data = NULL; allocate_gpu<int>(this->get_nrow(), &d_nnzr); allocate_gpu<int>(this->get_nrow(), &d_nnzrPerm); allocate_gpu<int>((this->get_nrow()+1), &d_nnzPerm); allocate_gpu<ValueType>(this->get_nnz(), &d_data); allocate_gpu<int>(this->get_nnz(), &d_offset); const GPUAcceleratorVector<int> *cast_perm = dynamic_cast<const GPUAcceleratorVector<int>*> (&permutation); assert(cast_perm != NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / 
this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_calc_row_nnz<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, d_nnzr); CHECK_CUDA_ERROR(__FILE__,__LINE__); hipLaunchKernelGGL(( kernel_permute_row_nnz<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), d_nnzr, cast_perm->vec_, d_nnzrPerm); CHECK_CUDA_ERROR(__FILE__,__LINE__); //TODO //move in extra file cum_sum<int, 256>(d_nnzPerm, d_nnzrPerm, this->get_nrow()); hipLaunchKernelGGL(( kernel_permute_rows<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, d_nnzPerm, this->mat_.col, this->mat_.val, cast_perm->vec_, d_nnzr, d_offset, d_data); CHECK_CUDA_ERROR(__FILE__,__LINE__); free_gpu<int>(&this->mat_.row_offset); this->mat_.row_offset = d_nnzPerm; int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer); dim3 BlockSize2(this->local_backend_.GPU_block_size); dim3 GridSize2(this->local_backend_.GPU_wrap * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; hipLaunchKernelGGL(( kernel_max<int, int, 256>) , dim3(GridSize2), dim3(BlockSize2), 0, 0, nrow, d_nnzr, d_buffer, GROUP_SIZE, LOCAL_SIZE); CHECK_CUDA_ERROR(__FILE__, __LINE__); FinalReduceSize = this->local_backend_.GPU_wrap * 4; allocate_host(FinalReduceSize, &h_buffer); hipMemcpy(h_buffer, // dst d_buffer, // src FinalReduceSize*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&d_buffer); int maxnnzrow = 0; for (int i=0; i<FinalReduceSize; ++i) if (maxnnzrow < h_buffer[i]) maxnnzrow = h_buffer[i]; free_host(&h_buffer); //TODO what should happen in this case?? 
if (maxnnzrow > 1024) FATAL_ERROR(__FILE__, __LINE__) else if (maxnnzrow > 512) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 1024>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 256) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 512>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 128) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 256>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 64) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 128>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 32) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 64>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 16) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 32>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 8) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 16>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 4) hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 8>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, 
d_offset, d_data, this->mat_.col, this->mat_.val); else hipLaunchKernelGGL(( kernel_permute_cols<ValueType, int, 4>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); free_gpu<int>(&d_offset); free_gpu<ValueType>(&d_data); free_gpu<int>(&d_nnzrPerm); free_gpu<int>(&d_nnzr); } return true; } template <> void GPUAcceleratorMatrixCSR<float>::Apply(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); hipsparseStatus_t stat_t; const float scalar = 1.0; const float beta = 0.0; stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse instead... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<double>::Apply(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); hipsparseStatus_t stat_t; const double scalar = 1.0; const double beta = 0.0; stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse instead... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_spmv_scalar<double, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<float>::ApplyAdd(const BaseVector<float> &in, const float scalar, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); hipsparseStatus_t stat_t; const float beta = 1.0; stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse now... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<double>::ApplyAdd(const BaseVector<double> &in, const double scalar, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol());
  assert(out->get_size() == this->get_nrow());

  // Downcast base vectors to GPU vectors; both must live on the accelerator.
  const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
  GPUAcceleratorVector<double> *cast_out     = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

  assert(cast_in != NULL);
  assert(cast_out != NULL);

  hipsparseStatus_t stat_t;

  // out := scalar * A * in + 1.0 * out (SpMV-and-accumulate via csrmv).
  const double beta = 1.0;

  stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                           HIPSPARSE_OPERATION_NON_TRANSPOSE,
                           this->get_nrow(), this->get_ncol(), this->get_nnz(),
                           &scalar, this->mat_descr_,
                           this->mat_.val, this->mat_.row_offset, this->mat_.col,
                           cast_in->vec_, &beta, cast_out->vec_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  /* // Using cusparse now...
     int nrow = this->get_nrow();
     dim3 BlockSize(this->local_backend_.GPU_block_size);
     dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
     kernel_csr_add_spmv_scalar<double, int> <<<GridSize, BlockSize>>> (nrow,
       this->mat_.row_offset, this->mat_.col, this->mat_.val,
       scalar, cast_in->vec_, cast_out->vec_);
     CHECK_CUDA_ERROR(__FILE__, __LINE__); */

  }
}

// In-place ILU(0) factorization (float). Runs the (legacy) cuSPARSE
// solve-analysis pass over the sparsity pattern, then csrilu0, which
// overwrites this->mat_.val with the combined L/U factors.
// No-op (still returns true) when the matrix has no entries.
template <>
bool GPUAcceleratorMatrixCSR<float>::ILU0Factorize(void) {

  if (this->get_nnz() > 0) {

    hipsparseStatus_t stat_t;

    cusparseSolveAnalysisInfo_t infoA = 0;

    stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Symbolic analysis of the CSR pattern.
    stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                     HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                     this->get_nrow(), this->get_nnz(),
                                     this->mat_descr_,
                                     this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                     infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Numerical factorization, in place on mat_.val.
    stat_t = cusparseScsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                              HIPSPARSE_OPERATION_NON_TRANSPOSE,
                              this->get_nrow(),
                              this->mat_descr_,
                              this->mat_.val, this->mat_.row_offset, this->mat_.col,
                              infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDestroySolveAnalysisInfo(infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// In-place ILU(0) factorization (double). Same flow as the float
// specialization, using the D-suffixed cuSPARSE entry points.
template <>
bool GPUAcceleratorMatrixCSR<double>::ILU0Factorize(void) {

  if (this->get_nnz() > 0) {

    hipsparseStatus_t stat_t;

    cusparseSolveAnalysisInfo_t infoA = 0;

    stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                     HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                     this->get_nrow(), this->get_nnz(),
                                     this->mat_descr_,
                                     this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                     infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                              HIPSPARSE_OPERATION_NON_TRANSPOSE,
                              this->get_nrow(),
                              this->mat_descr_,
                              this->mat_.val, this->mat_.row_offset, this->mat_.col,
                              infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDestroySolveAnalysisInfo(infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// In-place IC(0) factorization (float). Reconfigures the matrix descriptor
// as symmetric / lower / non-unit-diagonal, runs the analysis pass, then
// csric0 in place on mat_.val. inv_diag is not used by this backend.
// NOTE(review): unlike ILU0Factorize, infoA is never destroyed here —
// looks like a resource leak; confirm against the other backends.
template <>
bool GPUAcceleratorMatrixCSR<float>::ICFactorize(BaseVector<float> *inv_diag) {

  if (this->get_nnz() > 0) {

    hipsparseStatus_t stat_t;

    cusparseSolveAnalysisInfo_t infoA = 0;

    stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // IC(0) works on the lower triangle of a symmetric matrix.
    stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                     HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                     this->get_nrow(), this->get_nnz(),
                                     this->mat_descr_,
                                     this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                     infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseScsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                             HIPSPARSE_OPERATION_NON_TRANSPOSE,
                             this->get_nrow(),
                             this->mat_descr_,
                             this->mat_.val, this->mat_.row_offset, this->mat_.col,
                             infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// In-place IC(0) factorization (double); see the float specialization.
// NOTE(review): infoA is never destroyed here either — same suspected leak.
template <>
bool GPUAcceleratorMatrixCSR<double>::ICFactorize(BaseVector<double> *inv_diag) {

  if (this->get_nnz() > 0) {

    hipsparseStatus_t stat_t;

    cusparseSolveAnalysisInfo_t infoA = 0;

    stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                     HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                     this->get_nrow(), this->get_nnz(),
                                     this->mat_descr_,
                                     this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                     infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                             HIPSPARSE_OPERATION_NON_TRANSPOSE,
                             this->get_nrow(),
                             this->mat_descr_,
                             this->mat_.val, this->mat_.row_offset, this->mat_.col,
                             infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Prepare triangular-solve metadata for LUSolve (float): build descriptors
// for the unit-diagonal L part and non-unit-diagonal U part of the in-place
// ILU(0) factors, run the analysis pass for each, and allocate the
// intermediate vector used between the two solves.
template <>
void GPUAcceleratorMatrixCSR<float>::LUAnalyse(void) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // L carries an implicit unit diagonal (ILU0 stores U's diagonal).
  stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  // L
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  assert(this->get_ncol() == this->get_nrow());
  assert(this->tmp_vec_ == NULL);

  // Temporary vector holding the L-solve result between the two solves.
  // NOTE(review): constructed without this->local_backend_ here, unlike the
  // double specialization below — confirm whether that is intentional.
  this->tmp_vec_ = new GPUAcceleratorVector<float>;
  assert(this->tmp_vec_ != NULL);

  tmp_vec_->Allocate(this->get_nrow());

}

// Prepare triangular-solve metadata for LUSolve (double); see the float
// specialization for the descriptor layout.
template <>
void GPUAcceleratorMatrixCSR<double>::LUAnalyse(void) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  assert(this->get_ncol() == this->get_nrow());
  assert(this->tmp_vec_ == NULL);

  this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
  assert(this->tmp_vec_ != NULL);

  tmp_vec_->Allocate(this->get_nrow());

}

// Release everything LUAnalyse allocated (descriptors, analysis infos and
// the temporary vector). Safe to call when nothing was analysed.
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LUAnalyseClear(void) {

  hipsparseStatus_t stat_t;

  if (this->L_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->L_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->U_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->U_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  this->L_mat_descr_ = 0;
  this->U_mat_descr_ = 0;
  this->L_mat_info_ = 0;
  this->U_mat_info_ = 0;

  delete this->tmp_vec_ ;
  this->tmp_vec_ = NULL;

}

// Forward/backward substitution with the in-place ILU(0) factors (float):
// solves L * tmp = in, then U * out = tmp. Requires a prior LUAnalyse().
template <>
bool GPUAcceleratorMatrixCSR<float>::LUSolve(const BaseVector<float> &in, BaseVector<float> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());
    assert(this->tmp_vec_ != NULL);

    const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
    GPUAcceleratorVector<float> *cast_out      = dynamic_cast<      GPUAcceleratorVector<float>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    float one = float(1.0);

    // Solve L
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, tmp_vec_->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Solve U
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  tmp_vec_->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Forward/backward substitution with the in-place ILU(0) factors (double);
// see the float specialization.
template <>
bool GPUAcceleratorMatrixCSR<double>::LUSolve(const BaseVector<double> &in, BaseVector<double> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
    GPUAcceleratorVector<double> *cast_out      = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    double one = double(1.0);

    // Solve L
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, this->tmp_vec_->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Solve U
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  this->tmp_vec_->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Prepare triangular-solve metadata for LLSolve (float): Cholesky-style
// L * L^T solve. Both descriptors point at the lower triangle; the second
// ("U") analysis runs with OPERATION_TRANSPOSE so the transposed L serves
// as the upper factor.
template <>
void GPUAcceleratorMatrixCSR<float>::LLAnalyse(void) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // LOWER on purpose: the stored triangle is L; U is realized via transpose.
  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  assert(this->get_ncol() == this->get_nrow());
  assert(this->tmp_vec_ == NULL);

  this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
  assert(this->tmp_vec_ != NULL);

  tmp_vec_->Allocate(this->get_nrow());

}

// Prepare triangular-solve metadata for LLSolve (double); see the float
// specialization for the descriptor layout.
template <>
void GPUAcceleratorMatrixCSR<double>::LLAnalyse(void) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  assert(this->get_ncol() == this->get_nrow());
  assert(this->tmp_vec_ == NULL);

  this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
  assert(this->tmp_vec_ != NULL);

  tmp_vec_->Allocate(this->get_nrow());

}

// Release everything LLAnalyse allocated; mirrors LUAnalyseClear.
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LLAnalyseClear(void) {

  hipsparseStatus_t stat_t;

  if (this->L_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->L_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->U_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->U_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  this->L_mat_descr_ = 0;
  this->U_mat_descr_ = 0;
  this->L_mat_info_ = 0;
  this->U_mat_info_ = 0;

  delete this->tmp_vec_ ;
  this->tmp_vec_ = NULL;

}

// Cholesky-style solve (float): L * tmp = in, then L^T * out = tmp
// (second solve is the TRANSPOSE operation). Requires a prior LLAnalyse().
template <>
bool GPUAcceleratorMatrixCSR<float>::LLSolve(const BaseVector<float> &in, BaseVector<float> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
    GPUAcceleratorVector<float> *cast_out      = dynamic_cast<      GPUAcceleratorVector<float>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    float one = float(1.0);

    // Solve L
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, this->tmp_vec_->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Solve U
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  this->tmp_vec_->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Cholesky-style solve (double); see the float specialization.
template <>
bool GPUAcceleratorMatrixCSR<double>::LLSolve(const BaseVector<double> &in, BaseVector<double> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
    GPUAcceleratorVector<double> *cast_out      = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    double one = double(1.0);

    // Solve L
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, this->tmp_vec_->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Solve U
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  this->tmp_vec_->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// LLSolve overload taking a precomputed inverse diagonal: this backend
// ignores inv_diag and delegates to the two-argument version.
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::LLSolve(const BaseVector<ValueType> &in, const BaseVector<ValueType> &inv_diag,
                                                 BaseVector<ValueType> *out) const {

  return LLSolve(in, out);

}

// Prepare the lower-triangular solve metadata (double). diag_unit selects
// whether L is treated as having a unit diagonal.
template <>
void GPUAcceleratorMatrixCSR<double>::LAnalyse(const bool diag_unit) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  if (diag_unit == true) {
    stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  } else {
    stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  }
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

}

// Prepare the lower-triangular solve metadata (float); see the double
// specialization.
template <>
void GPUAcceleratorMatrixCSR<float>::LAnalyse(const bool diag_unit) {

  hipsparseStatus_t stat_t;

  // L part
  stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->L_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  if (diag_unit == true) {
    stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  } else {
    stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  }
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

}

// Prepare the upper-triangular solve metadata (double). diag_unit selects
// whether U is treated as having a unit diagonal.
template <>
void GPUAcceleratorMatrixCSR<double>::UAnalyse(const bool diag_unit) {

  hipsparseStatus_t stat_t;

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  if (diag_unit == true) {
    stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  } else {
    stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  }
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

}

// Prepare the upper-triangular solve metadata (float); see the double
// specialization.
template <>
void GPUAcceleratorMatrixCSR<float>::UAnalyse(const bool diag_unit) {

  hipsparseStatus_t stat_t;

  // U part
  stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatType(this->U_mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  if (diag_unit == true) {
    stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
  } else {
    stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
  }
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

}

// Release everything LAnalyse allocated.
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LAnalyseClear(void) {

  hipsparseStatus_t stat_t;

  if (this->L_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->L_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  this->L_mat_descr_ = 0;
  this->L_mat_info_ = 0;

}

// Release everything UAnalyse allocated.
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::UAnalyseClear(void) {

  hipsparseStatus_t stat_t;

  if (this->U_mat_info_ != 0) {
    stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  if (this->U_mat_descr_ != 0) {
    stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
  }

  this->U_mat_descr_ = 0;
  this->U_mat_info_ = 0;

}

// Lower-triangular solve (double): out = L^{-1} * in. Requires LAnalyse().
template <>
bool GPUAcceleratorMatrixCSR<double>::LSolve(const BaseVector<double> &in, BaseVector<double> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
    GPUAcceleratorVector<double> *cast_out      = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    double one = double(1.0);

    // Solve L
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Lower-triangular solve (float); see the double specialization.
template <>
bool GPUAcceleratorMatrixCSR<float>::LSolve(const BaseVector<float> &in, BaseVector<float> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
    GPUAcceleratorVector<float> *cast_out      = dynamic_cast<      GPUAcceleratorVector<float>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    float one = float(1.0);

    // Solve L
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Upper-triangular solve (double): out = U^{-1} * in. Requires UAnalyse().
template <>
bool GPUAcceleratorMatrixCSR<double>::USolve(const BaseVector<double> &in, BaseVector<double> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
    GPUAcceleratorVector<double> *cast_out      = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    double one = double(1.0);

    // Solve U
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  cast_in->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Upper-triangular solve (float); see the double specialization.
template <>
bool GPUAcceleratorMatrixCSR<float>::USolve(const BaseVector<float> &in, BaseVector<float> *out) const {

  if (this->get_nnz() > 0) {

    assert(this->L_mat_descr_ != 0);
    assert(this->U_mat_descr_ != 0);
    assert(this->L_mat_info_ != 0);
    assert(this->U_mat_info_ != 0);

    assert(in.get_size() >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());

    const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
    GPUAcceleratorVector<float> *cast_out      = dynamic_cast<      GPUAcceleratorVector<float>*> (out);

    assert(cast_in != NULL);
    assert(cast_out != NULL);

    hipsparseStatus_t stat_t;

    float one = float(1.0);

    // Solve U
    stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(), &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  cast_in->vec_, cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;
}

// Copy the matrix diagonal into vec_diag via a custom kernel
// (one row per thread, 1D launch).
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractDiagonal(BaseVector<ValueType> *vec_diag) const {

  if (this->get_nnz() > 0) {

    assert(vec_diag != NULL);
    assert(vec_diag->get_size() == this->get_nrow());

    GPUAcceleratorVector<ValueType> *cast_vec_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_diag);

    int nrow = this->get_nrow();
    dim3 BlockSize(this->local_backend_.GPU_block_size);
    dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

    hipLaunchKernelGGL(( kernel_csr_extract_diag<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0,
                       nrow,
                       this->mat_.row_offset, this->mat_.col, this->mat_.val,
                       cast_vec_diag->vec_);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

  }

  return true;
}

// Copy the element-wise inverse of the diagonal into vec_inv_diag.
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal(BaseVector<ValueType> *vec_inv_diag) const {

  if (this->get_nnz() > 0) {

    assert(vec_inv_diag != NULL);
    assert(vec_inv_diag->get_size() == this->get_nrow());

    GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag);

    int nrow = this->get_nrow();
    dim3 BlockSize(this->local_backend_.GPU_block_size);
    dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

    hipLaunchKernelGGL(( kernel_csr_extract_inv_diag<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0,
                       nrow,
                       this->mat_.row_offset, this->mat_.col, this->mat_.val,
                       cast_vec_inv_diag->vec_);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

  }

  return true;
}

// Like ExtractInverseDiagonal but applies a (power-scaled) square-root
// variant inside the kernel; 'power' is forwarded to the kernel.
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal_sqrt(BaseVector<ValueType> *vec_inv_diag, int power) const {

  if (this->get_nnz() > 0) {

    assert(vec_inv_diag != NULL);
    assert(vec_inv_diag->get_size() == this->get_nrow());

    GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag);

    int nrow = this->get_nrow();
    dim3 BlockSize(this->local_backend_.GPU_block_size);
    dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

    // threads sync should not be needed
    // hipDeviceSynchronize();

    hipLaunchKernelGGL(( kernel_csr_extract_inv_diag_sqrt<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0,
                       nrow,
                       this->mat_.row_offset, this->mat_.col, this->mat_.val,
                       cast_vec_inv_diag->vec_, power);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

  }

  return true;
}

// Extract the [row_offset, row_offset+row_size) x [col_offset,
// col_offset+col_size) submatrix into 'mat': count nnz per row on the
// device, prefix-sum into sub_nnz, then copy the entries. sub_nnz becomes
// the new matrix's row_offset array (ownership transferred).
// NOTE(review): when mat_nnz == 0, sub_nnz appears never to be freed —
// possible leak on the empty-submatrix path; confirm.
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractSubMatrix(const int row_offset,
                                                          const int col_offset,
                                                          const int row_size,
                                                          const int col_size,
                                                          BaseMatrix<ValueType> *mat) const {

  assert(mat != NULL);

  assert(row_offset >= 0);
  assert(col_offset >= 0);

  assert(this->get_nrow() > 0);
  assert(this->get_ncol() > 0);

  GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (mat);
  assert(cast_mat != NULL);

  int mat_nnz = 0;
  int *row_nnz = NULL;
  //int *red_row_nnz (int *) malloc(sizeof(int)*(row_size+1));
  int *sub_nnz = NULL;
  allocate_gpu<int>(row_size+1, &sub_nnz);
  allocate_gpu(row_size+1, &row_nnz);

  // compute the nnz per row in the new matrix
  dim3 BlockSize(this->local_backend_.GPU_block_size);
  dim3 GridSize(row_size / this->local_backend_.GPU_block_size + 1);

  hipLaunchKernelGGL(( kernel_csr_extract_submatrix_row_nnz<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0,
                     this->mat_.row_offset, this->mat_.col, this->mat_.val,
                     row_offset, col_offset, row_size, col_size,
                     row_nnz);
  CHECK_CUDA_ERROR(__FILE__, __LINE__);

  // compute the new nnz by reduction
  // CPU reduction
  /*
  hipMemcpy(red_row_nnz,   // dst
            row_nnz,       // src
            (row_size+1)*sizeof(int), // size
            hipMemcpyDeviceToHost);

  int sum=0;
  for (int i=0; i<row_size; ++i) {
    int tmp = red_row_nnz[i];
    red_row_nnz[i] = sum;
    sum += tmp;
  }

  mat_nnz = red_row_nnz[row_size] = sum ;
  */

  //TODO
  //move in extra file
  // Exclusive prefix-sum of the per-row counts; sub_nnz[row_size] = total nnz.
  cum_sum<int, 256>(sub_nnz, row_nnz, row_size);

  hipMemcpy(&mat_nnz, &sub_nnz[row_size], sizeof(int), hipMemcpyDeviceToHost);

  // not empty submatrix
  if (mat_nnz > 0) {

    cast_mat->AllocateCSR(mat_nnz, row_size, col_size);

    // part of the CPU reduction section
    /*
    hipMemcpy(cast_mat->mat_.row_offset, // dst
              red_row_nnz,               // src
              (row_size+1)*sizeof(int),  // size
              hipMemcpyHostToDevice);
    */

    // Replace the freshly allocated row_offset with the prefix-summed one.
    free_gpu<int>(&cast_mat->mat_.row_offset);
    cast_mat->mat_.row_offset = sub_nnz;

    // copying the sub matrix
    hipLaunchKernelGGL(( kernel_csr_extract_submatrix_copy<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0,
                       this->mat_.row_offset, this->mat_.col, this->mat_.val,
                       row_offset, col_offset, row_size, col_size,
                       cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

  }

  free_gpu(&row_nnz);

  return true;
}

// Extract the (strictly) lower-triangular part into L.
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractL(BaseMatrix<ValueType> *L) const {

  assert(L != NULL);

  assert(this->get_nrow() > 0);
  assert(this->get_ncol() > 0);

  GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L);

  assert(cast_L != NULL);

  cast_L->Clear();

  // compute nnz per row
  int nrow = this->get_nrow();

  allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset);

  dim3 BlockSize(this->local_backend_.GPU_block_size);
  dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

  hipLaunchKernelGGL((
kernel_csr_slower_nnz_per_row<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_L->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); hipMemcpy(h_buffer+1, // dst cast_L->mat_.row_offset+1, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; hipMemcpy(cast_L->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_L->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val); // fill lower triangular part hipLaunchKernelGGL(( kernel_csr_extract_l_triangular<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_L->mat_.row_offset, cast_L->mat_.col, cast_L->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_L->nrow_ = this->get_nrow(); cast_L->ncol_ = this->get_ncol(); cast_L->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractLDiagonal(BaseMatrix<ValueType> *L) const { assert(L != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L); assert(cast_L != NULL); cast_L->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_lower_nnz_per_row<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_L->mat_.row_offset+1); 
CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); hipMemcpy(h_buffer+1, // dst cast_L->mat_.row_offset+1, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; hipMemcpy(cast_L->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_L->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val); // fill lower triangular part hipLaunchKernelGGL(( kernel_csr_extract_l_triangular<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_L->mat_.row_offset, cast_L->mat_.col, cast_L->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_L->nrow_ = this->get_nrow(); cast_L->ncol_ = this->get_ncol(); cast_L->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractU(BaseMatrix<ValueType> *U) const { assert(U != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U); assert(cast_U != NULL); cast_U->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_supper_nnz_per_row<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_U->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; 
allocate_host(nrow+1, &h_buffer); hipMemcpy(h_buffer+1, // dst cast_U->mat_.row_offset+1, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; hipMemcpy(cast_U->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_U->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val); // fill upper triangular part hipLaunchKernelGGL(( kernel_csr_extract_u_triangular<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_U->mat_.row_offset, cast_U->mat_.col, cast_U->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_U->nrow_ = this->get_nrow(); cast_U->ncol_ = this->get_ncol(); cast_U->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractUDiagonal(BaseMatrix<ValueType> *U) const { assert(U != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U); assert(cast_U != NULL); cast_U->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_upper_nnz_per_row<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_U->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); hipMemcpy(h_buffer+1, // dst cast_U->mat_.row_offset+1, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); h_buffer[0] = 0; 
for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; hipMemcpy(cast_U->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_U->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val); // fill lower triangular part hipLaunchKernelGGL(( kernel_csr_extract_u_triangular<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_U->mat_.row_offset, cast_U->mat_.col, cast_U->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_U->nrow_ = this->get_nrow(); cast_U->ncol_ = this->get_ncol(); cast_U->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MaximalIndependentSet(int &size, BaseVector<int> *permutation) const { assert(permutation != NULL); GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation); assert(cast_perm != NULL); assert(this->get_nrow() == this->get_ncol()); int *h_row_offset = NULL; int *h_col = NULL; allocate_host(this->get_nrow()+1, &h_row_offset); allocate_host(this->get_nnz(), &h_col); hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost); int *mis = NULL; allocate_host(this->get_nrow(), &mis); memset(mis, 0, sizeof(int)*this->get_nrow()); size = 0 ; for (int ai=0; ai<this->get_nrow(); ++ai) { if (mis[ai] == 0) { // set the node mis[ai] = 1; ++size ; //remove all nbh nodes (without diagonal) for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (ai != h_col[aj]) mis[h_col[aj]] = -1 ; } } int *h_perm = NULL; allocate_host(this->get_nrow(), &h_perm); int pos = 0; for (int ai=0; ai<this->get_nrow(); ++ai) { if (mis[ai] == 1) { h_perm[ai] = pos; ++pos; } else { h_perm[ai] = size 
+ ai - pos; } } // Check the permutation // // for (int ai=0; ai<this->get_nrow(); ++ai) { // assert( h_perm[ai] >= 0 ); // assert( h_perm[ai] < this->get_nrow() ); // } cast_perm->Allocate(this->get_nrow()); hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice); free_host(&h_row_offset); free_host(&h_col); free_host(&h_perm); free_host(&mis); return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MultiColoring(int &num_colors, int **size_colors, BaseVector<int> *permutation) const { assert(permutation != NULL); GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation); assert(cast_perm != NULL); // node colors (init value = 0 i.e. no color) int *color = NULL; int *h_row_offset = NULL; int *h_col = NULL; int size = this->get_nrow(); allocate_host(size, &color); allocate_host(this->get_nrow()+1, &h_row_offset); allocate_host(this->get_nnz(), &h_col); hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost); memset(color, 0, size*sizeof(int)); num_colors = 0; std::vector<bool> row_col; for (int ai=0; ai<this->get_nrow(); ++ai) { color[ai] = 1; row_col.clear(); row_col.assign(num_colors+2, false); for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (ai != h_col[aj]) row_col[color[h_col[aj]]] = true; for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (row_col[color[ai]] == true) ++color[ai]; if (color[ai] > num_colors) num_colors = color[ai]; } free_host(&h_row_offset); free_host(&h_col); allocate_host(num_colors, size_colors); set_to_zero_host(num_colors, *size_colors); int *offsets_color = NULL; allocate_host(num_colors, &offsets_color); memset(offsets_color, 0, sizeof(int)*num_colors); for (int i=0; i<this->get_nrow(); ++i) ++(*size_colors)[color[i]-1]; int total=0; for (int i=1; i<num_colors; ++i) { total += 
(*size_colors)[i-1]; offsets_color[i] = total; // LOG_INFO("offsets = " << total); } int *h_perm = NULL; allocate_host(this->get_nrow(), &h_perm); for (int i=0; i<this->get_nrow(); ++i) { h_perm[i] = offsets_color[ color[i]-1 ] ; ++offsets_color[color[i]-1]; } cast_perm->Allocate(this->get_nrow()); hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice); free_host(&h_perm); free_host(&color); free_host(&offsets_color); return true; } template <> bool GPUAcceleratorMatrixCSR<double>::Scale(const double alpha) { if (this->get_nnz() > 0) { hipblasStatus_t stat_t; stat_t = hipblasDscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle), this->get_nnz(), &alpha, this->mat_.val, 1); CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::Scale(const float alpha) { if (this->get_nnz() > 0) { hipblasStatus_t stat_t; stat_t = hipblasSscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle), this->get_nnz(), &alpha, this->mat_.val, 1); CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ScaleDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_scale_diagonal<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ScaleOffDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_scale_offdiagonal<ValueType, int>) , dim3(GridSize), 
dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_add_diagonal<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarOffDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_add_offdiagonal<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalar(const ValueType alpha) { if (this->get_nnz() > 0) { int nnz = this->get_nnz(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nnz / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_buffer_addscalar<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nnz, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMult(const BaseVector<ValueType> &diag) { assert(diag.get_size() == this->get_ncol()); const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag) ; assert(cast_diag!= NULL); if (this->get_nnz() > 0) { int nrow = 
this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_diagmatmult<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_diag->vec_, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMult_fromL(const BaseVector<ValueType> &diag) { assert(diag.get_size() == this->get_ncol()); const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag) ; assert(cast_diag!= NULL); if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_csr_diagmatmult_fromL<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_diag->vec_, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicPower(const int p) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicMatMatMult(const BaseMatrix<ValueType> &src) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) { assert(A.get_ncol() == B.get_nrow()); assert(A.get_nrow() > 0); assert(B.get_ncol() > 0); assert(B.get_nrow() > 0); const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_A = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&A); const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_B = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&B); assert(cast_mat_A != NULL); assert(cast_mat_B != NULL); this->Clear(); int m = 
cast_mat_A->get_nrow(); int n = cast_mat_B->get_ncol(); int k = cast_mat_B->get_nrow(); int nnzC = 0; allocate_gpu(m+1, &this->mat_.row_offset); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipsparseStatus_t stat_t; stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_POINTER_MODE_HOST); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseXcsrgemmNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, cast_mat_A->mat_descr_, cast_mat_A->get_nnz(), cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col, cast_mat_B->mat_descr_, cast_mat_B->get_nnz(), cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col, this->mat_descr_, this->mat_.row_offset, &nnzC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); allocate_gpu(nnzC, &this->mat_.col); CHECK_CUDA_ERROR(__FILE__, __LINE__); allocate_gpu(nnzC, &this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); this->nrow_ = m; this->ncol_ = n; this->nnz_ = nnzC; stat_t = __cusparseXcsrgemm__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, // A cast_mat_A->mat_descr_, cast_mat_A->get_nnz(), cast_mat_A->mat_.val, cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col, // B cast_mat_B->mat_descr_, cast_mat_B->get_nnz(), cast_mat_B->mat_.val, cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col, // C this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); return true; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicMatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Gershgorin(ValueType &lambda_min, ValueType &lambda_max) const { return false; } template <typename ValueType> bool 
GPUAcceleratorMatrixCSR<ValueType>::MatrixAdd(const BaseMatrix<ValueType> &mat, const ValueType alpha, const ValueType beta, const bool structure) { if (this->get_nnz() > 0) { const GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat); assert(cast_mat != NULL); assert(cast_mat->get_nrow() == this->get_nrow()); assert(cast_mat->get_ncol() == this->get_ncol()); assert(this ->get_nnz() > 0); assert(cast_mat->get_nnz() > 0); if (structure == false) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_add_csr_same_struct<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val, alpha, beta, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { // New structure with CUSPARSE routines int m = this->get_nrow(); int n = this->get_ncol(); int *csrRowPtrC = NULL; int *csrColC = NULL; ValueType *csrValC = NULL; int nnzC; allocate_gpu(m+1, &csrRowPtrC); hipsparseStatus_t stat_t; hipsparseMatDescr_t desc_mat_C = 0; stat_t = hipsparseCreateMatDescr(&desc_mat_C); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseSetMatIndexBase(desc_mat_C, HIPSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseSetMatType(desc_mat_C, HIPSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), HIPSPARSE_POINTER_MODE_HOST); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseXcsrgeamNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), m, n, this->mat_descr_, this->get_nnz(), this->mat_.row_offset, this->mat_.col, cast_mat->mat_descr_, cast_mat->get_nnz(), cast_mat->mat_.row_offset, cast_mat->mat_.col, 
desc_mat_C, csrRowPtrC, &nnzC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); allocate_gpu(nnzC, &csrColC); allocate_gpu(nnzC, &csrValC); stat_t = __cusparseXcsrgeam__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), m, n, // A &alpha, this->mat_descr_, this->get_nnz(), this->mat_.val, this->mat_.row_offset, this->mat_.col, // B &beta, cast_mat->mat_descr_, cast_mat->get_nnz(), cast_mat->mat_.val, cast_mat->mat_.row_offset, cast_mat->mat_.col, // C desc_mat_C, csrValC, csrRowPtrC, csrColC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = hipsparseDestroyMatDescr(desc_mat_C); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); this->Clear(); this->mat_.row_offset = csrRowPtrC; this->mat_.col = csrColC; this->mat_.val = csrValC; this->nrow_ = m; this->ncol_ = n; this->nnz_ = nnzC; } } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Compress(const ValueType drop_off) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<ValueType> tmp(this->local_backend_); tmp.CopyFrom(*this); int mat_nnz = 0; int *row_offset = NULL; allocate_gpu(this->get_nrow()+1, &row_offset); int *mat_row_offset = NULL; allocate_gpu(this->get_nrow()+1, &mat_row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, this->get_nrow()+1, row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_csr_compress_count_nrow<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset, this->mat_.col, this->mat_.val, this->get_nrow(), drop_off, row_offset); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO cum_sum<int, 256>(mat_row_offset, row_offset, this->get_nrow()); // get the new mat nnz hipMemcpy(&mat_nnz, &mat_row_offset[this->get_nrow()], sizeof(int), hipMemcpyDeviceToHost); this->AllocateCSR(mat_nnz, this->get_nrow(), this->get_ncol()); // TODO - just exchange memory 
pointers // copy row_offset hipMemcpy(this->mat_.row_offset, mat_row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToDevice); // copy col and val hipLaunchKernelGGL(( kernel_csr_compress_copy<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, tmp.mat_.row_offset, tmp.mat_.col, tmp.mat_.val, tmp.get_nrow(), drop_off, this->mat_.row_offset, this->mat_.col, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&row_offset); free_gpu(&mat_row_offset); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::Transpose(void) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<double> tmp(this->local_backend_); tmp.CopyFrom(*this); this->Clear(); this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow()); hipsparseStatus_t stat_t; stat_t = hipsparseDcsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(), tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col, this->mat_.val, this->mat_.col, this->mat_.row_offset, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::Transpose(void) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<float> tmp(this->local_backend_); tmp.CopyFrom(*this); this->Clear(); this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow()); hipsparseStatus_t stat_t; stat_t = hipsparseScsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(), tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col, this->mat_.val, this->mat_.col, this->mat_.row_offset, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template class GPUAcceleratorMatrixCSR<double>; template class GPUAcceleratorMatrixCSR<float>; }
feca3bf6ad74604857450c8453647fbd3d277ac2.cu
// ************************************************************************* // // PARALUTION www.paralution.com // // Copyright (C) 2012-2014 Dimitar Lukarski // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************* // PARALUTION version 0.7.0 #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_dia.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_matrix_bcsr.hpp" #include "gpu_matrix_dense.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_csr.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_csr.hpp" #include "cuda_kernels_vector.hpp" #include "cusparse_csr.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <cuda.h> #include <cusparse_v2.h> #include <assert.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR(const 
Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::GPUAcceleratorMatrixCSR()", "constructor with local_backend"); this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->set_backend(local_backend); this->L_mat_descr_ = 0; this->U_mat_descr_ = 0; this->L_mat_info_ = 0; this->U_mat_info_ = 0; this->mat_descr_ = 0; this->tmp_vec_ = NULL; CHECK_CUDA_ERROR(__FILE__, __LINE__); cusparseStatus_t stat_t; stat_t = cusparseCreateMatDescr(&this->mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->mat_descr_, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->mat_descr_, CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixCSR<ValueType>::~GPUAcceleratorMatrixCSR() { LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::~GPUAcceleratorMatrixCSR()", "destructor"); this->Clear(); cusparseStatus_t stat_t; stat_t = cusparseDestroyMatDescr(this->mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixCSR<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::AllocateCSR(const int nnz, const int nrow, const int ncol) { assert(nnz >= 0); assert(ncol >= 0); assert(nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (nnz > 0) { allocate_gpu(nrow+1, &this->mat_.row_offset); allocate_gpu(nnz, &this->mat_.col); allocate_gpu(nnz, &this->mat_.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nrow+1, mat_.row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.val); this->nrow_ = nrow; 
this->ncol_ = ncol; this->nnz_ = nnz; } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SetDataPtrCSR(int **row_offset, int **col, ValueType **val, const int nnz, const int nrow, const int ncol) { assert(*row_offset != NULL); assert(*col != NULL); assert(*val != NULL); assert(nnz > 0); assert(nrow > 0); assert(ncol > 0); this->Clear(); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; cudaDeviceSynchronize(); this->mat_.row_offset = *row_offset; this->mat_.col = *col; this->mat_.val = *val; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::LeaveDataPtrCSR(int **row_offset, int **col, ValueType **val) { assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); assert(this->get_nnz() > 0); cudaDeviceSynchronize(); // see free_host function for details *row_offset = this->mat_.row_offset; *col = this->mat_.col; *val = this->mat_.val; this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.row_offset); free_gpu(&this->mat_.col); free_gpu(&this->mat_.val); this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; this->LUAnalyseClear(); this->LLAnalyseClear(); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::Zeros() { if (this->get_nnz() > 0) set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, this->get_nnz(), mat_.val); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); 
assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { cudaMemcpyAsync(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); 
FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { cudaMemcpyAsync(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size 
cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename 
ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); 
assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_nnz() > 0) { cudaMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); 
cudaMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_csr); return true; } /* const GPUAcceleratorMatrixCOO<ValueType> *cast_mat_coo; if ((cast_mat_coo = dynamic_cast<const GPUAcceleratorMatrixCOO<ValueType>*> (&mat)) != NULL) { this->Clear(); TODO Allocate copy colmn copy val cusparseStatus_t cusparseXcoo2csr(cusparseHandle_t handle, const int *cooRowInd, int nnz, int m, int *csrRowPtr, cusparseIndexBase_t idxBase); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_coo->get_nrow(); this->ncol_ = cast_mat_coo->get_ncol(); this->nnz_ = cast_mat_coo->get_nnz(); return true; } */ /* const GPUAcceleratorMatrixDENSE<ValueType> *cast_mat_dense; if ((cast_mat_dense = dynamic_cast<const GPUAcceleratorMatrixDENSE<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_dense->get_nrow(); this->ncol_ = cast_mat_dense->get_ncol(); this->nnz_ = nnz; return true; } */ /* const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia; if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_dia->get_nrow(); this->ncol_ = cast_mat_dia->get_ncol(); this->nnz_ = 
nnz ; return true; } */ /* const GPUAcceleratorMatrixELL<ValueType> *cast_mat_ell; if ((cast_mat_ell = dynamic_cast<const GPUAcceleratorMatrixELL<ValueType>*> (&mat)) != NULL) { this->Clear(); int nnz = 0; FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_ell->get_nrow(); this->ncol_ = cast_mat_ell->get_ncol(); this->nnz_ = nnz ; return true; } */ /* const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr; if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_mcsr->get_nrow(); this->ncol_ = cast_mat_mcsr->get_ncol(); this->nnz_ = cast_mat_mcsr->get_nnz(); return true; } */ /* const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); int nnz = 0; this->nrow_ = cast_mat_hyb->get_nrow(); this->ncol_ = cast_mat_hyb->get_ncol(); this->nnz_ = nnz; return true; } */ return false; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Permute( const BaseVector<int> &permutation){ assert(&permutation != NULL); assert(permutation.get_size() == this->get_nrow()); assert(permutation.get_size() == this->get_ncol()); if (this->get_nnz() > 0) { int *d_nnzr = NULL; int *d_nnzrPerm = NULL; int *d_nnzPerm = NULL; int *d_offset = NULL; ValueType *d_data = NULL; allocate_gpu<int>(this->get_nrow(), &d_nnzr); allocate_gpu<int>(this->get_nrow(), &d_nnzrPerm); allocate_gpu<int>((this->get_nrow()+1), &d_nnzPerm); allocate_gpu<ValueType>(this->get_nnz(), &d_data); allocate_gpu<int>(this->get_nnz(), &d_offset); const GPUAcceleratorVector<int> *cast_perm = dynamic_cast<const GPUAcceleratorVector<int>*> (&permutation); assert(cast_perm != NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_calc_row_nnz<int> <<< 
GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, d_nnzr); CHECK_CUDA_ERROR(__FILE__,__LINE__); kernel_permute_row_nnz<int> <<< GridSize, BlockSize>>>(this->get_nrow(), d_nnzr, cast_perm->vec_, d_nnzrPerm); CHECK_CUDA_ERROR(__FILE__,__LINE__); //TODO //move in extra file cum_sum<int, 256>(d_nnzPerm, d_nnzrPerm, this->get_nrow()); kernel_permute_rows<ValueType, int> <<<GridSize, BlockSize>>>( this->get_nrow(), this->mat_.row_offset, d_nnzPerm, this->mat_.col, this->mat_.val, cast_perm->vec_, d_nnzr, d_offset, d_data); CHECK_CUDA_ERROR(__FILE__,__LINE__); free_gpu<int>(&this->mat_.row_offset); this->mat_.row_offset = d_nnzPerm; int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer); dim3 BlockSize2(this->local_backend_.GPU_block_size); dim3 GridSize2(this->local_backend_.GPU_wrap * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; kernel_max<int, int, 256> <<<GridSize2, BlockSize2>>> (nrow, d_nnzr, d_buffer, GROUP_SIZE, LOCAL_SIZE); CHECK_CUDA_ERROR(__FILE__, __LINE__); FinalReduceSize = this->local_backend_.GPU_wrap * 4; allocate_host(FinalReduceSize, &h_buffer); cudaMemcpy(h_buffer, // dst d_buffer, // src FinalReduceSize*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&d_buffer); int maxnnzrow = 0; for (int i=0; i<FinalReduceSize; ++i) if (maxnnzrow < h_buffer[i]) maxnnzrow = h_buffer[i]; free_host(&h_buffer); //TODO what should happen in this case?? 
if (maxnnzrow > 1024) FATAL_ERROR(__FILE__, __LINE__) else if (maxnnzrow > 512) kernel_permute_cols<ValueType, int, 1024> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 256) kernel_permute_cols<ValueType, int, 512> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 128) kernel_permute_cols<ValueType, int, 256> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 64) kernel_permute_cols<ValueType, int, 128> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 32) kernel_permute_cols<ValueType, int, 64> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 16) kernel_permute_cols<ValueType, int, 32> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 8) kernel_permute_cols<ValueType, int, 16> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else if (maxnnzrow > 4) kernel_permute_cols<ValueType, int, 8> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); else kernel_permute_cols<ValueType, int, 4> <<<GridSize, BlockSize>>>(this->get_nrow(), this->mat_.row_offset, cast_perm->vec_, d_nnzrPerm, d_offset, d_data, this->mat_.col, this->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); 
free_gpu<int>(&d_offset); free_gpu<ValueType>(&d_data); free_gpu<int>(&d_nnzrPerm); free_gpu<int>(&d_nnzr); } return true; } template <> void GPUAcceleratorMatrixCSR<float>::Apply(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; const float scalar = 1.0; const float beta = 0.0; stat_t = cusparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse instead... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<double>::Apply(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; const double scalar = 1.0; const double beta = 0.0; stat_t = cusparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse instead... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_spmv_scalar<double, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<float>::ApplyAdd(const BaseVector<float> &in, const float scalar, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; const float beta = 1.0; stat_t = cusparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse now... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> void GPUAcceleratorMatrixCSR<double>::ApplyAdd(const BaseVector<double> &in, const double scalar, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; const double beta = 1.0; stat_t = cusparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar, this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, cast_in->vec_, &beta, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); /* // Using cusparse now... int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_spmv_scalar<double, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); */ } } template <> bool GPUAcceleratorMatrixCSR<float>::ILU0Factorize(void) { if (this->get_nnz() > 0) { cusparseStatus_t stat_t; cusparseSolveAnalysisInfo_t infoA = 0; stat_t = cusparseCreateSolveAnalysisInfo(&infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseScsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDestroySolveAnalysisInfo(infoA); 
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::ILU0Factorize(void) { if (this->get_nnz() > 0) { cusparseStatus_t stat_t; cusparseSolveAnalysisInfo_t infoA = 0; stat_t = cusparseCreateSolveAnalysisInfo(&infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDcsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDestroySolveAnalysisInfo(infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::ICFactorize(BaseVector<float> *inv_diag) { if (this->get_nnz() > 0) { cusparseStatus_t stat_t; cusparseSolveAnalysisInfo_t infoA = 0; stat_t = cusparseCreateSolveAnalysisInfo(&infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->mat_descr_, CUSPARSE_MATRIX_TYPE_SYMMETRIC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, infoA); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = 
cusparseScsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                             CUSPARSE_OPERATION_NON_TRANSPOSE,
                             this->get_nrow(),
                             this->mat_descr_,
                             this->mat_.val, this->mat_.row_offset, this->mat_.col,
                             infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Release the solve-analysis info: it is only needed during the
    // factorization itself. ILU0Factorize destroys its info the same way;
    // it was leaked here before.
    stat_t = cusparseDestroySolveAnalysisInfo(infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;

}

// In-place incomplete Cholesky (IC0) factorization, double precision.
// Operates on the lower-triangular part of this matrix, which is flagged
// SYMMETRIC for the duration of the factorization. The inv_diag argument is
// not used by this backend implementation.
template <>
bool GPUAcceleratorMatrixCSR<double>::ICFactorize(BaseVector<double> *inv_diag) {

  if (this->get_nnz() > 0) {

    cusparseStatus_t stat_t;
    cusparseSolveAnalysisInfo_t infoA = 0;

    stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // IC0 consumes the lower triangle of a symmetric matrix
    stat_t = cusparseSetMatType(this->mat_descr_, CUSPARSE_MATRIX_TYPE_SYMMETRIC);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseSetMatFillMode(this->mat_descr_, CUSPARSE_FILL_MODE_LOWER);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseSetMatDiagType(this->mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                     CUSPARSE_OPERATION_NON_TRANSPOSE,
                                     this->get_nrow(), this->get_nnz(),
                                     this->mat_descr_,
                                     this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                     infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    stat_t = cusparseDcsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                             CUSPARSE_OPERATION_NON_TRANSPOSE,
                             this->get_nrow(),
                             this->mat_descr_,
                             this->mat_.val, this->mat_.row_offset, this->mat_.col,
                             infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Release the solve-analysis info (was leaked before, cf. ILU0Factorize)
    stat_t = cusparseDestroySolveAnalysisInfo(infoA);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;

}

// Builds the cuSPARSE descriptors and triangular-solve analysis data for the
// L and U factors of an already-computed ILU factorization (float).
template <>
void GPUAcceleratorMatrixCSR<float>::LUAnalyse(void) {

  cusparseStatus_t stat_t;

  // L part
  stat_t = cusparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t =
cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // The L factor of an ILU factorization has a unit diagonal
  stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t = cusparseCreateMatDescr(&this->U_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_UPPER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Analysis
  // L
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   CUSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->L_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U
  stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                   CUSPARSE_OPERATION_NON_TRANSPOSE,
                                   this->get_nrow(), this->get_nnz(),
                                   this->U_mat_descr_,
                                   this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                   this->U_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // Temporary vector used by the two-stage triangular solve in LUSolve()
  assert(this->get_ncol() == this->get_nrow());
  assert(this->tmp_vec_ == NULL);
  // Pass the backend descriptor, as the double-precision LUAnalyse and both
  // LLAnalyse variants do; the default constructor left the temporary vector
  // without its backend information.
  this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
  assert(this->tmp_vec_ != NULL);

  this->tmp_vec_->Allocate(this->get_nrow());

}

template <>
void
GPUAcceleratorMatrixCSR<double>::LUAnalyse(void) { cusparseStatus_t stat_t; // L part stat_t = cusparseCreateMatDescr(&this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // U part stat_t = cusparseCreateMatDescr(&this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_UPPER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), 
CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); assert(this->get_ncol() == this->get_nrow()); assert(this->tmp_vec_ == NULL); this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_); assert(this->tmp_vec_ != NULL); tmp_vec_->Allocate(this->get_nrow()); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::LUAnalyseClear(void) { cusparseStatus_t stat_t; if (this->L_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->L_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->U_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->U_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } this->L_mat_descr_ = 0; this->U_mat_descr_ = 0; this->L_mat_info_ = 0; this->U_mat_info_ = 0; delete this->tmp_vec_ ; this->tmp_vec_ = NULL; } template <> bool GPUAcceleratorMatrixCSR<float>::LUSolve(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); assert(this->tmp_vec_ != NULL); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; float one = float(1.0); // Solve L stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_, cast_in->vec_, tmp_vec_->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Solve U stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_, tmp_vec_->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::LUSolve(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol());
    assert(out->get_size() == this->get_nrow());
    assert(this->get_ncol() == this->get_nrow());
    // tmp_vec_ is created by LUAnalyse(); the float variant of LUSolve
    // asserts this precondition too -- it was missing here.
    assert(this->tmp_vec_ != NULL);

    const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
    GPUAcceleratorVector<double> *cast_out      = dynamic_cast<      GPUAcceleratorVector<double>*> (out);

    assert(cast_in != NULL);
    assert(cast_out!= NULL);

    cusparseStatus_t stat_t;

    double one = double(1.0);

    // Solve L
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  CUSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(),
                                  &one,
                                  this->L_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->L_mat_info_,
                                  cast_in->vec_,
                                  this->tmp_vec_->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

    // Solve U
    stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
                                  CUSPARSE_OPERATION_NON_TRANSPOSE,
                                  this->get_nrow(),
                                  &one,
                                  this->U_mat_descr_,
                                  this->mat_.val, this->mat_.row_offset, this->mat_.col,
                                  this->U_mat_info_,
                                  this->tmp_vec_->vec_,
                                  cast_out->vec_);
    CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  }

  return true;

}

// Builds the cuSPARSE descriptors and analysis info used by LLSolve() for a
// Cholesky (LL^T) factorization, float precision.
template <>
void GPUAcceleratorMatrixCSR<float>::LLAnalyse(void) {

  cusparseStatus_t stat_t;

  // L part
  stat_t = cusparseCreateMatDescr(&this->L_mat_descr_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_TRIANGULAR);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
  CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);

  // U part
  stat_t =
cusparseCreateMatDescr(&this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_TRIANGULAR); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); assert(this->get_ncol() == this->get_nrow()); assert(this->tmp_vec_ == NULL); this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_); assert(this->tmp_vec_ != NULL); tmp_vec_->Allocate(this->get_nrow()); } template <> void GPUAcceleratorMatrixCSR<double>::LLAnalyse(void) { cusparseStatus_t stat_t; // L part stat_t = cusparseCreateMatDescr(&this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_TRIANGULAR); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); 
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // U part stat_t = cusparseCreateMatDescr(&this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_TRIANGULAR); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); assert(this->get_ncol() == this->get_nrow()); assert(this->tmp_vec_ == NULL); this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_); assert(this->tmp_vec_ != NULL); 
tmp_vec_->Allocate(this->get_nrow()); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::LLAnalyseClear(void) { cusparseStatus_t stat_t; if (this->L_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->L_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->U_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->U_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } this->L_mat_descr_ = 0; this->U_mat_descr_ = 0; this->L_mat_info_ = 0; this->U_mat_info_ = 0; delete this->tmp_vec_ ; this->tmp_vec_ = NULL; } template <> bool GPUAcceleratorMatrixCSR<float>::LLSolve(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; float one = float(1.0); // Solve L stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_, cast_in->vec_, this->tmp_vec_->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Solve U stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_TRANSPOSE, this->get_nrow(), &one, this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_, this->tmp_vec_->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::LLSolve(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; double one = double(1.0); // Solve L stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_, cast_in->vec_, this->tmp_vec_->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Solve U stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_TRANSPOSE, this->get_nrow(), &one, this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_, this->tmp_vec_->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::LLSolve(const BaseVector<ValueType> &in, const BaseVector<ValueType> &inv_diag, BaseVector<ValueType> *out) const { return LLSolve(in, out); } template <> void GPUAcceleratorMatrixCSR<double>::LAnalyse(const bool diag_unit) { cusparseStatus_t stat_t; // L part stat_t = cusparseCreateMatDescr(&this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); if (diag_unit == true) { stat_t = 
cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT); } else { stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); } CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <> void GPUAcceleratorMatrixCSR<float>::LAnalyse(const bool diag_unit) { cusparseStatus_t stat_t; // L part stat_t = cusparseCreateMatDescr(&this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->L_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->L_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->L_mat_descr_, CUSPARSE_FILL_MODE_LOWER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); if (diag_unit == true) { stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT); } else { stat_t = cusparseSetMatDiagType(this->L_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); } CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <> void GPUAcceleratorMatrixCSR<double>::UAnalyse(const bool 
diag_unit) { cusparseStatus_t stat_t; // U upart stat_t = cusparseCreateMatDescr(&this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_UPPER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); if (diag_unit == true) { stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT); } else { stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_NON_UNIT); } CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <> void GPUAcceleratorMatrixCSR<float>::UAnalyse(const bool diag_unit) { cusparseStatus_t stat_t; // U part stat_t = cusparseCreateMatDescr(&this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(this->U_mat_descr_,CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(this->U_mat_descr_,CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatFillMode(this->U_mat_descr_, CUSPARSE_FILL_MODE_UPPER); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); if (diag_unit == true) { stat_t = cusparseSetMatDiagType(this->U_mat_descr_, CUSPARSE_DIAG_TYPE_UNIT); } else { stat_t = cusparseSetMatDiagType(this->U_mat_descr_, 
CUSPARSE_DIAG_TYPE_NON_UNIT); } CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); // Analysis stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), this->get_nnz(), this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::LAnalyseClear(void) { cusparseStatus_t stat_t; if (this->L_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->L_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->L_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } this->L_mat_descr_ = 0; this->L_mat_info_ = 0; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::UAnalyseClear(void) { cusparseStatus_t stat_t; if (this->U_mat_info_ != 0) { stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } if (this->U_mat_descr_ != 0) { stat_t = cusparseDestroyMatDescr(this->U_mat_descr_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } this->U_mat_descr_ = 0; this->U_mat_info_ = 0; } template <> bool GPUAcceleratorMatrixCSR<double>::LSolve(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; double one = double(1.0); // Solve L stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_, cast_in->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::LSolve(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; float one = float(1.0); // Solve L stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->L_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->L_mat_info_, cast_in->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::USolve(const BaseVector<double> &in, BaseVector<double> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ; GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; double one = double(1.0); // Solve U stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_, cast_in->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::USolve(const BaseVector<float> &in, BaseVector<float> *out) const { if (this->get_nnz() > 0) { assert(this->L_mat_descr_ != 0); assert(this->U_mat_descr_ != 0); assert(this->L_mat_info_ != 0); assert(this->U_mat_info_ != 0); assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); assert(this->get_ncol() == this->get_nrow()); const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ; GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); cusparseStatus_t stat_t; float one = float(1.0); // Solve U stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, this->get_nrow(), &one, this->U_mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col, this->U_mat_info_, cast_in->vec_, cast_out->vec_); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractDiagonal(BaseVector<ValueType> *vec_diag) const { if (this->get_nnz() > 0) { assert(vec_diag != NULL); assert(vec_diag->get_size() == this->get_nrow()); GPUAcceleratorVector<ValueType> *cast_vec_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_diag) ; int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_extract_diag<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_vec_diag->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal(BaseVector<ValueType> *vec_inv_diag) const { if (this->get_nnz() > 0) { assert(vec_inv_diag != NULL); assert(vec_inv_diag->get_size() == this->get_nrow()); GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag) ; int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); 
kernel_csr_extract_inv_diag<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_vec_inv_diag->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal_sqrt(BaseVector<ValueType> *vec_inv_diag, int power) const { if (this->get_nnz() > 0) { assert(vec_inv_diag != NULL); assert(vec_inv_diag->get_size() == this->get_nrow()); GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag) ; int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); // threads sync should not be needed // cudaDeviceSynchronize(); kernel_csr_extract_inv_diag_sqrt<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_vec_inv_diag->vec_, power); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractSubMatrix(const int row_offset, const int col_offset, const int row_size, const int col_size, BaseMatrix<ValueType> *mat) const { assert(mat != NULL); assert(row_offset >= 0); assert(col_offset >= 0); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (mat) ; assert(cast_mat != NULL); int mat_nnz = 0; int *row_nnz = NULL; //int *red_row_nnz (int *) malloc(sizeof(int)*(row_size+1)); int *sub_nnz = NULL; allocate_gpu<int>(row_size+1, &sub_nnz); allocate_gpu(row_size+1, &row_nnz); // compute the nnz per row in the new matrix dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(row_size / this->local_backend_.GPU_block_size + 1); kernel_csr_extract_submatrix_row_nnz<ValueType, int> <<<GridSize, BlockSize>>> (this->mat_.row_offset, this->mat_.col, this->mat_.val, 
row_offset, col_offset, row_size, col_size, row_nnz); CHECK_CUDA_ERROR(__FILE__, __LINE__); // compute the new nnz by reduction // CPU reduction /* cudaMemcpy(red_row_nnz, // dst row_nnz, // src (row_size+1)*sizeof(int), // size cudaMemcpyDeviceToHost); int sum=0; for (int i=0; i<row_size; ++i) { int tmp = red_row_nnz[i]; red_row_nnz[i] = sum; sum += tmp; } mat_nnz = red_row_nnz[row_size] = sum ; */ //TODO //move in extra file cum_sum<int, 256>(sub_nnz, row_nnz, row_size); cudaMemcpy(&mat_nnz, &sub_nnz[row_size], sizeof(int), cudaMemcpyDeviceToHost); // not empty submatrix if (mat_nnz > 0) { cast_mat->AllocateCSR(mat_nnz, row_size, col_size); // part of the CPU reduction section /* cudaMemcpy(cast_mat->mat_.row_offset, // dst red_row_nnz, // src (row_size+1)*sizeof(int), // size cudaMemcpyHostToDevice); */ free_gpu<int>(&cast_mat->mat_.row_offset); cast_mat->mat_.row_offset = sub_nnz; // copying the sub matrix kernel_csr_extract_submatrix_copy<ValueType, int> <<<GridSize, BlockSize>>> (this->mat_.row_offset, this->mat_.col, this->mat_.val, row_offset, col_offset, row_size, col_size, cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } free_gpu(&row_nnz); return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractL(BaseMatrix<ValueType> *L) const { assert(L != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L); assert(cast_L != NULL); cast_L->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_slower_nnz_per_row<int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, cast_L->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to 
obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); cudaMemcpy(h_buffer+1, // dst cast_L->mat_.row_offset+1, // src nrow*sizeof(int), // size cudaMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; cudaMemcpy(cast_L->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size cudaMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_L->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val); // fill lower triangular part kernel_csr_extract_l_triangular<ValueType, int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_L->mat_.row_offset, cast_L->mat_.col, cast_L->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_L->nrow_ = this->get_nrow(); cast_L->ncol_ = this->get_ncol(); cast_L->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractLDiagonal(BaseMatrix<ValueType> *L) const { assert(L != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L); assert(cast_L != NULL); cast_L->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_lower_nnz_per_row<int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, cast_L->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); cudaMemcpy(h_buffer+1, // dst cast_L->mat_.row_offset+1, // src nrow*sizeof(int), // size 
cudaMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; cudaMemcpy(cast_L->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size cudaMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_L->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val); // fill lower triangular part kernel_csr_extract_l_triangular<ValueType, int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_L->mat_.row_offset, cast_L->mat_.col, cast_L->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_L->nrow_ = this->get_nrow(); cast_L->ncol_ = this->get_ncol(); cast_L->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractU(BaseMatrix<ValueType> *U) const { assert(U != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U); assert(cast_U != NULL); cast_U->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_supper_nnz_per_row<int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, cast_U->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); cudaMemcpy(h_buffer+1, // dst cast_U->mat_.row_offset+1, // src nrow*sizeof(int), // size cudaMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; cudaMemcpy(cast_U->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size 
cudaMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_U->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val); // fill upper triangular part kernel_csr_extract_u_triangular<ValueType, int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_U->mat_.row_offset, cast_U->mat_.col, cast_U->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_U->nrow_ = this->get_nrow(); cast_U->ncol_ = this->get_ncol(); cast_U->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ExtractUDiagonal(BaseMatrix<ValueType> *U) const { assert(U != NULL); assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U); assert(cast_U != NULL); cast_U->Clear(); // compute nnz per row int nrow = this->get_nrow(); allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_upper_nnz_per_row<int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, cast_U->mat_.row_offset+1); CHECK_CUDA_ERROR(__FILE__,__LINE__); // partial sum row_nnz to obtain row_offset vector // TODO currently performing partial sum on host int *h_buffer = NULL; allocate_host(nrow+1, &h_buffer); cudaMemcpy(h_buffer+1, // dst cast_U->mat_.row_offset+1, // src nrow*sizeof(int), // size cudaMemcpyDeviceToHost); h_buffer[0] = 0; for (int i=1; i<nrow+1; ++i) h_buffer[i] += h_buffer[i-1]; int nnz_L = h_buffer[nrow]; cudaMemcpy(cast_U->mat_.row_offset, // dst h_buffer, // src (nrow+1)*sizeof(int), // size cudaMemcpyHostToDevice); free_host(&h_buffer); // end TODO // allocate lower triangular part structure allocate_gpu<int>(nnz_L, &cast_U->mat_.col); allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val); // fill lower triangular part 
kernel_csr_extract_u_triangular<ValueType, int> <<<GridSize, BlockSize>>>(nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_U->mat_.row_offset, cast_U->mat_.col, cast_U->mat_.val); CHECK_CUDA_ERROR(__FILE__,__LINE__); cast_U->nrow_ = this->get_nrow(); cast_U->ncol_ = this->get_ncol(); cast_U->nnz_ = nnz_L; return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MaximalIndependentSet(int &size, BaseVector<int> *permutation) const { assert(permutation != NULL); GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation); assert(cast_perm != NULL); assert(this->get_nrow() == this->get_ncol()); int *h_row_offset = NULL; int *h_col = NULL; allocate_host(this->get_nrow()+1, &h_row_offset); allocate_host(this->get_nnz(), &h_col); cudaMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), cudaMemcpyDeviceToHost); int *mis = NULL; allocate_host(this->get_nrow(), &mis); memset(mis, 0, sizeof(int)*this->get_nrow()); size = 0 ; for (int ai=0; ai<this->get_nrow(); ++ai) { if (mis[ai] == 0) { // set the node mis[ai] = 1; ++size ; //remove all nbh nodes (without diagonal) for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (ai != h_col[aj]) mis[h_col[aj]] = -1 ; } } int *h_perm = NULL; allocate_host(this->get_nrow(), &h_perm); int pos = 0; for (int ai=0; ai<this->get_nrow(); ++ai) { if (mis[ai] == 1) { h_perm[ai] = pos; ++pos; } else { h_perm[ai] = size + ai - pos; } } // Check the permutation // // for (int ai=0; ai<this->get_nrow(); ++ai) { // assert( h_perm[ai] >= 0 ); // assert( h_perm[ai] < this->get_nrow() ); // } cast_perm->Allocate(this->get_nrow()); cudaMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), cudaMemcpyHostToDevice); free_host(&h_row_offset); free_host(&h_col); free_host(&h_perm); free_host(&mis); return true; } template <typename ValueType> 
bool GPUAcceleratorMatrixCSR<ValueType>::MultiColoring(int &num_colors, int **size_colors, BaseVector<int> *permutation) const { assert(permutation != NULL); GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation); assert(cast_perm != NULL); // node colors (init value = 0 i.e. no color) int *color = NULL; int *h_row_offset = NULL; int *h_col = NULL; int size = this->get_nrow(); allocate_host(size, &color); allocate_host(this->get_nrow()+1, &h_row_offset); allocate_host(this->get_nnz(), &h_col); cudaMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), cudaMemcpyDeviceToHost); memset(color, 0, size*sizeof(int)); num_colors = 0; std::vector<bool> row_col; for (int ai=0; ai<this->get_nrow(); ++ai) { color[ai] = 1; row_col.clear(); row_col.assign(num_colors+2, false); for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (ai != h_col[aj]) row_col[color[h_col[aj]]] = true; for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj) if (row_col[color[ai]] == true) ++color[ai]; if (color[ai] > num_colors) num_colors = color[ai]; } free_host(&h_row_offset); free_host(&h_col); allocate_host(num_colors, size_colors); set_to_zero_host(num_colors, *size_colors); int *offsets_color = NULL; allocate_host(num_colors, &offsets_color); memset(offsets_color, 0, sizeof(int)*num_colors); for (int i=0; i<this->get_nrow(); ++i) ++(*size_colors)[color[i]-1]; int total=0; for (int i=1; i<num_colors; ++i) { total += (*size_colors)[i-1]; offsets_color[i] = total; // LOG_INFO("offsets = " << total); } int *h_perm = NULL; allocate_host(this->get_nrow(), &h_perm); for (int i=0; i<this->get_nrow(); ++i) { h_perm[i] = offsets_color[ color[i]-1 ] ; ++offsets_color[color[i]-1]; } cast_perm->Allocate(this->get_nrow()); cudaMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), cudaMemcpyHostToDevice); free_host(&h_perm); 
free_host(&color); free_host(&offsets_color); return true; } template <> bool GPUAcceleratorMatrixCSR<double>::Scale(const double alpha) { if (this->get_nnz() > 0) { cublasStatus_t stat_t; stat_t = cublasDscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle), this->get_nnz(), &alpha, this->mat_.val, 1); CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::Scale(const float alpha) { if (this->get_nnz() > 0) { cublasStatus_t stat_t; stat_t = cublasSscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle), this->get_nnz(), &alpha, this->mat_.val, 1); CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ScaleDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_scale_diagonal<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::ScaleOffDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_scale_offdiagonal<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_diagonal<ValueType, int> <<<GridSize, BlockSize>>> 
(nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarOffDiagonal(const ValueType alpha) { if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_offdiagonal<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::AddScalar(const ValueType alpha) { if (this->get_nnz() > 0) { int nnz = this->get_nnz(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nnz / this->local_backend_.GPU_block_size + 1); kernel_buffer_addscalar<ValueType, int> <<<GridSize, BlockSize>>> (nnz, alpha, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMult(const BaseVector<ValueType> &diag) { assert(diag.get_size() == this->get_ncol()); const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag) ; assert(cast_diag!= NULL); if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_diagmatmult<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, cast_diag->vec_, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMult_fromL(const BaseVector<ValueType> &diag) { assert(diag.get_size() == this->get_ncol()); const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> 
(&diag) ; assert(cast_diag!= NULL); if (this->get_nnz() > 0) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); cudaDeviceSynchronize(); kernel_csr_diagmatmult_fromL<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, cast_diag->vec_, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } return true; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicPower(const int p) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicMatMatMult(const BaseMatrix<ValueType> &src) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) { assert(A.get_ncol() == B.get_nrow()); assert(A.get_nrow() > 0); assert(B.get_ncol() > 0); assert(B.get_nrow() > 0); const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_A = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&A); const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_B = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&B); assert(cast_mat_A != NULL); assert(cast_mat_B != NULL); this->Clear(); int m = cast_mat_A->get_nrow(); int n = cast_mat_B->get_ncol(); int k = cast_mat_B->get_nrow(); int nnzC = 0; allocate_gpu(m+1, &this->mat_.row_offset); CHECK_CUDA_ERROR(__FILE__, __LINE__); cusparseStatus_t stat_t; stat_t = cusparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_POINTER_MODE_HOST); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseXcsrgemmNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, cast_mat_A->mat_descr_, cast_mat_A->get_nnz(), cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col, cast_mat_B->mat_descr_, cast_mat_B->get_nnz(), 
cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col, this->mat_descr_, this->mat_.row_offset, &nnzC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); allocate_gpu(nnzC, &this->mat_.col); CHECK_CUDA_ERROR(__FILE__, __LINE__); allocate_gpu(nnzC, &this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); this->nrow_ = m; this->ncol_ = n; this->nnz_ = nnzC; stat_t = __cusparseXcsrgemm__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, // A cast_mat_A->mat_descr_, cast_mat_A->get_nnz(), cast_mat_A->mat_.val, cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col, // B cast_mat_B->mat_descr_, cast_mat_B->get_nnz(), cast_mat_B->mat_.val, cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col, // C this->mat_descr_, this->mat_.val, this->mat_.row_offset, this->mat_.col); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); return true; } template <typename ValueType> void GPUAcceleratorMatrixCSR<ValueType>::SymbolicMatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) { FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Gershgorin(ValueType &lambda_min, ValueType &lambda_max) const { return false; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::MatrixAdd(const BaseMatrix<ValueType> &mat, const ValueType alpha, const ValueType beta, const bool structure) { if (this->get_nnz() > 0) { const GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat); assert(cast_mat != NULL); assert(cast_mat->get_nrow() == this->get_nrow()); assert(cast_mat->get_ncol() == this->get_ncol()); assert(this ->get_nnz() > 0); assert(cast_mat->get_nnz() > 0); if (structure == false) { int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_csr_add_csr_same_struct<ValueType, int> 
<<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val, alpha, beta, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { // New structure with CUSPARSE routines int m = this->get_nrow(); int n = this->get_ncol(); int *csrRowPtrC = NULL; int *csrColC = NULL; ValueType *csrValC = NULL; int nnzC; allocate_gpu(m+1, &csrRowPtrC); cusparseStatus_t stat_t; cusparseMatDescr_t desc_mat_C = 0; stat_t = cusparseCreateMatDescr(&desc_mat_C); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatIndexBase(desc_mat_C, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetMatType(desc_mat_C, CUSPARSE_MATRIX_TYPE_GENERAL); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), CUSPARSE_POINTER_MODE_HOST); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseXcsrgeamNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), m, n, this->mat_descr_, this->get_nnz(), this->mat_.row_offset, this->mat_.col, cast_mat->mat_descr_, cast_mat->get_nnz(), cast_mat->mat_.row_offset, cast_mat->mat_.col, desc_mat_C, csrRowPtrC, &nnzC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); allocate_gpu(nnzC, &csrColC); allocate_gpu(nnzC, &csrValC); stat_t = __cusparseXcsrgeam__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), m, n, // A &alpha, this->mat_descr_, this->get_nnz(), this->mat_.val, this->mat_.row_offset, this->mat_.col, // B &beta, cast_mat->mat_descr_, cast_mat->get_nnz(), cast_mat->mat_.val, cast_mat->mat_.row_offset, cast_mat->mat_.col, // C desc_mat_C, csrValC, csrRowPtrC, csrColC); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); stat_t = cusparseDestroyMatDescr(desc_mat_C); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); this->Clear(); this->mat_.row_offset = csrRowPtrC; this->mat_.col = csrColC; this->mat_.val = 
csrValC; this->nrow_ = m; this->ncol_ = n; this->nnz_ = nnzC; } } return true; } template <typename ValueType> bool GPUAcceleratorMatrixCSR<ValueType>::Compress(const ValueType drop_off) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<ValueType> tmp(this->local_backend_); tmp.CopyFrom(*this); int mat_nnz = 0; int *row_offset = NULL; allocate_gpu(this->get_nrow()+1, &row_offset); int *mat_row_offset = NULL; allocate_gpu(this->get_nrow()+1, &mat_row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, this->get_nrow()+1, row_offset); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1); kernel_csr_compress_count_nrow<ValueType, int> <<<GridSize, BlockSize>>> (this->mat_.row_offset, this->mat_.col, this->mat_.val, this->get_nrow(), drop_off, row_offset); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO cum_sum<int, 256>(mat_row_offset, row_offset, this->get_nrow()); // get the new mat nnz cudaMemcpy(&mat_nnz, &mat_row_offset[this->get_nrow()], sizeof(int), cudaMemcpyDeviceToHost); this->AllocateCSR(mat_nnz, this->get_nrow(), this->get_ncol()); // TODO - just exchange memory pointers // copy row_offset cudaMemcpy(this->mat_.row_offset, mat_row_offset, (this->get_nrow()+1)*sizeof(int), cudaMemcpyDeviceToDevice); // copy col and val kernel_csr_compress_copy<ValueType, int> <<<GridSize, BlockSize>>> (tmp.mat_.row_offset, tmp.mat_.col, tmp.mat_.val, tmp.get_nrow(), drop_off, this->mat_.row_offset, this->mat_.col, this->mat_.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&row_offset); free_gpu(&mat_row_offset); } return true; } template <> bool GPUAcceleratorMatrixCSR<double>::Transpose(void) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<double> tmp(this->local_backend_); tmp.CopyFrom(*this); this->Clear(); this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow()); cusparseStatus_t stat_t; stat_t = 
cusparseDcsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(), tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col, this->mat_.val, this->mat_.col, this->mat_.row_offset, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template <> bool GPUAcceleratorMatrixCSR<float>::Transpose(void) { if (this->get_nnz() > 0) { GPUAcceleratorMatrixCSR<float> tmp(this->local_backend_); tmp.CopyFrom(*this); this->Clear(); this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow()); cusparseStatus_t stat_t; stat_t = cusparseScsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle), tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(), tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col, this->mat_.val, this->mat_.col, this->mat_.row_offset, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO); CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__); } return true; } template class GPUAcceleratorMatrixCSR<double>; template class GPUAcceleratorMatrixCSR<float>; }
a87f539e6f28666fce3403fdb775b4403cbe99aa.hip
// !!! This is a file automatically generated by hipify!!! /* * The MIT License (MIT) * * Copyright (c) 2016 Philipp Trommler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <functional> // std::function #ifdef IMRESH_DEBUG # include <iostream> // std::cout, std::endl #endif #include <list> // std::list #include <mutex> // std::mutex #include <thread> // std::thread #include <utility> // std::pair #include <cassert> #include "algorithms/cuda/cudaShrinkWrap.h" #include "libs/cudacommon.h" // CUDA_ERROR namespace imresh { namespace io { /** * Struct containing a CUDA stream with it's associated device. */ struct stream { int device; hipStream_t str; }; /** * Mutex to coordinate device usage. */ std::mutex mtx; /** * List where all streams are stored as imresh::io::stream structs. */ std::list<stream> streamList; /** * List to store all created threads. */ std::list<std::thread> threadPool; /** * Maximum size of the thread pool. 
* * This is determined while imresh::io::fillStreamList() as the number of * available streams. */ unsigned int threadPoolMaxSize = 0; /** * Function to add a image processing task to the queue. * * This is called from taskQueue::addTask() as a thread to prevent blocking * and to ensure that all streams are filled with work. It selects the least * recently used stream from the streamList and fills it with new work (FIFO). * * A mutex ensures the correct work balancing over the CUDA streams. * However, this mutex doesn't include the call to the write out function. * If you need your write out function to be thread safe, you'll have to * use your own lock mechanisms inside of this function. * * @see addTask */ void addTaskAsync( float * _h_mem, std::pair<unsigned int,unsigned int> _size, std::function<void(float *,std::pair<unsigned int,unsigned int>, std::string)> _writeOutFunc, std::string _filename, unsigned int _numberOfCycles, unsigned int _numberOfHIOCycles, float _targetError, float _HIOBeta, float _intensityCutOffAutoCorel, float _intensityCutOff, float _sigma0, float _sigmaChange ) { // Lock the mutex so no other thread intermediatly changes the device // selection mtx.lock( ); // Get the next device and stream to use auto strm = streamList.front( ); streamList.pop_front( ); streamList.push_back( strm ); mtx.unlock( ); auto device = strm.device; auto str = strm.str; // Select device and copy memory CUDA_ERROR( hipSetDevice( device ) ); hipDeviceProp_t prop; CUDA_ERROR( hipGetDeviceProperties( &prop, device ) ); unsigned int const nThreadsPerBlock = 256; /* oversubscribe the GPU by a factor of 2 to account for hipMalloc * and hipMemcpy stalls */ unsigned int const nBlocks = 2*prop.maxThreadsPerMultiProcessor / nThreadsPerBlock; # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTaskAsync(): Mutex locked, device and stream selected. Calling shrink-wrap." << std::endl; # endif // Call shrinkWrap in the selected stream on the selected device. 
imresh::algorithms::cuda::cudaShrinkWrap( _h_mem, _size.first, _size.second, str, nBlocks, nThreadsPerBlock, _numberOfCycles, _targetError, _HIOBeta, _intensityCutOffAutoCorel, _intensityCutOff, _sigma0, _sigmaChange, _numberOfHIOCycles ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTaskAsync(): CUDA work finished, mutex unlocked. Calling write out function." << std::endl; # endif _writeOutFunc( _h_mem, _size, _filename ); } void addTask( float * _h_mem, std::pair<unsigned int,unsigned int> _size, std::function<void(float *,std::pair<unsigned int,unsigned int>, std::string)> _writeOutFunc, std::string _filename, unsigned int _numberOfCycles = 20, unsigned int _numberOfHIOCycles = 20, float _targetError = 0.00001f, float _HIOBeta = 0.9f, float _intensityCutOffAutoCorel = 0.04f, float _intensityCutOff = 0.2f, float _sigma0 = 3.0f, float _sigmaChange = 0.01f ) { assert( threadPoolMaxSize > 0 and "Did you make a call to taskQueueInit?" ); while( threadPool.size( ) >= threadPoolMaxSize ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTask(): Too many active threads. Waiting for one of them to finish." << std::endl; # endif if ( threadPool.front( ).joinable( ) ) threadPool.front( ).join( ); else { # ifdef IMRESH_DEBUG std::cout << "[Warning] " << __FILE__ << " line " << __LINE__ << ": a thread from the thread pool is not joinable!\n"; # endif } threadPool.pop_front( ); } # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTask(): Creating working thread." << std::endl; # endif threadPool.push_back( std::thread( addTaskAsync, _h_mem, _size, _writeOutFunc, _filename, _numberOfCycles, _numberOfHIOCycles, _targetError, _HIOBeta, _intensityCutOffAutoCorel, _intensityCutOff, _sigma0, _sigmaChange ) ); } /** * This function adds all streams to the stream list. * * To achieve that it iterates over all available devices and creates one * stream for each multiprocessor on each device. All these streams are * stored in the streamList as imresh::io::stream objects. 
If no streams are * found, the program aborts. */ unsigned fillStreamList( ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Starting stream creation." << std::endl; # endif int deviceCount = 0; CUDA_ERROR( hipGetDeviceCount( & deviceCount ) ); if( deviceCount <= 0 ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): No devices found. Aborting." << std::endl; # endif exit( EXIT_FAILURE ); } for( int i = 0; i < deviceCount; i++ ) { hipDeviceProp_t prop; CUDA_ERROR( hipSetDevice( i ) ); CUDA_ERROR( hipGetDeviceProperties( & prop, i ) ); assert( prop.multiProcessorCount >= 0 ); # ifdef IMRESH_DEBUG /* 0 makes no problems with the next for loop */ if( prop.multiProcessorCount <= 0 ) { std::cout << "[Warning] imresh::io::fillStreamList(): Devices has no multiprocessors. Ignoring this device." << std::endl; } # endif for( int j = 0; j < prop.multiProcessorCount; j++ ) { stream str; str.device = i; CUDA_ERROR( hipStreamCreate( & str.str ) ); streamList.push_back( str ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Created stream " << j << " on device " << i << std::endl; # endif } } # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Finished stream creation." << std::endl; # endif return streamList.size( ); } void taskQueueInit( ) { threadPoolMaxSize = fillStreamList( ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::taskQueueInit(): Finished initilization." << std::endl; # endif } void taskQueueDeinit( ) { threadPoolMaxSize = 0; while( threadPool.size( ) > 0 ) { threadPool.front( ).join( ); threadPool.pop_front( ); } while( streamList.size( ) > 0 ) { CUDA_ERROR( hipStreamDestroy( streamList.front( ).str ) ); streamList.pop_front( ); } # ifdef IMRESH_DEBUG std::cout << "imresh::io::taskQueueDeinit(): Finished deinitilization." << std::endl; # endif } } // namespace io } // namespace imresh
a87f539e6f28666fce3403fdb775b4403cbe99aa.cu
/* * The MIT License (MIT) * * Copyright (c) 2016 Philipp Trommler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <functional> // std::function #ifdef IMRESH_DEBUG # include <iostream> // std::cout, std::endl #endif #include <list> // std::list #include <mutex> // std::mutex #include <thread> // std::thread #include <utility> // std::pair #include <cassert> #include "algorithms/cuda/cudaShrinkWrap.h" #include "libs/cudacommon.h" // CUDA_ERROR namespace imresh { namespace io { /** * Struct containing a CUDA stream with it's associated device. */ struct stream { int device; cudaStream_t str; }; /** * Mutex to coordinate device usage. */ std::mutex mtx; /** * List where all streams are stored as imresh::io::stream structs. */ std::list<stream> streamList; /** * List to store all created threads. */ std::list<std::thread> threadPool; /** * Maximum size of the thread pool. * * This is determined while imresh::io::fillStreamList() as the number of * available streams. 
*/ unsigned int threadPoolMaxSize = 0; /** * Function to add a image processing task to the queue. * * This is called from taskQueue::addTask() as a thread to prevent blocking * and to ensure that all streams are filled with work. It selects the least * recently used stream from the streamList and fills it with new work (FIFO). * * A mutex ensures the correct work balancing over the CUDA streams. * However, this mutex doesn't include the call to the write out function. * If you need your write out function to be thread safe, you'll have to * use your own lock mechanisms inside of this function. * * @see addTask */ void addTaskAsync( float * _h_mem, std::pair<unsigned int,unsigned int> _size, std::function<void(float *,std::pair<unsigned int,unsigned int>, std::string)> _writeOutFunc, std::string _filename, unsigned int _numberOfCycles, unsigned int _numberOfHIOCycles, float _targetError, float _HIOBeta, float _intensityCutOffAutoCorel, float _intensityCutOff, float _sigma0, float _sigmaChange ) { // Lock the mutex so no other thread intermediatly changes the device // selection mtx.lock( ); // Get the next device and stream to use auto strm = streamList.front( ); streamList.pop_front( ); streamList.push_back( strm ); mtx.unlock( ); auto device = strm.device; auto str = strm.str; // Select device and copy memory CUDA_ERROR( cudaSetDevice( device ) ); cudaDeviceProp prop; CUDA_ERROR( cudaGetDeviceProperties( &prop, device ) ); unsigned int const nThreadsPerBlock = 256; /* oversubscribe the GPU by a factor of 2 to account for cudaMalloc * and cudaMemcpy stalls */ unsigned int const nBlocks = 2*prop.maxThreadsPerMultiProcessor / nThreadsPerBlock; # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTaskAsync(): Mutex locked, device and stream selected. Calling shrink-wrap." << std::endl; # endif // Call shrinkWrap in the selected stream on the selected device. 
imresh::algorithms::cuda::cudaShrinkWrap( _h_mem, _size.first, _size.second, str, nBlocks, nThreadsPerBlock, _numberOfCycles, _targetError, _HIOBeta, _intensityCutOffAutoCorel, _intensityCutOff, _sigma0, _sigmaChange, _numberOfHIOCycles ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTaskAsync(): CUDA work finished, mutex unlocked. Calling write out function." << std::endl; # endif _writeOutFunc( _h_mem, _size, _filename ); } void addTask( float * _h_mem, std::pair<unsigned int,unsigned int> _size, std::function<void(float *,std::pair<unsigned int,unsigned int>, std::string)> _writeOutFunc, std::string _filename, unsigned int _numberOfCycles = 20, unsigned int _numberOfHIOCycles = 20, float _targetError = 0.00001f, float _HIOBeta = 0.9f, float _intensityCutOffAutoCorel = 0.04f, float _intensityCutOff = 0.2f, float _sigma0 = 3.0f, float _sigmaChange = 0.01f ) { assert( threadPoolMaxSize > 0 and "Did you make a call to taskQueueInit?" ); while( threadPool.size( ) >= threadPoolMaxSize ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTask(): Too many active threads. Waiting for one of them to finish." << std::endl; # endif if ( threadPool.front( ).joinable( ) ) threadPool.front( ).join( ); else { # ifdef IMRESH_DEBUG std::cout << "[Warning] " << __FILE__ << " line " << __LINE__ << ": a thread from the thread pool is not joinable!\n"; # endif } threadPool.pop_front( ); } # ifdef IMRESH_DEBUG std::cout << "imresh::io::addTask(): Creating working thread." << std::endl; # endif threadPool.push_back( std::thread( addTaskAsync, _h_mem, _size, _writeOutFunc, _filename, _numberOfCycles, _numberOfHIOCycles, _targetError, _HIOBeta, _intensityCutOffAutoCorel, _intensityCutOff, _sigma0, _sigmaChange ) ); } /** * This function adds all streams to the stream list. * * To achieve that it iterates over all available devices and creates one * stream for each multiprocessor on each device. All these streams are * stored in the streamList as imresh::io::stream objects. 
If no streams are * found, the program aborts. */ unsigned fillStreamList( ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Starting stream creation." << std::endl; # endif int deviceCount = 0; CUDA_ERROR( cudaGetDeviceCount( & deviceCount ) ); if( deviceCount <= 0 ) { # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): No devices found. Aborting." << std::endl; # endif exit( EXIT_FAILURE ); } for( int i = 0; i < deviceCount; i++ ) { cudaDeviceProp prop; CUDA_ERROR( cudaSetDevice( i ) ); CUDA_ERROR( cudaGetDeviceProperties( & prop, i ) ); assert( prop.multiProcessorCount >= 0 ); # ifdef IMRESH_DEBUG /* 0 makes no problems with the next for loop */ if( prop.multiProcessorCount <= 0 ) { std::cout << "[Warning] imresh::io::fillStreamList(): Devices has no multiprocessors. Ignoring this device." << std::endl; } # endif for( int j = 0; j < prop.multiProcessorCount; j++ ) { stream str; str.device = i; CUDA_ERROR( cudaStreamCreate( & str.str ) ); streamList.push_back( str ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Created stream " << j << " on device " << i << std::endl; # endif } } # ifdef IMRESH_DEBUG std::cout << "imresh::io::fillStreamList(): Finished stream creation." << std::endl; # endif return streamList.size( ); } void taskQueueInit( ) { threadPoolMaxSize = fillStreamList( ); # ifdef IMRESH_DEBUG std::cout << "imresh::io::taskQueueInit(): Finished initilization." << std::endl; # endif } void taskQueueDeinit( ) { threadPoolMaxSize = 0; while( threadPool.size( ) > 0 ) { threadPool.front( ).join( ); threadPool.pop_front( ); } while( streamList.size( ) > 0 ) { CUDA_ERROR( cudaStreamDestroy( streamList.front( ).str ) ); streamList.pop_front( ); } # ifdef IMRESH_DEBUG std::cout << "imresh::io::taskQueueDeinit(): Finished deinitilization." << std::endl; # endif } } // namespace io } // namespace imresh
3c4fe9e82d41d32acb0d1e08140897b8836a715b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <limits> #include "minimizer.hpp" #include <thrust/scan.h> #include <thrust/execution_policy.h> namespace claraparabricks { namespace genomeworks { namespace cudamapper { Minimizer::Minimizer(representation_t representation, position_in_read_t position_in_read, DirectionOfRepresentation direction, read_id_t read_id) : representation_(representation) , position_in_read_(position_in_read) , direction_(direction) , read_id_(read_id) { } representation_t Minimizer::representation() const { return representation_; } position_in_read_t Minimizer::position_in_read() const { return position_in_read_; } read_id_t Minimizer::read_id() const { return read_id_; } /// \brief Apply a hash function to a representation /// /// Because of the non-Poisson distribuition of DNA, some common sequences with common kmer-content (e.g long poly-A runs) /// may be over-represented in sketches. By applying a hash function, kmers are mapped to representations over /// a more uniform space. The hash function implemented here was developed by Thomas Wang and is described /// [here](https://gist.github.com/badboy/6267743). A mask is applied to the output so that all representations are mapped /// to a 32 bit space. 
/// /// \param key the input representation __device__ representation_t wang_hash64(representation_t key) { uint64_t mask = (uint64_t(1) << 32) - 1; key = (~key + (key << 21)) & mask; key = key ^ key >> 24; key = ((key + (key << 3)) + (key << 8)) & mask; key = key ^ key >> 14; key = ((key + (key << 2)) + (key << 4)) & mask; key = key ^ key >> 28; key = (key + (key << 31)) & mask; return key; } Minimizer::DirectionOfRepresentation Minimizer::direction() const { return direction_; } /// \brief finds front end minimizers /// /// Finds the minimizers of windows starting at position 0 and having window size range from 1 to window_size-1 /// /// \param minimizer_size kmer length /// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for front end minimizers window size actually varies from 1 to window_size-1) /// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on /// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read /// \param window_minimizers_representation output array of representations of minimizers, grouped by reads /// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse) /// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads /// \param read_id_to_window_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition) /// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially zero) __global__ void find_front_end_minimizers(const std::uint64_t minimizer_size, const std::uint64_t window_size, const char* const basepairs, const 
ArrayBlock* const read_id_to_basepairs_section, representation_t* const window_minimizers_representation, char* const window_minimizers_direction, position_in_read_t* const window_minimizers_position_in_read, const ArrayBlock* const read_id_to_windows_section, std::int64_t* const read_id_to_minimizers_written, const bool hash_representations) { // TODO: simplify this method similarly to find_back_end_minimizers if (1 == window_size) { // if 1 == window_size there are no end minimizer return; } const auto input_array_first_element = read_id_to_basepairs_section[blockIdx.x].first_element_; const auto output_arrays_offset = read_id_to_windows_section[blockIdx.x].first_element_; // Dynamically allocating shared memory and assigning parts of it to different pointers // Everything is 8-byte alligned extern __shared__ std::uint64_t sm[]; // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory // TODO: use sizeof to get the number of bytes std::uint32_t shared_memory_64_bit_elements_already_taken = 0; char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8; char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8; representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements shared_memory_64_bit_elements_already_taken += blockDim.x - (minimizer_size - 1); char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - forward, 1 - reverse shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 7) / 8; 
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2; position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - same, 1 - different shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2; representation_t* minimizer_representation_of_largest_window_from_previous_step = (&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += 1; position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += (1 + 1) / 2; // local index = index in section of the output array dedicated to this read position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += (1 + 1) / 2; // TODO: Move to constant memory char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements if (0 == threadIdx.x) { forward_to_reverse_complement[0b000] = 0b0000; forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100) forward_to_reverse_complement[0b010] = 0b0000; forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111) forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1) forward_to_reverse_complement[0b101] = 0b0000; forward_to_reverse_complement[0b110] = 0b0000; forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11) } __syncthreads(); // Each thread loads one 
basepair, making it blockDim.x basepairs. Each kmer has minimizer_size elements // Number of windows is equal to the number of kmers for end minimizer // This means a total of blockDim.x - (minimizer_size - 1) kmer can be processed in one block, where each kmer is shifted by one one basepair compared to the previous kmer // For blockDim.x = 6 and minimizer_size = 3 there are 6 - (3 - 1) = 4 kmers // 0 1 2 // 1 2 3 // 2 3 4 // 3 4 5 // If more minimizers have to be processed a new step is needed, in this case meaning // 4 5 6 // 5 6 7 // 6 7 8 // 7 8 9 // This means that a number of basepairs is loaded twice, but this a tradeoff for less complex code const std::uint16_t windows_per_loop_step = blockDim.x - (minimizer_size - 1); *minimizer_representation_of_largest_window_from_previous_step = 0; *minimizer_position_in_read_of_largest_window_from_previous_step = 0; *local_index_to_write_next_minimizer_to = 0; for (std::uint32_t first_element_in_step = 0; first_element_in_step < window_size - 1; first_element_in_step += windows_per_loop_step) { // load basepairs into shared memory and calculate the lexical ordering hash if (first_element_in_step + threadIdx.x < window_size - 1 + minimizer_size - 1) { // window_size - 1 + minimizer_size - 1 -> total number of basepairs needed for all front minimizers const char bp = basepairs[input_array_first_element + first_element_in_step + threadIdx.x]; forward_basepair_hashes[threadIdx.x] = 0b11 & (bp >> 2 ^ bp >> 1); reverse_basepair_hashes[threadIdx.x] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1); } __syncthreads(); // First front end window covers only one minimizer (the one starting at positon 0), second minimizers starting at 0 and 1 and so one until the window which covers window_size-1 minimizers // For window_size = 7 and minimize_size = 3 this means: // window 0: 0 1 2 (0 1 2) // window 1: 0 1 2 3 (0 1 2; 1 2 3) // widnow 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4) // 
window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5) // window 4: 0 1 2 3 4 5 6 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6) // window 5: 0 1 2 3 4 5 6 7 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6; 5 6 7) // If window_size > windows_per_loop_step the other windows have to be processed in other loop steps // For example, for blockDim.x = 6, minimizer_size = 3 (=> windows_per_loop_step = 4) and window_size = 7: // step 0 (first_element_in_step = 0): // window 0: 0 1 2 (0 1 2) // window 1: 0 1 2 3 (0 1 2; 1 2 3) // widnow 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4) // window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5) // step 1 (first_element_in_step = 4): // window 4: 0 1 2 3 4 5 6 (take results for window 3 and add: 4 5 6) // window 5: 0 1 2 3 4 5 6 7 (take results for window 3 and add: 4 5 6; 5 6 7) // This means that a thread has a window assigned to it when thraedIdx.x < minimizers_per_loop (for all loops other than the last one) and // when first_element_in_step + threadIdx.x < window_size - 1 const bool thread_assigned_to_a_window = first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step; // calculate minimizer for each kmer in front end windows if (thread_assigned_to_a_window) { // largest front minimizer window starts at basepar 0 and goes up to window_size -1 representation_t forward_representation = 0; representation_t reverse_representation = 0; // TODO: It's not necessary to fully build both representations in order to determine which one is smaller. 
In most cases there is going to be a difference already at the first element for (std::uint16_t i = 0; i < minimizer_size; ++i) { forward_representation |= forward_basepair_hashes[threadIdx.x + i] << 2 * (minimizer_size - i - 1); reverse_representation |= reverse_basepair_hashes[threadIdx.x + i] << 2 * i; } if (hash_representations) { forward_representation = wang_hash64(forward_representation); reverse_representation = wang_hash64(reverse_representation); } if (forward_representation <= reverse_representation) { minimizers_representation[threadIdx.x] = forward_representation; minimizers_direction[threadIdx.x] = 0; } else { minimizers_representation[threadIdx.x] = reverse_representation; minimizers_direction[threadIdx.x] = 1; } } __syncthreads(); representation_t window_minimizer_representation = 0; position_in_read_t window_minimizer_position_in_read = 0; // calculate minimizer for each window // Start by the value of the first minimizer and iteratively compare it with the other minimizers in the window // If first_element_in_step != 0 there is no need to go through all minimizers in the window. One can take the minimizer of window first_element_in_step-1 // as the current window would check exaclty the same minimizers before checking minimizer first_element_in_step if (thread_assigned_to_a_window) { if (first_element_in_step != 0) { window_minimizer_representation = *minimizer_representation_of_largest_window_from_previous_step; window_minimizer_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step; if (minimizers_representation[0] <= window_minimizer_representation) { window_minimizer_representation = minimizers_representation[0]; window_minimizer_position_in_read = first_element_in_step; } } else { window_minimizer_representation = minimizers_representation[0]; window_minimizer_position_in_read = 0; } // All threads have to wait for the largest block to finish. 
Probably no better solution without big restructuring // If there are several minimizers with the same representation only save the latest one (thus <=), others will be covered by smaller windows for (std::uint16_t i = 1; i <= threadIdx.x; ++i) { if (minimizers_representation[i] <= window_minimizer_representation) { window_minimizer_representation = minimizers_representation[i]; window_minimizer_position_in_read = first_element_in_step + i; } } minimizers_position_in_read[threadIdx.x] = window_minimizer_position_in_read; } __syncthreads(); // only write first occurence of each minimizer to the output array // Hash of the last kmer in a window can be a minimizer only if it is smaller or equal than the minimizer of the previous window // That means that the minimizer of the current window should only be written if it is different than the one of the previous window // Otherwise it it the same minimizer and there is no need write to the the output array // Imagine that hash representation of windows are are follows (the number in the parentheses marks the position of the last occurance of the minimizer with that representation): // 8, 89, 898, 8987, 89878, 898785, 8987856, 89878562 // Minimizers of these windows are // 8(0) 8(0) 8(2) 7(3) 7(3) 5(5) 5(5) 2(7) // If we use 1 to indicate the first occurence of minimizer and 0 for repretition we get // 1 0 1 1 0 1 0 1 // If we do an an inclusive scan on this array we get the indices to which the unique minimizers should be written to (plus one) // 1 1 2 3 3 4 4 5 // From this it's clear that only the windows whose value is larger than the one of its neighbor should write its minimizer and it should write to the element with the index of value-1 if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step) { if (0 == first_element_in_step && 0 == threadIdx.x) { // minimizer of first window is unique for sure as it has no left neighbor different_minimizer_than_neighbors[0] = 1; } else { 
representation_t neighbors_minimizers_position_in_read = 0; // find left neighbor's window minimizer's position in read if (0 == threadIdx.x) { neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step; } else { // TODO: consider using warp shuffle instead of shared memory neighbors_minimizers_position_in_read = minimizers_position_in_read[threadIdx.x - 1]; } // check if it's the same minimizer if (neighbors_minimizers_position_in_read == minimizers_position_in_read[threadIdx.x]) { different_minimizer_than_neighbors[threadIdx.x] = 0; } else { different_minimizer_than_neighbors[threadIdx.x] = 1; } } } __syncthreads(); // if there are more loop steps to follow write the value and position of minimizer of the largest window if (first_element_in_step + windows_per_loop_step < window_size - 1 && threadIdx.x == windows_per_loop_step - 1) { *minimizer_representation_of_largest_window_from_previous_step = window_minimizer_representation; *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read; } // no need to sync, these two values are not used before the next sync // perform inclusive scan // different_minimizer_than_neighbors changes meaning an becomes more like "output_array_index_to_write_the_value_plus_one" // TODO: implement it using warp shuffle or use CUB if (0 == threadIdx.x) { std::uint16_t i = 0; different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to; for (i = 1; i < blockDim.x - (minimizer_size - 1); ++i) { different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1]; } } __syncthreads(); // now save minimizers to output array if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step) { const std::uint32_t neighbors_write_index = 0 == threadIdx.x ? 
*local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[threadIdx.x - 1]; if (neighbors_write_index < different_minimizer_than_neighbors[threadIdx.x]) { const std::uint64_t output_index = output_arrays_offset + different_minimizer_than_neighbors[threadIdx.x] - 1; window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[threadIdx.x] - first_element_in_step]; window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[threadIdx.x] - first_element_in_step]; window_minimizers_position_in_read[output_index] = minimizers_position_in_read[threadIdx.x]; } } __syncthreads(); // index (plus one) to which the last window minimizer was written is the number of all unique front end window minimizers if (first_element_in_step + threadIdx.x == window_size - 1 - 1) { // "plus one" is already included in different_minimizer_than_neighbors as it was created by an inclusive scan read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[threadIdx.x]; } // if there are more loop steps to follow write the output array index of last minimizer in this loop step if (first_element_in_step + windows_per_loop_step <= window_size - 1 && threadIdx.x == windows_per_loop_step - 1) { *local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[threadIdx.x]; } } } /// \brief finds central minimizers /// /// Finds the minimizers of windows of size window_size starting at position 0 and moving by one basepair at a time /// /// \param minimizer_size kmer length /// \param window_size number of kmers in one window, kmers being shifted by one one basepair each /// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on /// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read /// \param window_minimizers_representation output array of 
representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_window_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end minimizers)
__global__ void find_central_minimizers(const std::uint64_t minimizer_size,
                                        const std::uint64_t window_size,
                                        const std::uint32_t basepairs_per_thread,
                                        const char* const basepairs,
                                        const ArrayBlock* const read_id_to_basepairs_section,
                                        representation_t* const window_minimizers_representation,
                                        char* const window_minimizers_direction,
                                        position_in_read_t* const window_minimizers_position_in_read,
                                        const ArrayBlock* const read_id_to_windows_section,
                                        std::int64_t* const read_id_to_minimizers_written,
                                        const bool hash_representations)
{
    // See find_front_end_minimizers for more details about the algorithm
    // Each thread block processes the read at index blockIdx.x (all per-read lookups below use blockIdx.x)
    const std::uint64_t index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_;
    // Index of the element to which the first central minimizer of this read should be written. Index refers to the positions within the whole array dedicated to all reads
    const std::uint64_t output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + read_id_to_minimizers_written[blockIdx.x];
    const std::uint32_t basepairs_in_read = read_id_to_basepairs_section[blockIdx.x].block_size_;
    const std::uint32_t kmers_in_read   = basepairs_in_read - (minimizer_size - 1);
    const std::uint32_t windows_in_read = kmers_in_read - (window_size - 1);
    // The read is processed in steps; each step covers basepairs_per_loop_step basepairs.
    // Consecutive steps overlap by (minimizer_size - 1) + (window_size - 1) basepairs so every window is fully contained in some step.
    const std::uint16_t basepairs_per_loop_step = blockDim.x * basepairs_per_thread;
    const std::uint16_t kmers_per_loop_step     = basepairs_per_loop_step - (minimizer_size - 1);
    const std::uint16_t windows_per_loop_step   = kmers_per_loop_step - (window_size - 1);

    // Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
    extern __shared__ std::uint64_t sm[];
    // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
    // TODO: use sizeof to get the number of bytes
    std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
    char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;

    char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;

    representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_loop_step elements
    shared_memory_64_bit_elements_already_taken += kmers_per_loop_step;

    // NOTE(review): this array is sized for windows_per_loop_step elements but is written at minimizers_direction[kmer_index]
    // for kmer_index up to kmers_per_loop_step - 1 in the kmer loop below (and read at indices up to kmers_per_loop_step - 1
    // when saving minimizers). The sibling kernels size the equivalent array by the number of kmers — verify the intended
    // size (and the matching host-side dynamic shared memory computation) before changing anything here.
    char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - forward, 1 - reverse
    shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 7) / 8;

    position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;

    position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - same, 1 - different
    shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;

    position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
    shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;

    position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
    shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;

    // TODO: Move to constant memory
    char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements

    if (0 == threadIdx.x)
    {
        // lookup table keyed by the low three bits of the ASCII basepair; maps a base to its complement's low bits
        forward_to_reverse_complement[0b000] = 0b0000;
        forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
        forward_to_reverse_complement[0b010] = 0b0000;
        forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
        forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
        forward_to_reverse_complement[0b101] = 0b0000;
        forward_to_reverse_complement[0b110] = 0b0000;
        forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
    }
    __syncthreads();

    // if there are front minimizers take them into account
    if (0 != read_id_to_minimizers_written[blockIdx.x])
    {
        *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
        *local_index_to_write_next_minimizer_to = read_id_to_minimizers_written[blockIdx.x];
    }
    else
    {
        *minimizer_position_in_read_of_largest_window_from_previous_step = 0; // N/A
        *local_index_to_write_next_minimizer_to = 0;
    }

    for (std::uint32_t first_element_in_step = 0; first_element_in_step < windows_in_read; first_element_in_step += windows_per_loop_step)
    {
        // load basepairs into shared memory and calculate the lexical ordering hash
        for (std::uint32_t basepair_index = threadIdx.x; basepair_index < basepairs_per_loop_step && first_element_in_step + basepair_index < basepairs_in_read; basepair_index += blockDim.x)
        {
            const char bp = basepairs[index_of_first_element_to_process_global + first_element_in_step + basepair_index];
            forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
            reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
        }
        __syncthreads();

        // calculate kmer minimizers
        for (std::uint32_t kmer_index = threadIdx.x; kmer_index < kmers_per_loop_step && first_element_in_step + kmer_index < kmers_in_read; kmer_index += blockDim.x)
        {
            representation_t forward_representation = 0;
            representation_t reverse_representation = 0;
            // TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
            // NOTE(review): the char operands are promoted to int before the shift, so the shift amount 2 * (minimizer_size - 1)
            // must stay below the width of int — i.e. this assumes minimizer_size <= 16. Confirm against the callers.
            for (std::uint16_t i = 0; i < minimizer_size; ++i)
            {
                forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
                reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
            }
            if (hash_representations)
            {
                forward_representation = wang_hash64(forward_representation);
                reverse_representation = wang_hash64(reverse_representation);
            }
            // canonical representation: keep the lexicographically (or hash-) smaller of the two strands
            if (forward_representation <= reverse_representation)
            {
                minimizers_representation[kmer_index] = forward_representation;
                minimizers_direction[kmer_index] = 0;
            }
            else
            {
                minimizers_representation[kmer_index] = reverse_representation;
                minimizers_direction[kmer_index] = 1;
            }
        }
        __syncthreads();

        // declared outside the loop so the thread that processed the last window of the step can publish it further below
        position_in_read_t window_minimizer_position_in_read = 0;
        // find window minimizer
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // assume that the minimizer of the first kmer in step is the window minimizer
            representation_t window_minimizer_representation = minimizers_representation[window_index];
            window_minimizer_position_in_read = first_element_in_step + window_index;
            // now check the minimizers of all other kmers of the window
            // (<= keeps the rightmost occurrence when several kmers share the minimal representation)
            for (std::uint16_t i = 1; i < window_size; ++i)
            {
                if (minimizers_representation[window_index + i] <= window_minimizer_representation)
                {
                    window_minimizer_representation = minimizers_representation[window_index + i];
                    window_minimizer_position_in_read = first_element_in_step + window_index + i;
                }
            }
            minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
        }
        __syncthreads();

        // check if the window to the left has the same minimizer
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // if this is the first window in read and there were no front end minimizers then this is the first occurrence of this minimizer
            if (0 == first_element_in_step + window_index && 0 == read_id_to_minimizers_written[blockIdx.x])
            {
                different_minimizer_than_neighbors[0] = 1;
            }
            else
            {
                // NOTE(review): holds a position in read despite being declared representation_t — presumably only width matters; confirm
                representation_t neighbors_minimizers_position_in_read = 0;
                // find left neighbor's window minimizer's position in read
                if (0 == window_index)
                {
                    neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
                }
                else
                {
                    // TODO: consider using warp shuffle instead of shared memory
                    neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
                }
                // check if it's the same minimizer
                if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
                {
                    different_minimizer_than_neighbors[window_index] = 0;
                }
                else
                {
                    different_minimizer_than_neighbors[window_index] = 1;
                }
            }
        }
        __syncthreads();

        // if there are more loop steps to follow write the position of minimizer of the last window
        // "windows_per_loop_step % blockDim.x - 1" determines the thread which processes the last window
        // NOTE(review): if windows_per_loop_step is an exact multiple of blockDim.x this expression underflows
        // (unsigned) and no thread matches, leaving the carried value stale — confirm the launch configuration
        // guarantees windows_per_loop_step % blockDim.x != 0
        if (first_element_in_step + windows_per_loop_step < windows_in_read && threadIdx.x == windows_per_loop_step % blockDim.x - 1)
        {
            *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
        }

        // perform inclusive scan
        // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
        // TODO: implement it using warp shuffle or use CUB
        if (0 == threadIdx.x)
        {
            std::uint16_t i = 0;
            different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
            for (i = 1; i < windows_per_loop_step && first_element_in_step + i < windows_in_read; ++i)
            {
                different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
            }
        }
        __syncthreads();

        // now save minimizers to output array
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // if first_element_in_loop == 0 and window_index == 0 then *local_index_to_write_next_minimizer_to is set to 0 before entering the loop
            const std::uint32_t neighbors_write_index = 0 == window_index ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[window_index - 1];
            if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
            {
                // output array offset added in inclusive sum
                const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
                window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - first_element_in_step];
                window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - first_element_in_step];
                window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
            }
        }
        __syncthreads();

        // increase the number of written minimizers by the number of central minimizers
        // the value is increased by the write index of the last window in read
        if (first_element_in_step + windows_per_loop_step >= windows_in_read && 0 == threadIdx.x)
        { // only do it when there is not going to be new loop step
            read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[windows_in_read - first_element_in_step - 1]; // write the index of the last window
        }
        // if there are more loop steps to follow write the output array index of the last minimizer in this loop step
        if (first_element_in_step + windows_per_loop_step < windows_in_read && 0 == threadIdx.x)
        {
            *local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[windows_per_loop_step - 1]; // index of last written minimizer + 1
        }
    }
}

/// \brief finds back end minimizers
///
/// Finds the minimizers of windows ending at the last basepair and having window size range from 1 to window_size-1
///
///
\param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for back end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_window_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end and central minimizers)
__global__ void find_back_end_minimizers(const std::uint64_t minimizer_size,
                                         const std::uint64_t window_size,
                                         const char* const basepairs,
                                         const ArrayBlock* const read_id_to_basepairs_section,
                                         representation_t* const window_minimizers_representation,
                                         char* const window_minimizers_direction,
                                         position_in_read_t* const window_minimizers_position_in_read,
                                         const ArrayBlock* const read_id_to_windows_section,
                                         std::int64_t* const read_id_to_minimizers_written,
                                         const bool hash_representations)
{
    // See find_front_end_minimizers for more details about the algorithm
    // Each thread block processes the read at index blockIdx.x
    if (1 == window_size)
    {
        // if 1 == window_size there are no end minimizer
        return;
    }

    // Index of first basepair which belongs to the largest back end minimizers. Index of that basepair within the read
    const auto index_of_first_element_to_process_local = read_id_to_basepairs_section[blockIdx.x].block_size_ - (window_size - 1 + minimizer_size - 1);
    // Index of first basepair which belongs to the largest back end minimizers. Index of that basepair within the whole array of basepairs for all reads
    const auto index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_ + index_of_first_element_to_process_local;
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the positions within the section dedicated to this read
    const auto output_index_to_write_the_first_minimizer_local = read_id_to_minimizers_written[blockIdx.x];
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the positions within the whole array dedicated to all reads
    const auto output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + output_index_to_write_the_first_minimizer_local;

    // Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
    extern __shared__ std::uint64_t sm[];
    // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
    // TODO: use sizeof to get the number of bytes
    std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
    char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;

    char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;

    representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements
    shared_memory_64_bit_elements_already_taken += window_size - 1;

    char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements; 0 - forward, 1 - reverse
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 7) / 8;

    position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;

    position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements; 0 - same, 1 - different
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;

    // TODO: Move to constant memory
    char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements

    if (0 == threadIdx.x)
    {
        // lookup table keyed by the low three bits of the ASCII basepair; maps a base to its complement's low bits
        forward_to_reverse_complement[0b000] = 0b0000;
        forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
        forward_to_reverse_complement[0b010] = 0b0000;
        forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
        forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
        forward_to_reverse_complement[0b101] = 0b0000;
        forward_to_reverse_complement[0b110] = 0b0000;
        forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
    }
    __syncthreads();

    // There are only window_size-1 back end windows. window_size usually has the value of a few dozens
    // Having windows_size so large that it does not fit the shared memory is unlikely
    // If that happens implement this method similarly to find_central_minimizers

    // load basepairs into shared memory and calculate the lexical ordering hash
    for (std::uint16_t basepair_index = threadIdx.x; basepair_index < window_size - 1 + minimizer_size - 1; basepair_index += blockDim.x)
    {
        const char bp = basepairs[index_of_first_element_to_process_global + basepair_index];
        forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
        reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
    }
    __syncthreads();

    // calculate kmer minimizers
    // For back end minimizers the number of kmers is the same as the number of windows
    for (std::uint16_t kmer_index = threadIdx.x; kmer_index < window_size - 1; kmer_index += blockDim.x)
    {
        representation_t forward_representation = 0;
        representation_t reverse_representation = 0;
        // TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
        // NOTE(review): the char operands are promoted to int before the shift, so this assumes minimizer_size <= 16 — confirm against the callers
        for (std::uint16_t i = 0; i < minimizer_size; ++i)
        {
            forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
            reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
        }
        if (hash_representations)
        {
            forward_representation = wang_hash64(forward_representation);
            reverse_representation = wang_hash64(reverse_representation);
        }
        // canonical representation: keep the smaller of the two strands
        if (forward_representation <= reverse_representation)
        {
            minimizers_representation[kmer_index] = forward_representation;
            minimizers_direction[kmer_index] = 0;
        }
        else
        {
            minimizers_representation[kmer_index] = reverse_representation;
            minimizers_direction[kmer_index] = 1;
        }
    }
    __syncthreads();

    // find window minimizer
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        // assume that the first kmer in the window is the minimizer
        representation_t window_minimizer_representation = minimizers_representation[window_index];
        position_in_read_t window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index;
        // now check other kmers in the window (note that this is the back end minimizer, so not all windows have the same length)
        for (std::uint16_t i = 1; window_index + i < window_size - 1; ++i)
        {
            if (minimizers_representation[window_index + i] <= window_minimizer_representation)
            {
                window_minimizer_representation = minimizers_representation[window_index + i];
                window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index + i;
            }
        }
        minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
    }
    __syncthreads();

    // check if the window to the left has the same minimizer
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        // NOTE(review): holds a position in read despite being declared representation_t — presumably only width matters; confirm
        representation_t neighbors_minimizers_position_in_read = 0;
        // find left neighbor's window minimizer's position in read
        if (0 == window_index)
        {
            // if this is the first window take the position of the minimizer of the last central minimizer
            neighbors_minimizers_position_in_read = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
        }
        else
        {
            // TODO: consider using warp shuffle instead of shared memory
            neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
        }
        // check if it's the same minimizer
        if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
        {
            different_minimizer_than_neighbors[window_index] = 0;
        }
        else
        {
            different_minimizer_than_neighbors[window_index] = 1;
        }
    }
    __syncthreads();

    // perform inclusive scan
    // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
    // TODO: implement it using warp shuffle or use CUB
    if (0 == threadIdx.x)
    {
        // read_id_to_minimizers_written[blockIdx.x] is the index of the last written plus one
        different_minimizer_than_neighbors[0] += output_index_to_write_the_first_minimizer_local;
        for (std::uint16_t i = 1; i < window_size - 1; ++i)
        {
            different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
        }
    }
    __syncthreads();

    // now save minimizers to output array
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        // different_minimizer_than_neighbors contains an inclusive scan, i.e. it's index_to_write_to + 1
        const std::uint32_t neighbors_write_index = 0 == window_index ? output_index_to_write_the_first_minimizer_local : different_minimizer_than_neighbors[window_index - 1];
        if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
        {
            // to get the actual index to write to do -1 to different_minimizer_than_neighbors
            const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
            // subtract index_of_first_element_to_process_local to get the index in shared memory
            window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
            window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
            window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
        }
    }
    __syncthreads();

    // save the write index of the last written minimizer
    if (0 == threadIdx.x)
    {
        read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[window_size - 1 - 1];
    }
}

/// \brief packs minimizers of different reads together
///
/// window_minimizers_representation, window_minimizers_position_in_read and window_minimizers_direction all allocate one element for each window in the read.
/// Many windows share the same minimizer and each minimizer is written only once, meaning many elements do not contain minimizers.
/// This function creates new arrays where such elements do not exist.
/// Note that in the input arrays all minimizers of one read are written consecutively, i.e. [read_0 minimizers], [read_0 junk], [read_1 minimizers], [read_1 junk], [read_2 minimizers]...
/// /// \param window_minimizers_representation array of representations of minimizers, grouped by reads /// \param window_minimizers_position_in_read array of positions in read of minimizers, grouped by reads /// \param window_minimizers_direction array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse) /// \param read_id_to_windows_section index of first element dedicated to that read in input arrays and the number of dedicated elements /// \param representations_compressed array of representations of minimizers, grouped by reads, without invalid elements between the reads /// \param rest_compressed array of read_ids, positions_in_read and directions of reads, grouped by reads, without invalid elements between the reads /// \param read_id_to_compressed_minimizers index of the first minimizer of the next read (array comes from an inclusive scan, hence all indices are shifted by one) /// \param read_id_of_first_read __global__ void compress_minimizers(const representation_t* const window_minimizers_representation, const position_in_read_t* const window_minimizers_position_in_read, const char* const window_minimizers_direction, const ArrayBlock* const read_id_to_windows_section, representation_t* const representations_compressed, Minimizer::ReadidPositionDirection* const rest_compressed, const std::int64_t* const read_id_to_compressed_minimizers, std::uint32_t read_id_of_first_read) { const auto first_input_minimizer = read_id_to_windows_section[blockIdx.x].first_element_; // elements have the index of read_id+1, i.e. everything is shifted by one const auto first_output_minimizer = blockIdx.x == 0 ? 
0 : read_id_to_compressed_minimizers[blockIdx.x - 1]; const auto number_of_minimizers = read_id_to_compressed_minimizers[blockIdx.x] - first_output_minimizer; for (std::uint32_t i = threadIdx.x; i < number_of_minimizers; i += blockDim.x) { representations_compressed[first_output_minimizer + i] = window_minimizers_representation[first_input_minimizer + i]; rest_compressed[first_output_minimizer + i].read_id_ = blockIdx.x + read_id_of_first_read; rest_compressed[first_output_minimizer + i].position_in_read_ = window_minimizers_position_in_read[first_input_minimizer + i]; rest_compressed[first_output_minimizer + i].direction_ = window_minimizers_direction[first_input_minimizer + i]; } } Minimizer::GeneratedSketchElements Minimizer::generate_sketch_elements(DefaultDeviceAllocator allocator, const std::uint64_t number_of_reads_to_add, const std::uint64_t minimizer_size, const std::uint64_t window_size, const std::uint64_t read_id_of_first_read, const device_buffer<char>& merged_basepairs_d, const std::vector<ArrayBlock>& read_id_to_basepairs_section_h, const device_buffer<ArrayBlock>& read_id_to_basepairs_section_d, const bool hash_representations, const hipStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "Minimizer::generate_sketch_elements"); // for each read find the maximum number of minimizers (one per window), determine their section in the minimizer arrays and allocate the arrays std::uint64_t total_windows = 0; std::vector<ArrayBlock> read_id_to_windows_section_h(number_of_reads_to_add, {0, 0}); for (read_id_t read_id = 0; read_id < number_of_reads_to_add; ++read_id) { read_id_to_windows_section_h[read_id].first_element_ = total_windows; std::uint32_t windows = window_size - 1; // front end minimizers windows += read_id_to_basepairs_section_h[read_id].block_size_ - (minimizer_size + window_size - 1) + 1; // central minimizers windows += window_size - 1; read_id_to_windows_section_h[read_id].block_size_ = windows; total_windows += windows; } 
device_buffer<decltype(read_id_to_windows_section_h)::value_type> read_id_to_windows_section_d(read_id_to_windows_section_h.size(), allocator, cuda_stream); cudautils::device_copy_n_async(read_id_to_windows_section_h.data(), read_id_to_windows_section_h.size(), read_id_to_windows_section_d.data(), cuda_stream); // H2D device_buffer<representation_t> window_minimizers_representation_d(total_windows, allocator, cuda_stream); device_buffer<char> window_minimizers_direction_d(total_windows, allocator, cuda_stream); device_buffer<position_in_read_t> window_minimizers_position_in_read_d(total_windows, allocator, cuda_stream); device_buffer<std::int64_t> read_id_to_minimizers_written_d(number_of_reads_to_add, allocator, cuda_stream); // initially there are no minimizers written to the output arrays // TODO: is this needed? GW_CU_CHECK_ERR(hipMemsetAsync(read_id_to_minimizers_written_d.data(), 0, number_of_reads_to_add * sizeof(std::int64_t), cuda_stream)); // *** front end minimizers *** std::uint32_t num_of_basepairs_for_front_minimizers = (window_size - 1) + minimizer_size - 1; std::uint32_t num_of_threads = ::min(num_of_basepairs_for_front_minimizers, 64u); // largest window in end minimizers has the size of window_size-1, meaning it covers window_size-1 + minimizer_size - 1 basepairs const std::uint32_t basepairs_for_end_minimizers = (window_size - 1 + minimizer_size - 1); const std::uint32_t kmers_for_end_minimizers = window_size - 1; // for end minimizers number of kmers is the as the number of windows because the last window has only one kmer const std::uint32_t windows_for_end_minimizers = window_size - 1; // determine total ammount for shared memory needed (see kernel for clarification) // shared memeory is alligned to 8 bytes, so for 1-byte variables (x+7)/8 values are allocate (for 10 1-byte elements (10+7)/8=17/8=2 8-byte elements are allocated, instead of 10/1=1 which would be wrong) // the final number of allocated 8-byte values is multiplied by 8 at the end 
in order to get number of bytes needed std::uint32_t shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += (kmers_for_end_minimizers); // representations of minimizers (representation_t) shared_memory_for_kernel += (windows_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor (position_in_read_t) shared_memory_for_kernel += 1; // representation from previous step shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char) shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t) shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes std::string msg = "Launching find_front_end_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); hipLaunchKernelGGL(( find_front_end_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, cuda_stream, minimizer_size, window_size, merged_basepairs_d.data(), read_id_to_basepairs_section_d.data(), window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(hipPeekAtLastError()); // *** central minimizers *** const std::uint32_t basepairs_per_thread = 8; // arbitrary, tradeoff between the number of thread 
blocks that can be scheduled simultaneously and the number of basepairs which have to be loaded multiple times beacuse only basepairs_per_thread*num_of_threads-(window_size_ + minimizer_size_ - 1) + 1 can be processed at once, i.e. window_size+minimizer_size-2 basepairs have to be loaded again num_of_threads = 64; // arbitrary const std::uint32_t basepairs_in_loop_step = num_of_threads * basepairs_per_thread; const std::uint32_t minimizers_in_loop_step = basepairs_in_loop_step - minimizer_size + 1; const std::uint32_t windows_in_loop_step = minimizers_in_loop_step - window_size + 1; shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += minimizers_in_loop_step; // representations of minimizers (representation_t) shared_memory_for_kernel += (windows_in_loop_step + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // does the window have a different minimizer than its left neighbor shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char) shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t) shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes msg = "Launching find_central_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); hipLaunchKernelGGL(( find_central_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, cuda_stream, minimizer_size, window_size, basepairs_per_thread, merged_basepairs_d.data(), 
read_id_to_basepairs_section_d.data(), window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(hipPeekAtLastError()); // *** back end minimizers *** num_of_threads = 64; // largest window should fit shared memory shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += kmers_for_end_minimizers; // representations of minimizers (representation_t) shared_memory_for_kernel += (kmers_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes msg = "Launching find_back_end_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); hipLaunchKernelGGL(( find_back_end_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, cuda_stream, minimizer_size, window_size, merged_basepairs_d.data(), read_id_to_basepairs_section_d.data(), window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(hipPeekAtLastError()); // *** remove unused elemets from the window minimizers arrays 
*** // In window_minimizers_representation_d and other arrays enough space was allocated to support cases in which each window has a different minimizer. In reality many neighboring windows share the same minimizer // As a result there are unused areas between minimizers belonging to different reads (space_allocated_for_all_possible_minimizers_of_a_read - space_needed_for_the_actual_minimizers) // At this point all mininizer are put together (compressed) so that the last minimizer of one read is next to the first minimizer of another read // after this operation read_id_to_minimizers_written_d should be interpreted as read_id_to_index_of_the_first_minimizer_of_the_next_read_d thrust::inclusive_scan(thrust::hip::par(allocator).on(cuda_stream), read_id_to_minimizers_written_d.data(), read_id_to_minimizers_written_d.data() + read_id_to_minimizers_written_d.size(), read_id_to_minimizers_written_d.data()); // last element of contains the index of first minimizer of theoretical past-the-last read, which is equal to the overall number of minimizers std::int64_t total_minimizers = cudautils::get_value_from_device(&(read_id_to_minimizers_written_d.data()[read_id_to_minimizers_written_d.size() - 1]), cuda_stream); // Data is organized in two arrays in order to support usage of thrust::stable_sort_by_key. 
One contains representations (key) and the other the rest (values) device_buffer<representation_t> representations_compressed_d(total_minimizers, allocator, cuda_stream); // rest = position_in_read, direction and read_id device_buffer<ReadidPositionDirection> rest_compressed_d(total_minimizers, allocator, cuda_stream); msg = "Launching compress_minimizers with " + std::to_string(0) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); hipLaunchKernelGGL(( compress_minimizers), dim3(number_of_reads_to_add), dim3(128), 0, cuda_stream, window_minimizers_representation_d.data(), window_minimizers_position_in_read_d.data(), window_minimizers_direction_d.data(), read_id_to_windows_section_d.data(), representations_compressed_d.data(), rest_compressed_d.data(), read_id_to_minimizers_written_d.data(), read_id_of_first_read); GW_CU_CHECK_ERR(hipPeekAtLastError()); // free these arrays as they are not needed anymore window_minimizers_representation_d.free(); window_minimizers_direction_d.free(); window_minimizers_position_in_read_d.free(); read_id_to_minimizers_written_d.free(); read_id_to_windows_section_d.free(); return {std::move(representations_compressed_d), std::move(rest_compressed_d)}; } } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
3c4fe9e82d41d32acb0d1e08140897b8836a715b.cu
/* * Copyright 2019-2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <limits> #include "minimizer.hpp" #include <thrust/scan.h> #include <thrust/execution_policy.h> namespace claraparabricks { namespace genomeworks { namespace cudamapper { Minimizer::Minimizer(representation_t representation, position_in_read_t position_in_read, DirectionOfRepresentation direction, read_id_t read_id) : representation_(representation) , position_in_read_(position_in_read) , direction_(direction) , read_id_(read_id) { } representation_t Minimizer::representation() const { return representation_; } position_in_read_t Minimizer::position_in_read() const { return position_in_read_; } read_id_t Minimizer::read_id() const { return read_id_; } /// \brief Apply a hash function to a representation /// /// Because of the non-Poisson distribuition of DNA, some common sequences with common kmer-content (e.g long poly-A runs) /// may be over-represented in sketches. By applying a hash function, kmers are mapped to representations over /// a more uniform space. The hash function implemented here was developed by Thomas Wang and is described /// [here](https://gist.github.com/badboy/6267743). A mask is applied to the output so that all representations are mapped /// to a 32 bit space. 
/// /// \param key the input representation __device__ representation_t wang_hash64(representation_t key) { uint64_t mask = (uint64_t(1) << 32) - 1; key = (~key + (key << 21)) & mask; key = key ^ key >> 24; key = ((key + (key << 3)) + (key << 8)) & mask; key = key ^ key >> 14; key = ((key + (key << 2)) + (key << 4)) & mask; key = key ^ key >> 28; key = (key + (key << 31)) & mask; return key; } Minimizer::DirectionOfRepresentation Minimizer::direction() const { return direction_; } /// \brief finds front end minimizers /// /// Finds the minimizers of windows starting at position 0 and having window size range from 1 to window_size-1 /// /// \param minimizer_size kmer length /// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for front end minimizers window size actually varies from 1 to window_size-1) /// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on /// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read /// \param window_minimizers_representation output array of representations of minimizers, grouped by reads /// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse) /// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads /// \param read_id_to_window_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition) /// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially zero) __global__ void find_front_end_minimizers(const std::uint64_t minimizer_size, const std::uint64_t window_size, const char* const basepairs, const 
ArrayBlock* const read_id_to_basepairs_section, representation_t* const window_minimizers_representation, char* const window_minimizers_direction, position_in_read_t* const window_minimizers_position_in_read, const ArrayBlock* const read_id_to_windows_section, std::int64_t* const read_id_to_minimizers_written, const bool hash_representations) { // TODO: simplify this method similarly to find_back_end_minimizers if (1 == window_size) { // if 1 == window_size there are no end minimizer return; } const auto input_array_first_element = read_id_to_basepairs_section[blockIdx.x].first_element_; const auto output_arrays_offset = read_id_to_windows_section[blockIdx.x].first_element_; // Dynamically allocating shared memory and assigning parts of it to different pointers // Everything is 8-byte alligned extern __shared__ std::uint64_t sm[]; // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory // TODO: use sizeof to get the number of bytes std::uint32_t shared_memory_64_bit_elements_already_taken = 0; char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8; char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8; representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements shared_memory_64_bit_elements_already_taken += blockDim.x - (minimizer_size - 1); char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - forward, 1 - reverse shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 7) / 8; 
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2; position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - same, 1 - different shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2; representation_t* minimizer_representation_of_largest_window_from_previous_step = (&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += 1; position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += (1 + 1) / 2; // local index = index in section of the output array dedicated to this read position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element shared_memory_64_bit_elements_already_taken += (1 + 1) / 2; // TODO: Move to constant memory char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements if (0 == threadIdx.x) { forward_to_reverse_complement[0b000] = 0b0000; forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100) forward_to_reverse_complement[0b010] = 0b0000; forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111) forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1) forward_to_reverse_complement[0b101] = 0b0000; forward_to_reverse_complement[0b110] = 0b0000; forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11) } __syncthreads(); // Each thread loads one 
basepair, making it blockDim.x basepairs. Each kmer has minimizer_size elements // Number of windows is equal to the number of kmers for end minimizer // This means a total of blockDim.x - (minimizer_size - 1) kmer can be processed in one block, where each kmer is shifted by one one basepair compared to the previous kmer // For blockDim.x = 6 and minimizer_size = 3 there are 6 - (3 - 1) = 4 kmers // 0 1 2 // 1 2 3 // 2 3 4 // 3 4 5 // If more minimizers have to be processed a new step is needed, in this case meaning // 4 5 6 // 5 6 7 // 6 7 8 // 7 8 9 // This means that a number of basepairs is loaded twice, but this a tradeoff for less complex code const std::uint16_t windows_per_loop_step = blockDim.x - (minimizer_size - 1); *minimizer_representation_of_largest_window_from_previous_step = 0; *minimizer_position_in_read_of_largest_window_from_previous_step = 0; *local_index_to_write_next_minimizer_to = 0; for (std::uint32_t first_element_in_step = 0; first_element_in_step < window_size - 1; first_element_in_step += windows_per_loop_step) { // load basepairs into shared memory and calculate the lexical ordering hash if (first_element_in_step + threadIdx.x < window_size - 1 + minimizer_size - 1) { // window_size - 1 + minimizer_size - 1 -> total number of basepairs needed for all front minimizers const char bp = basepairs[input_array_first_element + first_element_in_step + threadIdx.x]; forward_basepair_hashes[threadIdx.x] = 0b11 & (bp >> 2 ^ bp >> 1); reverse_basepair_hashes[threadIdx.x] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1); } __syncthreads(); // First front end window covers only one minimizer (the one starting at positon 0), second minimizers starting at 0 and 1 and so one until the window which covers window_size-1 minimizers // For window_size = 7 and minimize_size = 3 this means: // window 0: 0 1 2 (0 1 2) // window 1: 0 1 2 3 (0 1 2; 1 2 3) // widnow 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4) // 
window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5) // window 4: 0 1 2 3 4 5 6 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6) // window 5: 0 1 2 3 4 5 6 7 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6; 5 6 7) // If window_size > windows_per_loop_step the other windows have to be processed in other loop steps // For example, for blockDim.x = 6, minimizer_size = 3 (=> windows_per_loop_step = 4) and window_size = 7: // step 0 (first_element_in_step = 0): // window 0: 0 1 2 (0 1 2) // window 1: 0 1 2 3 (0 1 2; 1 2 3) // widnow 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4) // window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5) // step 1 (first_element_in_step = 4): // window 4: 0 1 2 3 4 5 6 (take results for window 3 and add: 4 5 6) // window 5: 0 1 2 3 4 5 6 7 (take results for window 3 and add: 4 5 6; 5 6 7) // This means that a thread has a window assigned to it when thraedIdx.x < minimizers_per_loop (for all loops other than the last one) and // when first_element_in_step + threadIdx.x < window_size - 1 const bool thread_assigned_to_a_window = first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step; // calculate minimizer for each kmer in front end windows if (thread_assigned_to_a_window) { // largest front minimizer window starts at basepar 0 and goes up to window_size -1 representation_t forward_representation = 0; representation_t reverse_representation = 0; // TODO: It's not necessary to fully build both representations in order to determine which one is smaller. 
In most cases there is going to be a difference already at the first element for (std::uint16_t i = 0; i < minimizer_size; ++i) { forward_representation |= forward_basepair_hashes[threadIdx.x + i] << 2 * (minimizer_size - i - 1); reverse_representation |= reverse_basepair_hashes[threadIdx.x + i] << 2 * i; } if (hash_representations) { forward_representation = wang_hash64(forward_representation); reverse_representation = wang_hash64(reverse_representation); } if (forward_representation <= reverse_representation) { minimizers_representation[threadIdx.x] = forward_representation; minimizers_direction[threadIdx.x] = 0; } else { minimizers_representation[threadIdx.x] = reverse_representation; minimizers_direction[threadIdx.x] = 1; } } __syncthreads(); representation_t window_minimizer_representation = 0; position_in_read_t window_minimizer_position_in_read = 0; // calculate minimizer for each window // Start by the value of the first minimizer and iteratively compare it with the other minimizers in the window // If first_element_in_step != 0 there is no need to go through all minimizers in the window. One can take the minimizer of window first_element_in_step-1 // as the current window would check exaclty the same minimizers before checking minimizer first_element_in_step if (thread_assigned_to_a_window) { if (first_element_in_step != 0) { window_minimizer_representation = *minimizer_representation_of_largest_window_from_previous_step; window_minimizer_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step; if (minimizers_representation[0] <= window_minimizer_representation) { window_minimizer_representation = minimizers_representation[0]; window_minimizer_position_in_read = first_element_in_step; } } else { window_minimizer_representation = minimizers_representation[0]; window_minimizer_position_in_read = 0; } // All threads have to wait for the largest block to finish. 
Probably no better solution without big restructuring // If there are several minimizers with the same representation only save the latest one (thus <=), others will be covered by smaller windows for (std::uint16_t i = 1; i <= threadIdx.x; ++i) { if (minimizers_representation[i] <= window_minimizer_representation) { window_minimizer_representation = minimizers_representation[i]; window_minimizer_position_in_read = first_element_in_step + i; } } minimizers_position_in_read[threadIdx.x] = window_minimizer_position_in_read; } __syncthreads(); // only write first occurence of each minimizer to the output array // Hash of the last kmer in a window can be a minimizer only if it is smaller or equal than the minimizer of the previous window // That means that the minimizer of the current window should only be written if it is different than the one of the previous window // Otherwise it it the same minimizer and there is no need write to the the output array // Imagine that hash representation of windows are are follows (the number in the parentheses marks the position of the last occurance of the minimizer with that representation): // 8, 89, 898, 8987, 89878, 898785, 8987856, 89878562 // Minimizers of these windows are // 8(0) 8(0) 8(2) 7(3) 7(3) 5(5) 5(5) 2(7) // If we use 1 to indicate the first occurence of minimizer and 0 for repretition we get // 1 0 1 1 0 1 0 1 // If we do an an inclusive scan on this array we get the indices to which the unique minimizers should be written to (plus one) // 1 1 2 3 3 4 4 5 // From this it's clear that only the windows whose value is larger than the one of its neighbor should write its minimizer and it should write to the element with the index of value-1 if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step) { if (0 == first_element_in_step && 0 == threadIdx.x) { // minimizer of first window is unique for sure as it has no left neighbor different_minimizer_than_neighbors[0] = 1; } else { 
representation_t neighbors_minimizers_position_in_read = 0; // find left neighbor's window minimizer's position in read if (0 == threadIdx.x) { neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step; } else { // TODO: consider using warp shuffle instead of shared memory neighbors_minimizers_position_in_read = minimizers_position_in_read[threadIdx.x - 1]; } // check if it's the same minimizer if (neighbors_minimizers_position_in_read == minimizers_position_in_read[threadIdx.x]) { different_minimizer_than_neighbors[threadIdx.x] = 0; } else { different_minimizer_than_neighbors[threadIdx.x] = 1; } } } __syncthreads(); // if there are more loop steps to follow write the value and position of minimizer of the largest window if (first_element_in_step + windows_per_loop_step < window_size - 1 && threadIdx.x == windows_per_loop_step - 1) { *minimizer_representation_of_largest_window_from_previous_step = window_minimizer_representation; *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read; } // no need to sync, these two values are not used before the next sync // perform inclusive scan // different_minimizer_than_neighbors changes meaning an becomes more like "output_array_index_to_write_the_value_plus_one" // TODO: implement it using warp shuffle or use CUB if (0 == threadIdx.x) { std::uint16_t i = 0; different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to; for (i = 1; i < blockDim.x - (minimizer_size - 1); ++i) { different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1]; } } __syncthreads(); // now save minimizers to output array if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step) { const std::uint32_t neighbors_write_index = 0 == threadIdx.x ? 
*local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[threadIdx.x - 1]; if (neighbors_write_index < different_minimizer_than_neighbors[threadIdx.x]) { const std::uint64_t output_index = output_arrays_offset + different_minimizer_than_neighbors[threadIdx.x] - 1; window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[threadIdx.x] - first_element_in_step]; window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[threadIdx.x] - first_element_in_step]; window_minimizers_position_in_read[output_index] = minimizers_position_in_read[threadIdx.x]; } } __syncthreads(); // index (plus one) to which the last window minimizer was written is the number of all unique front end window minimizers if (first_element_in_step + threadIdx.x == window_size - 1 - 1) { // "plus one" is already included in different_minimizer_than_neighbors as it was created by an inclusive scan read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[threadIdx.x]; } // if there are more loop steps to follow write the output array index of last minimizer in this loop step if (first_element_in_step + windows_per_loop_step <= window_size - 1 && threadIdx.x == windows_per_loop_step - 1) { *local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[threadIdx.x]; } } } /// \brief finds central minimizers /// /// Finds the minimizers of windows of size window_size starting at position 0 and moving by one basepair at a time /// /// \param minimizer_size kmer length /// \param window_size number of kmers in one window, kmers being shifted by one one basepair each /// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on /// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read /// \param window_minimizers_representation output array of 
representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end minimizers)
__global__ void find_central_minimizers(const std::uint64_t minimizer_size,
                                        const std::uint64_t window_size,
                                        const std::uint32_t basepairs_per_thread,
                                        const char* const basepairs,
                                        const ArrayBlock* const read_id_to_basepairs_section,
                                        representation_t* const window_minimizers_representation,
                                        char* const window_minimizers_direction,
                                        position_in_read_t* const window_minimizers_position_in_read,
                                        const ArrayBlock* const read_id_to_windows_section,
                                        std::int64_t* const read_id_to_minimizers_written,
                                        const bool hash_representations)
{
    // See find_front_end_minimizers for more details about the algorithm.
    // One thread block processes one read (blockIdx.x = read index within this batch).
    const std::uint64_t index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_;
    // Index of the element to which the first central minimizer of this read should be written.
    // Index refers to the positions within the whole array dedicated to all reads.
    const std::uint64_t output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + read_id_to_minimizers_written[blockIdx.x];
    const std::uint32_t basepairs_in_read = read_id_to_basepairs_section[blockIdx.x].block_size_;
    const std::uint32_t kmers_in_read     = basepairs_in_read - (minimizer_size - 1);
    const std::uint32_t windows_in_read   = kmers_in_read - (window_size - 1);
    // Each loop step consumes blockDim.x * basepairs_per_thread basepairs; the number of complete
    // kmers/windows contained in that span is correspondingly smaller.
    const std::uint16_t basepairs_per_loop_step = blockDim.x * basepairs_per_thread;
    const std::uint16_t kmers_per_loop_step     = basepairs_per_loop_step - (minimizer_size - 1);
    const std::uint16_t windows_per_loop_step   = kmers_per_loop_step - (window_size - 1);

    // Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
    extern __shared__ std::uint64_t sm[];
    // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
    // TODO: use sizeof to get the number of bytes
    std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
    char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
    char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
    representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += kmers_per_loop_step;
    char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_per_loop_step elements; 0 - forward, 1 - reverse
    // BUGFIX: this array is written at minimizers_direction[kmer_index] for kmer_index < kmers_per_loop_step
    // (and read back at kmer offsets up to kmers_per_loop_step - 1 in the save loop below), so it must be
    // sized for kmers, not windows. Sizing it with windows_per_loop_step let the last window_size-1 entries
    // alias the start of minimizers_position_in_read, which clobbered them before they were read, corrupting
    // the reported minimizer directions. The back end kernel sizes its direction array by kmer count as well.
    // NOTE(review): the launch site must provide dynamic shared memory matching this (slightly larger) layout — confirm.
    shared_memory_64_bit_elements_already_taken += (kmers_per_loop_step + 7) / 8;
    position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements
    shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
    position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - same, 1 - different
    shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
    position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
    shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
    position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
    shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
    // TODO: Move to constant memory
    char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements

    // Lookup table indexed by the low 3 bits of a basepair's code, yielding the complement's code
    if (0 == threadIdx.x)
    {
        forward_to_reverse_complement[0b000] = 0b0000;
        forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
        forward_to_reverse_complement[0b010] = 0b0000;
        forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
        forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
        forward_to_reverse_complement[0b101] = 0b0000;
        forward_to_reverse_complement[0b110] = 0b0000;
        forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
    }
    __syncthreads();

    // if there are front end minimizers take them into account
    if (0 != read_id_to_minimizers_written[blockIdx.x])
    {
        *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
        *local_index_to_write_next_minimizer_to = read_id_to_minimizers_written[blockIdx.x];
    }
    else
    {
        *minimizer_position_in_read_of_largest_window_from_previous_step = 0; // N/A
        *local_index_to_write_next_minimizer_to = 0;
    }

    for (std::uint32_t first_element_in_step = 0; first_element_in_step < windows_in_read; first_element_in_step += windows_per_loop_step)
    {
        // load basepairs into shared memory and calculate the lexical ordering hash
        for (std::uint32_t basepair_index = threadIdx.x; basepair_index < basepairs_per_loop_step && first_element_in_step + basepair_index < basepairs_in_read; basepair_index += blockDim.x)
        {
            const char bp = basepairs[index_of_first_element_to_process_global + first_element_in_step + basepair_index];
            forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
            reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
        }
        __syncthreads();

        // calculate kmer minimizers
        for (std::uint32_t kmer_index = threadIdx.x; kmer_index < kmers_per_loop_step && first_element_in_step + kmer_index < kmers_in_read; kmer_index += blockDim.x)
        {
            representation_t forward_representation = 0;
            representation_t reverse_representation = 0;
            // TODO: It's not necessary to fully build both representations in order to determine which one is smaller.
            // In most cases there is going to be a difference already at the first element.
            // The 2-bit hashes are chars; cast to representation_t before shifting so the shift is done in
            // 64 bits — the previous int-promoted shift would be undefined for minimizer_size > 16.
            for (std::uint16_t i = 0; i < minimizer_size; ++i)
            {
                forward_representation |= static_cast<representation_t>(forward_basepair_hashes[kmer_index + i]) << 2 * (minimizer_size - i - 1);
                reverse_representation |= static_cast<representation_t>(reverse_basepair_hashes[kmer_index + i]) << 2 * i;
            }
            if (hash_representations)
            {
                forward_representation = wang_hash64(forward_representation);
                reverse_representation = wang_hash64(reverse_representation);
            }
            // keep whichever of the forward and reverse-complement representations is smaller
            if (forward_representation <= reverse_representation)
            {
                minimizers_representation[kmer_index] = forward_representation;
                minimizers_direction[kmer_index]      = 0;
            }
            else
            {
                minimizers_representation[kmer_index] = reverse_representation;
                minimizers_direction[kmer_index]      = 1;
            }
        }
        __syncthreads();

        position_in_read_t window_minimizer_position_in_read = 0;
        // find window minimizer
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // assume that the minimizer of the first kmer in step is the window minimizer
            representation_t window_minimizer_representation = minimizers_representation[window_index];
            window_minimizer_position_in_read                = first_element_in_step + window_index;
            // now check the minimizers of all other kmers in the window
            for (std::uint16_t i = 1; i < window_size; ++i)
            {
                if (minimizers_representation[window_index + i] <= window_minimizer_representation)
                {
                    window_minimizer_representation   = minimizers_representation[window_index + i];
                    window_minimizer_position_in_read = first_element_in_step + window_index + i;
                }
            }
            minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
        }
        __syncthreads();

        // check if the window to the left has the same minimizer
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // if this is the first window in read and there were no front end minimizers then this is the first occurrence of this minimizer
            if (0 == first_element_in_step + window_index && 0 == read_id_to_minimizers_written[blockIdx.x])
            {
                different_minimizer_than_neighbors[0] = 1;
            }
            else
            {
                representation_t neighbors_minimizers_position_in_read = 0;
                // find left neighbor's window minimizer's position in read
                if (0 == window_index)
                {
                    neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
                }
                else
                {
                    // TODO: consider using warp shuffle instead of shared memory
                    neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
                }
                // check if it's the same minimizer
                if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
                {
                    different_minimizer_than_neighbors[window_index] = 0;
                }
                else
                {
                    different_minimizer_than_neighbors[window_index] = 1;
                }
            }
        }
        __syncthreads();

        // if there are more loop steps to follow write the position of minimizer of the last window
        // "windows_per_loop_step % blockDim.x - 1" determines the thread which processes the last window
        // NOTE(review): if windows_per_loop_step is an exact multiple of blockDim.x this expression underflows
        // and no thread matches — presumably the launch configuration avoids that case; confirm.
        if (first_element_in_step + windows_per_loop_step < windows_in_read && threadIdx.x == windows_per_loop_step % blockDim.x - 1)
        {
            *minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
        }

        // perform inclusive scan
        // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
        // TODO: implement it using warp shuffle or use CUB
        if (0 == threadIdx.x)
        {
            std::uint16_t i = 0;
            different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
            for (i = 1; i < windows_per_loop_step && first_element_in_step + i < windows_in_read; ++i)
            {
                different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
            }
        }
        __syncthreads();

        // now save minimizers to output array
        for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
        {
            // if first_element_in_loop == 0 and window_index == 0 then *local_index_to_write_next_minimizer_to is set to 0 before entering the loop
            const std::uint32_t neighbors_write_index = 0 == window_index ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[window_index - 1];
            if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
            {
                // output array offset added in inclusive sum
                const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
                window_minimizers_representation[output_index]   = minimizers_representation[minimizers_position_in_read[window_index] - first_element_in_step];
                window_minimizers_direction[output_index]        = minimizers_direction[minimizers_position_in_read[window_index] - first_element_in_step];
                window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
            }
        }
        __syncthreads();

        // increase the number of written minimizers by the number of central minimizers
        // the value is increased by the write index of the last window in read
        if (first_element_in_step + windows_per_loop_step >= windows_in_read && 0 == threadIdx.x)
        { // only do it when there is not going to be new loop step
            read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[windows_in_read - first_element_in_step - 1]; // write the index of the last window
        }
        // if there are more loop steps to follow write the output array index of the last minimizer in this loop step
        if (first_element_in_step + windows_per_loop_step < windows_in_read && 0 == threadIdx.x)
        {
            *local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[windows_per_loop_step - 1]; // index of last written minimizer + 1
        }
    }
}

/// \brief finds back end minimizers
///
/// Finds the minimizers of windows ending at the last basepair and having window size range from 1 to window_size-1
///
/// 
\param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for back end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end and central minimizers)
__global__ void find_back_end_minimizers(const std::uint64_t minimizer_size,
                                         const std::uint64_t window_size,
                                         const char* const basepairs,
                                         const ArrayBlock* const read_id_to_basepairs_section,
                                         representation_t* const window_minimizers_representation,
                                         char* const window_minimizers_direction,
                                         position_in_read_t* const window_minimizers_position_in_read,
                                         const ArrayBlock* const read_id_to_windows_section,
                                         std::int64_t* const read_id_to_minimizers_written,
                                         const bool hash_representations)
{
    // See find_front_end_minimizers for more details about the algorithm.
    // One thread block processes one read (blockIdx.x = read index within this batch).
    if (1 == window_size)
    {
        // if 1 == window_size there are no end minimizers
        return;
    }
    // Index of first basepair which belongs to the largest back end minimizers. Index of that basepair within the read
    const auto index_of_first_element_to_process_local = read_id_to_basepairs_section[blockIdx.x].block_size_ - (window_size - 1 + minimizer_size - 1);
    // Index of first basepair which belongs to the largest back end minimizers. Index of that basepair within the whole array of basepairs for all reads
    const auto index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_ + index_of_first_element_to_process_local;
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the positions within the section dedicated to this read
    const auto output_index_to_write_the_first_minimizer_local = read_id_to_minimizers_written[blockIdx.x];
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the positions within the whole array dedicated to all reads
    const auto output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + output_index_to_write_the_first_minimizer_local;

    // Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
    extern __shared__ std::uint64_t sm[];
    // TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
    // TODO: use sizeof to get the number of bytes
    std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
    char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
    char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
    representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements
    shared_memory_64_bit_elements_already_taken += window_size - 1;
    char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements; 0 - forward, 1 - reverse
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 7) / 8;
    position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
    position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements; 0 - same, 1 - different
    shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
    // TODO: Move to constant memory
    char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements

    // Lookup table indexed by the low 3 bits of a basepair's code, yielding the complement's code
    if (0 == threadIdx.x)
    {
        forward_to_reverse_complement[0b000] = 0b0000;
        forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
        forward_to_reverse_complement[0b010] = 0b0000;
        forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
        forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
        forward_to_reverse_complement[0b101] = 0b0000;
        forward_to_reverse_complement[0b110] = 0b0000;
        forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
    }
    __syncthreads();

    // There are only window_size-1 back end windows. window_size usually has the value of a few dozens
    // Having windows_size so large that it does not fit the shared memory is unlikely
    // If that happens implement this method similarly to find_central_minimizers

    // load basepairs into shared memory and calculate the lexical ordering hash
    for (std::uint16_t basepair_index = threadIdx.x; basepair_index < window_size - 1 + minimizer_size - 1; basepair_index += blockDim.x)
    {
        const char bp = basepairs[index_of_first_element_to_process_global + basepair_index];
        forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
        reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
    }
    __syncthreads();

    // calculate kmer minimizers
    // For back end minimizers the number of kmers is the same as the number of windows
    for (std::uint16_t kmer_index = threadIdx.x; kmer_index < window_size - 1; kmer_index += blockDim.x)
    {
        representation_t forward_representation = 0;
        representation_t reverse_representation = 0;
        // TODO: It's not necessary to fully build both representations in order to determine which one is smaller.
        // In most cases there is going to be a difference already at the first element.
        // NOTE(review): the char hashes are promoted to int before the shift, so minimizer_size > 16 would
        // shift past 31 bits — presumably minimizer_size is restricted by the caller; confirm.
        for (std::uint16_t i = 0; i < minimizer_size; ++i)
        {
            forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
            reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
        }
        if (hash_representations)
        {
            forward_representation = wang_hash64(forward_representation);
            reverse_representation = wang_hash64(reverse_representation);
        }
        // keep whichever of the forward and reverse-complement representations is smaller
        if (forward_representation <= reverse_representation)
        {
            minimizers_representation[kmer_index] = forward_representation;
            minimizers_direction[kmer_index]      = 0;
        }
        else
        {
            minimizers_representation[kmer_index] = reverse_representation;
            minimizers_direction[kmer_index]      = 1;
        }
    }
    __syncthreads();

    // find window minimizer
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        // assume that the first kmer in the window is the minimizer
        representation_t window_minimizer_representation      = minimizers_representation[window_index];
        position_in_read_t window_minimizer_position_in_read  = index_of_first_element_to_process_local + window_index;
        // now check other kmers in the window (note that this is the back end minimizer, so not all windows have the same length)
        for (std::uint16_t i = 1; window_index + i < window_size - 1; ++i)
        {
            if (minimizers_representation[window_index + i] <= window_minimizer_representation)
            {
                window_minimizer_representation   = minimizers_representation[window_index + i];
                window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index + i;
            }
        }
        minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
    }
    __syncthreads();

    // check if the window to the left has the same minimizer
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        representation_t neighbors_minimizers_position_in_read = 0;
        // find left neighbor's window minimizer's position in read
        if (0 == window_index)
        {
            // if this is the first window take the position of the minimizer of the last central minimizer
            neighbors_minimizers_position_in_read = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
        }
        else
        {
            // TODO: consider using warp shuffle instead of shared memory
            neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
        }
        // check if it's the same minimizer
        if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
        {
            different_minimizer_than_neighbors[window_index] = 0;
        }
        else
        {
            different_minimizer_than_neighbors[window_index] = 1;
        }
    }
    __syncthreads();

    // perform inclusive scan
    // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
    // TODO: implement it using warp shuffle or use CUB
    if (0 == threadIdx.x)
    {
        // read_id_to_minimizers_written[blockIdx.x] is the index of the last written plus one
        different_minimizer_than_neighbors[0] += output_index_to_write_the_first_minimizer_local;
        for (std::uint16_t i = 1; i < window_size - 1; ++i)
        {
            different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
        }
    }
    __syncthreads();

    // now save minimizers to output array
    for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
    {
        // different_minimizer_than_neighbors contains an inclusive scan, i.e. it's index_to_write_to + 1
        const std::uint32_t neighbors_write_index = 0 == window_index ? output_index_to_write_the_first_minimizer_local : different_minimizer_than_neighbors[window_index - 1];
        if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
        {
            // to get the actual index to write to do -1 to different_minimizer_than_neighbors
            const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
            // subtract index_of_first_element_to_process_local to get the index in shared memory
            window_minimizers_representation[output_index]   = minimizers_representation[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
            window_minimizers_direction[output_index]        = minimizers_direction[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
            window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
        }
    }
    __syncthreads();

    // save the write index of the last written minimizer
    if (0 == threadIdx.x)
    {
        read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[window_size - 1 - 1];
    }
}

/// \brief packs minimizers of different reads together
///
/// window_minimizers_representation, window_minimizers_position_in_read and window_minimizers_direction all allocate one element for each window in the read.
/// Many windows share the same minimizer and each minimizer is written only once, meaning many elements do not contain minimizers.
/// This function creates new arrays where such elements do not exist.
/// Note that in the input arrays all minimizers of one read are written consecutively, i.e. [read_0 minimizers], [read_0 junk], [read_1 minimizers], [read_1 junk], [read_2 minimizers]... 
/// /// \param window_minimizers_representation array of representations of minimizers, grouped by reads /// \param window_minimizers_position_in_read array of positions in read of minimizers, grouped by reads /// \param window_minimizers_direction array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse) /// \param read_id_to_windows_section index of first element dedicated to that read in input arrays and the number of dedicated elements /// \param representations_compressed array of representations of minimizers, grouped by reads, without invalid elements between the reads /// \param rest_compressed array of read_ids, positions_in_read and directions of reads, grouped by reads, without invalid elements between the reads /// \param read_id_to_compressed_minimizers index of the first minimizer of the next read (array comes from an inclusive scan, hence all indices are shifted by one) /// \param read_id_of_first_read __global__ void compress_minimizers(const representation_t* const window_minimizers_representation, const position_in_read_t* const window_minimizers_position_in_read, const char* const window_minimizers_direction, const ArrayBlock* const read_id_to_windows_section, representation_t* const representations_compressed, Minimizer::ReadidPositionDirection* const rest_compressed, const std::int64_t* const read_id_to_compressed_minimizers, std::uint32_t read_id_of_first_read) { const auto first_input_minimizer = read_id_to_windows_section[blockIdx.x].first_element_; // elements have the index of read_id+1, i.e. everything is shifted by one const auto first_output_minimizer = blockIdx.x == 0 ? 
0 : read_id_to_compressed_minimizers[blockIdx.x - 1]; const auto number_of_minimizers = read_id_to_compressed_minimizers[blockIdx.x] - first_output_minimizer; for (std::uint32_t i = threadIdx.x; i < number_of_minimizers; i += blockDim.x) { representations_compressed[first_output_minimizer + i] = window_minimizers_representation[first_input_minimizer + i]; rest_compressed[first_output_minimizer + i].read_id_ = blockIdx.x + read_id_of_first_read; rest_compressed[first_output_minimizer + i].position_in_read_ = window_minimizers_position_in_read[first_input_minimizer + i]; rest_compressed[first_output_minimizer + i].direction_ = window_minimizers_direction[first_input_minimizer + i]; } } Minimizer::GeneratedSketchElements Minimizer::generate_sketch_elements(DefaultDeviceAllocator allocator, const std::uint64_t number_of_reads_to_add, const std::uint64_t minimizer_size, const std::uint64_t window_size, const std::uint64_t read_id_of_first_read, const device_buffer<char>& merged_basepairs_d, const std::vector<ArrayBlock>& read_id_to_basepairs_section_h, const device_buffer<ArrayBlock>& read_id_to_basepairs_section_d, const bool hash_representations, const cudaStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "Minimizer::generate_sketch_elements"); // for each read find the maximum number of minimizers (one per window), determine their section in the minimizer arrays and allocate the arrays std::uint64_t total_windows = 0; std::vector<ArrayBlock> read_id_to_windows_section_h(number_of_reads_to_add, {0, 0}); for (read_id_t read_id = 0; read_id < number_of_reads_to_add; ++read_id) { read_id_to_windows_section_h[read_id].first_element_ = total_windows; std::uint32_t windows = window_size - 1; // front end minimizers windows += read_id_to_basepairs_section_h[read_id].block_size_ - (minimizer_size + window_size - 1) + 1; // central minimizers windows += window_size - 1; read_id_to_windows_section_h[read_id].block_size_ = windows; total_windows += windows; } 
device_buffer<decltype(read_id_to_windows_section_h)::value_type> read_id_to_windows_section_d(read_id_to_windows_section_h.size(), allocator, cuda_stream); cudautils::device_copy_n_async(read_id_to_windows_section_h.data(), read_id_to_windows_section_h.size(), read_id_to_windows_section_d.data(), cuda_stream); // H2D device_buffer<representation_t> window_minimizers_representation_d(total_windows, allocator, cuda_stream); device_buffer<char> window_minimizers_direction_d(total_windows, allocator, cuda_stream); device_buffer<position_in_read_t> window_minimizers_position_in_read_d(total_windows, allocator, cuda_stream); device_buffer<std::int64_t> read_id_to_minimizers_written_d(number_of_reads_to_add, allocator, cuda_stream); // initially there are no minimizers written to the output arrays // TODO: is this needed? GW_CU_CHECK_ERR(cudaMemsetAsync(read_id_to_minimizers_written_d.data(), 0, number_of_reads_to_add * sizeof(std::int64_t), cuda_stream)); // *** front end minimizers *** std::uint32_t num_of_basepairs_for_front_minimizers = (window_size - 1) + minimizer_size - 1; std::uint32_t num_of_threads = std::min(num_of_basepairs_for_front_minimizers, 64u); // largest window in end minimizers has the size of window_size-1, meaning it covers window_size-1 + minimizer_size - 1 basepairs const std::uint32_t basepairs_for_end_minimizers = (window_size - 1 + minimizer_size - 1); const std::uint32_t kmers_for_end_minimizers = window_size - 1; // for end minimizers number of kmers is the as the number of windows because the last window has only one kmer const std::uint32_t windows_for_end_minimizers = window_size - 1; // determine total ammount for shared memory needed (see kernel for clarification) // shared memeory is alligned to 8 bytes, so for 1-byte variables (x+7)/8 values are allocate (for 10 1-byte elements (10+7)/8=17/8=2 8-byte elements are allocated, instead of 10/1=1 which would be wrong) // the final number of allocated 8-byte values is multiplied by 8 at the 
end in order to get number of bytes needed std::uint32_t shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += (kmers_for_end_minimizers); // representations of minimizers (representation_t) shared_memory_for_kernel += (windows_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor (position_in_read_t) shared_memory_for_kernel += 1; // representation from previous step shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char) shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t) shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes std::string msg = "Launching find_front_end_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); find_front_end_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel, cuda_stream>>>(minimizer_size, window_size, merged_basepairs_d.data(), read_id_to_basepairs_section_d.data(), window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(cudaPeekAtLastError()); // *** central minimizers *** const std::uint32_t basepairs_per_thread = 8; // arbitrary, tradeoff between the number of thread blocks that can be 
scheduled simultaneously and the number of basepairs which have to be loaded multiple times beacuse only basepairs_per_thread*num_of_threads-(window_size_ + minimizer_size_ - 1) + 1 can be processed at once, i.e. window_size+minimizer_size-2 basepairs have to be loaded again num_of_threads = 64; // arbitrary const std::uint32_t basepairs_in_loop_step = num_of_threads * basepairs_per_thread; const std::uint32_t minimizers_in_loop_step = basepairs_in_loop_step - minimizer_size + 1; const std::uint32_t windows_in_loop_step = minimizers_in_loop_step - window_size + 1; shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += minimizers_in_loop_step; // representations of minimizers (representation_t) shared_memory_for_kernel += (windows_in_loop_step + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // does the window have a different minimizer than its left neighbor shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char) shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t) shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes msg = "Launching find_central_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); find_central_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel, cuda_stream>>>(minimizer_size, window_size, basepairs_per_thread, merged_basepairs_d.data(), read_id_to_basepairs_section_d.data(), 
window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(cudaPeekAtLastError()); // *** back end minimizers *** num_of_threads = 64; // largest window should fit shared memory shared_memory_for_kernel = 0; shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char) shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char) shared_memory_for_kernel += kmers_for_end_minimizers; // representations of minimizers (representation_t) shared_memory_for_kernel += (kmers_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t) shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char) shared_memory_for_kernel *= 8; // before it the number of 8-byte values, now get the number of bytes msg = "Launching find_back_end_minimizers with " + std::to_string(shared_memory_for_kernel) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); find_back_end_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel, cuda_stream>>>(minimizer_size, window_size, merged_basepairs_d.data(), read_id_to_basepairs_section_d.data(), window_minimizers_representation_d.data(), window_minimizers_direction_d.data(), window_minimizers_position_in_read_d.data(), read_id_to_windows_section_d.data(), read_id_to_minimizers_written_d.data(), hash_representations); GW_CU_CHECK_ERR(cudaPeekAtLastError()); // *** remove unused elemets from the window minimizers arrays *** // In window_minimizers_representation_d and other arrays enough 
space was allocated to support cases in which each window has a different minimizer. In reality many neighboring windows share the same minimizer // As a result there are unused areas between minimizers belonging to different reads (space_allocated_for_all_possible_minimizers_of_a_read - space_needed_for_the_actual_minimizers) // At this point all mininizer are put together (compressed) so that the last minimizer of one read is next to the first minimizer of another read // after this operation read_id_to_minimizers_written_d should be interpreted as read_id_to_index_of_the_first_minimizer_of_the_next_read_d thrust::inclusive_scan(thrust::cuda::par(allocator).on(cuda_stream), read_id_to_minimizers_written_d.data(), read_id_to_minimizers_written_d.data() + read_id_to_minimizers_written_d.size(), read_id_to_minimizers_written_d.data()); // last element of contains the index of first minimizer of theoretical past-the-last read, which is equal to the overall number of minimizers std::int64_t total_minimizers = cudautils::get_value_from_device(&(read_id_to_minimizers_written_d.data()[read_id_to_minimizers_written_d.size() - 1]), cuda_stream); // Data is organized in two arrays in order to support usage of thrust::stable_sort_by_key. 
One contains representations (key) and the other the rest (values) device_buffer<representation_t> representations_compressed_d(total_minimizers, allocator, cuda_stream); // rest = position_in_read, direction and read_id device_buffer<ReadidPositionDirection> rest_compressed_d(total_minimizers, allocator, cuda_stream); msg = "Launching compress_minimizers with " + std::to_string(0) + " bytes of shared memory"; GW_LOG_INFO(msg.c_str()); compress_minimizers<<<number_of_reads_to_add, 128, 0, cuda_stream>>>(window_minimizers_representation_d.data(), window_minimizers_position_in_read_d.data(), window_minimizers_direction_d.data(), read_id_to_windows_section_d.data(), representations_compressed_d.data(), rest_compressed_d.data(), read_id_to_minimizers_written_d.data(), read_id_of_first_read); GW_CU_CHECK_ERR(cudaPeekAtLastError()); // free these arrays as they are not needed anymore window_minimizers_representation_d.free(); window_minimizers_direction_d.free(); window_minimizers_position_in_read_d.free(); read_id_to_minimizers_written_d.free(); read_id_to_windows_section_d.free(); return {std::move(representations_compressed_d), std::move(rest_compressed_d)}; } } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
7622c8c3e66711cc8ad62375e1226d684a1a48aa.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/matrix/math.cuh> #include <raft/random/rng.cuh> #include <raft/stats/meanvar.cuh> #include <raft/util/cudart_utils.hpp> #include <algorithm> namespace raft { namespace stats { template <typename T> struct MeanVarInputs { T mean, stddev; int rows, cols; bool sample, rowMajor; unsigned long long int seed; static const int N_SIGMAS = 6; T mean_tol() const { return T(N_SIGMAS) * stddev / sqrt(T(rows)); } T var_tol() const { return T(N_SIGMAS) * stddev * stddev * sqrt(T(2.0) / T(::max(1, rows - 1))); } }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MeanVarInputs<T>& ps) { return os << "rows: " << ps.rows << "; cols: " << ps.cols << "; " << (ps.rowMajor ? 
"row-major" : "col-major") << " (tolerance: mean = " << ps.mean_tol() << ", var = " << ps.var_tol() << ")"; } template <typename T> class MeanVarTest : public ::testing::TestWithParam<MeanVarInputs<T>> { public: MeanVarTest() : params(::testing::TestWithParam<MeanVarInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), data(params.rows * params.cols, stream), mean_act(params.cols, stream), vars_act(params.cols, stream) { } protected: void SetUp() override { random::RngState r(params.seed); normal(handle, r, data.data(), params.cols * params.rows, params.mean, params.stddev); if (params.rowMajor) { using layout = raft::row_major; meanvar( handle, raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols), raft::make_device_vector_view<T, int>(mean_act.data(), params.cols), raft::make_device_vector_view<T, int>(vars_act.data(), params.cols), params.sample); } else { using layout = raft::col_major; meanvar( handle, raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols), raft::make_device_vector_view<T, int>(mean_act.data(), params.cols), raft::make_device_vector_view<T, int>(vars_act.data(), params.cols), params.sample); } RAFT_CUDA_TRY(hipStreamSynchronize(stream)); } protected: raft::resources handle; hipStream_t stream; MeanVarInputs<T> params; rmm::device_uvector<T> data, mean_act, vars_act; }; const std::vector<MeanVarInputs<float>> inputsf = { {1.f, 2.f, 1024, 32, true, false, 1234ULL}, {1.f, 2.f, 1024, 64, true, false, 1234ULL}, {1.f, 2.f, 1024, 128, true, false, 1234ULL}, {1.f, 2.f, 1024, 256, true, false, 1234ULL}, {-1.f, 2.f, 1024, 32, false, false, 1234ULL}, {-1.f, 2.f, 1024, 64, false, false, 1234ULL}, {-1.f, 2.f, 1024, 128, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 257, false, false, 1234ULL}, {1.f, 2.f, 1024, 32, true, true, 1234ULL}, {1.f, 2.f, 1024, 64, true, true, 1234ULL}, 
{1.f, 2.f, 1024, 128, true, true, 1234ULL}, {1.f, 2.f, 1024, 256, true, true, 1234ULL}, {-1.f, 2.f, 1024, 32, false, true, 1234ULL}, {-1.f, 2.f, 1024, 64, false, true, 1234ULL}, {-1.f, 2.f, 1024, 128, false, true, 1234ULL}, {-1.f, 2.f, 1024, 256, false, true, 1234ULL}, {-1.f, 2.f, 1024, 257, false, true, 1234ULL}, {-1.f, 2.f, 700, 13, false, true, 1234ULL}, {10.f, 2.f, 500000, 811, false, true, 1234ULL}}; const std::vector<MeanVarInputs<double>> inputsd = {{1.0, 2.0, 1024, 32, true, false, 1234ULL}, {1.0, 2.0, 1024, 64, true, false, 1234ULL}, {1.0, 2.0, 1024, 128, true, false, 1234ULL}, {1.0, 2.0, 1024, 256, true, false, 1234ULL}, {-1.0, 2.0, 1024, 32, false, false, 1234ULL}, {-1.0, 2.0, 1024, 64, false, false, 1234ULL}, {-1.0, 2.0, 1024, 128, false, false, 1234ULL}, {-1.0, 2.0, 1024, 256, false, false, 1234ULL}, {1.0, 2.0, 1024, 32, true, true, 1234ULL}, {1.0, 2.0, 1024, 64, true, true, 1234ULL}, {1.0, 2.0, 1024, 128, true, true, 1234ULL}, {1.0, 2.0, 1024, 256, true, true, 1234ULL}, {-1.0, 2.0, 1024, 32, false, true, 1234ULL}, {-1.0, 2.0, 1024, 64, false, true, 1234ULL}, {-1.0, 2.0, 1024, 128, false, true, 1234ULL}, {-1.0, 2.0, 1024, 256, false, true, 1234ULL}}; typedef MeanVarTest<float> MeanVarTestF; TEST_P(MeanVarTestF, Result) { ASSERT_TRUE(devArrMatch( params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.mean_tol()), stream)); ASSERT_TRUE(devArrMatch(params.stddev * params.stddev, vars_act.data(), params.cols, CompareApproxNoScaling<float>(params.var_tol()), stream)); } typedef MeanVarTest<double> MeanVarTestD; TEST_P(MeanVarTestD, Result) { ASSERT_TRUE(devArrMatch( params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.mean_tol()), stream)); ASSERT_TRUE(devArrMatch(params.stddev * params.stddev, vars_act.data(), params.cols, CompareApproxNoScaling<double>(params.var_tol()), stream)); } INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestD, 
::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
7622c8c3e66711cc8ad62375e1226d684a1a48aa.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/matrix/math.cuh> #include <raft/random/rng.cuh> #include <raft/stats/meanvar.cuh> #include <raft/util/cudart_utils.hpp> #include <algorithm> namespace raft { namespace stats { template <typename T> struct MeanVarInputs { T mean, stddev; int rows, cols; bool sample, rowMajor; unsigned long long int seed; static const int N_SIGMAS = 6; T mean_tol() const { return T(N_SIGMAS) * stddev / sqrt(T(rows)); } T var_tol() const { return T(N_SIGMAS) * stddev * stddev * sqrt(T(2.0) / T(std::max(1, rows - 1))); } }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MeanVarInputs<T>& ps) { return os << "rows: " << ps.rows << "; cols: " << ps.cols << "; " << (ps.rowMajor ? 
"row-major" : "col-major") << " (tolerance: mean = " << ps.mean_tol() << ", var = " << ps.var_tol() << ")"; } template <typename T> class MeanVarTest : public ::testing::TestWithParam<MeanVarInputs<T>> { public: MeanVarTest() : params(::testing::TestWithParam<MeanVarInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), data(params.rows * params.cols, stream), mean_act(params.cols, stream), vars_act(params.cols, stream) { } protected: void SetUp() override { random::RngState r(params.seed); normal(handle, r, data.data(), params.cols * params.rows, params.mean, params.stddev); if (params.rowMajor) { using layout = raft::row_major; meanvar( handle, raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols), raft::make_device_vector_view<T, int>(mean_act.data(), params.cols), raft::make_device_vector_view<T, int>(vars_act.data(), params.cols), params.sample); } else { using layout = raft::col_major; meanvar( handle, raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols), raft::make_device_vector_view<T, int>(mean_act.data(), params.cols), raft::make_device_vector_view<T, int>(vars_act.data(), params.cols), params.sample); } RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } protected: raft::resources handle; cudaStream_t stream; MeanVarInputs<T> params; rmm::device_uvector<T> data, mean_act, vars_act; }; const std::vector<MeanVarInputs<float>> inputsf = { {1.f, 2.f, 1024, 32, true, false, 1234ULL}, {1.f, 2.f, 1024, 64, true, false, 1234ULL}, {1.f, 2.f, 1024, 128, true, false, 1234ULL}, {1.f, 2.f, 1024, 256, true, false, 1234ULL}, {-1.f, 2.f, 1024, 32, false, false, 1234ULL}, {-1.f, 2.f, 1024, 64, false, false, 1234ULL}, {-1.f, 2.f, 1024, 128, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 257, false, false, 1234ULL}, {1.f, 2.f, 1024, 32, true, true, 1234ULL}, {1.f, 2.f, 1024, 64, true, true, 1234ULL}, 
{1.f, 2.f, 1024, 128, true, true, 1234ULL}, {1.f, 2.f, 1024, 256, true, true, 1234ULL}, {-1.f, 2.f, 1024, 32, false, true, 1234ULL}, {-1.f, 2.f, 1024, 64, false, true, 1234ULL}, {-1.f, 2.f, 1024, 128, false, true, 1234ULL}, {-1.f, 2.f, 1024, 256, false, true, 1234ULL}, {-1.f, 2.f, 1024, 257, false, true, 1234ULL}, {-1.f, 2.f, 700, 13, false, true, 1234ULL}, {10.f, 2.f, 500000, 811, false, true, 1234ULL}}; const std::vector<MeanVarInputs<double>> inputsd = {{1.0, 2.0, 1024, 32, true, false, 1234ULL}, {1.0, 2.0, 1024, 64, true, false, 1234ULL}, {1.0, 2.0, 1024, 128, true, false, 1234ULL}, {1.0, 2.0, 1024, 256, true, false, 1234ULL}, {-1.0, 2.0, 1024, 32, false, false, 1234ULL}, {-1.0, 2.0, 1024, 64, false, false, 1234ULL}, {-1.0, 2.0, 1024, 128, false, false, 1234ULL}, {-1.0, 2.0, 1024, 256, false, false, 1234ULL}, {1.0, 2.0, 1024, 32, true, true, 1234ULL}, {1.0, 2.0, 1024, 64, true, true, 1234ULL}, {1.0, 2.0, 1024, 128, true, true, 1234ULL}, {1.0, 2.0, 1024, 256, true, true, 1234ULL}, {-1.0, 2.0, 1024, 32, false, true, 1234ULL}, {-1.0, 2.0, 1024, 64, false, true, 1234ULL}, {-1.0, 2.0, 1024, 128, false, true, 1234ULL}, {-1.0, 2.0, 1024, 256, false, true, 1234ULL}}; typedef MeanVarTest<float> MeanVarTestF; TEST_P(MeanVarTestF, Result) { ASSERT_TRUE(devArrMatch( params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.mean_tol()), stream)); ASSERT_TRUE(devArrMatch(params.stddev * params.stddev, vars_act.data(), params.cols, CompareApproxNoScaling<float>(params.var_tol()), stream)); } typedef MeanVarTest<double> MeanVarTestD; TEST_P(MeanVarTestD, Result) { ASSERT_TRUE(devArrMatch( params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.mean_tol()), stream)); ASSERT_TRUE(devArrMatch(params.stddev * params.stddev, vars_act.data(), params.cols, CompareApproxNoScaling<double>(params.var_tol()), stream)); } INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestD, 
::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
630b6f5e8dcc67465a38b800a6b5a386ba0bd10f.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_dnn.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/flags.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/activation_functor.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/norm_utils.h" PHI_DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { namespace fusion { template <typename T, typename Context> void FusedBatchNormActKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const DenseTensor &mean, const DenseTensor &variance, float momentum, float epsilon, const std::string &act_type, DenseTensor *y, DenseTensor *mean_out, DenseTensor *variance_out, DenseTensor *saved_mean, DenseTensor *saved_variance, DenseTensor *reserve_space) { // Note(andsonder): Fused bn activation only used in the gpu place. 
#if defined(PADDLE_WITH_CUDA) and CUDNN_VERSION >= 7401 using CudnnDataType = phi::backends::gpu::CudnnDataType<T>; using BatchNormParamType = typename CudnnDataType::BatchNormParamType; bool is_gpu_place = dev_ctx.GetPlace().GetType() == phi::AllocationType::GPU; PADDLE_ENFORCE_EQ(is_gpu_place, true, phi::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon1 = static_cast<double>(epsilon); if (epsilon1 <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon1 = ::max(epsilon1, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. // NHWC [batch_size, in_height, in_width, in_channels] const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ(x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::PreconditionNotMet( "The Input dim size should be between 2 and 5")); // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. dev_ctx.template Alloc<BatchNormParamType>(mean_out); dev_ctx.template Alloc<BatchNormParamType>(variance_out); dev_ctx.template Alloc<BatchNormParamType>(saved_mean); dev_ctx.template Alloc<BatchNormParamType>(saved_variance); dev_ctx.template Alloc<T>(y); int N, C, H, W, D; const DataLayout data_layout = phi::DataLayout::kNHWC; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); if ((N * H * W * D) == 1) { // Only 1 element in normalization dimension, // skip the batch norm calculation, let y = act(x). 
auto x_v = phi::EigenVector<T>::Flatten(x); auto y_v = phi::EigenVector<T>::Flatten(*y); auto &dev = *dev_ctx.eigen_device(); if (act_type == "relu") { phi::funcs::ReluCUDAFunctor<T>()(dev, x_v, y_v); } else { PADDLE_THROW(phi::errors::Unimplemented("Unsupported activation type")); } return; } // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); VLOG(3) << "Setting descriptors."; std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); double this_factor = 1. 
- momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; phi::backends::gpu::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; PADDLE_ENFORCE_NOT_NULL( reserve_space, phi::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/nullptr, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space->Resize( {static_cast<int64_t>((reserve_space_size + phi::SizeOf(x.dtype()) - 1) / phi::SizeOf(x.dtype()))}); reserve_space_ptr = dev_ctx.template Alloc<T>(reserve_space); workspace_tensor.Resize({static_cast<int64_t>( (workspace_size + phi::SizeOf(x.dtype()) - 1) / phi::SizeOf(x.dtype()))}); workspace_ptr = dev_ctx.template Alloc<T>(&workspace_tensor); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x.template data<T>(), nullptr, nullptr, data_desc_, y->template data<T>(), bn_param_desc_, scale.template data<BatchNormParamType>(), bias.template data<BatchNormParamType>(), this_factor, dev_ctx.template Alloc<BatchNormParamType>(mean_out), 
dev_ctx.template Alloc<BatchNormParamType>(variance_out), epsilon1, dev_ctx.template Alloc<BatchNormParamType>(saved_mean), dev_ctx.template Alloc<BatchNormParamType>(saved_variance), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #else PADDLE_THROW(phi::errors::Unimplemented( "The fused_batch_norm_act operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif } } // namespace fusion } // namespace phi PD_REGISTER_KERNEL(fused_batch_norm_act, GPU, ALL_LAYOUT, phi::fusion::FusedBatchNormActKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } }
630b6f5e8dcc67465a38b800a6b5a386ba0bd10f.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #ifdef __NVCC__ #include "cub/cub.cuh" #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_dnn.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/flags.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/activation_functor.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/norm_utils.h" PHI_DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { namespace fusion { template <typename T, typename Context> void FusedBatchNormActKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const DenseTensor &mean, const DenseTensor &variance, float momentum, float epsilon, const std::string &act_type, DenseTensor *y, DenseTensor *mean_out, DenseTensor *variance_out, DenseTensor *saved_mean, DenseTensor *saved_variance, DenseTensor *reserve_space) { // Note(andsonder): Fused bn activation only used in the gpu place. 
#if defined(PADDLE_WITH_CUDA) and CUDNN_VERSION >= 7401 using CudnnDataType = phi::backends::gpu::CudnnDataType<T>; using BatchNormParamType = typename CudnnDataType::BatchNormParamType; bool is_gpu_place = dev_ctx.GetPlace().GetType() == phi::AllocationType::GPU; PADDLE_ENFORCE_EQ(is_gpu_place, true, phi::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon1 = static_cast<double>(epsilon); if (epsilon1 <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon1 = std::max(epsilon1, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. // NHWC [batch_size, in_height, in_width, in_channels] const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ(x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::PreconditionNotMet( "The Input dim size should be between 2 and 5")); // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. dev_ctx.template Alloc<BatchNormParamType>(mean_out); dev_ctx.template Alloc<BatchNormParamType>(variance_out); dev_ctx.template Alloc<BatchNormParamType>(saved_mean); dev_ctx.template Alloc<BatchNormParamType>(saved_variance); dev_ctx.template Alloc<T>(y); int N, C, H, W, D; const DataLayout data_layout = phi::DataLayout::kNHWC; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); if ((N * H * W * D) == 1) { // Only 1 element in normalization dimension, // skip the batch norm calculation, let y = act(x). 
auto x_v = phi::EigenVector<T>::Flatten(x); auto y_v = phi::EigenVector<T>::Flatten(*y); auto &dev = *dev_ctx.eigen_device(); if (act_type == "relu") { phi::funcs::ReluCUDAFunctor<T>()(dev, x_v, y_v); } else { PADDLE_THROW(phi::errors::Unimplemented("Unsupported activation type")); } return; } // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); VLOG(3) << "Setting descriptors."; std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); double this_factor = 1. 
- momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; phi::backends::gpu::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; PADDLE_ENFORCE_NOT_NULL( reserve_space, phi::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/nullptr, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space->Resize( {static_cast<int64_t>((reserve_space_size + phi::SizeOf(x.dtype()) - 1) / phi::SizeOf(x.dtype()))}); reserve_space_ptr = dev_ctx.template Alloc<T>(reserve_space); workspace_tensor.Resize({static_cast<int64_t>( (workspace_size + phi::SizeOf(x.dtype()) - 1) / phi::SizeOf(x.dtype()))}); workspace_ptr = dev_ctx.template Alloc<T>(&workspace_tensor); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x.template data<T>(), nullptr, nullptr, data_desc_, y->template data<T>(), bn_param_desc_, scale.template data<BatchNormParamType>(), bias.template data<BatchNormParamType>(), this_factor, dev_ctx.template Alloc<BatchNormParamType>(mean_out), 
dev_ctx.template Alloc<BatchNormParamType>(variance_out), epsilon1, dev_ctx.template Alloc<BatchNormParamType>(saved_mean), dev_ctx.template Alloc<BatchNormParamType>(saved_variance), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( phi::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #else PADDLE_THROW(phi::errors::Unimplemented( "The fused_batch_norm_act operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif } } // namespace fusion } // namespace phi PD_REGISTER_KERNEL(fused_batch_norm_act, GPU, ALL_LAYOUT, phi::fusion::FusedBatchNormActKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } }
09a9b48e1d9b5edf1eadda7d599db87aa82f0f4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the author. * * This version differs from the other vectorAdd example in that managed memory is * used instead of regular host and device memory. */ #include <cuda/api.hpp> #include <iostream> #include <algorithm> #include <cmath> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main() { if (cuda::device::count() == 0) { std::cerr << "No CUDA devices on this system" << "\n"; exit(EXIT_FAILURE); } int numElements = 50000; std::cout << "[Vector addition of " << numElements << " elements]\n"; auto buffer_A = cuda::memory::managed::make_unique<float[]>(numElements); auto buffer_B = cuda::memory::managed::make_unique<float[]>(numElements); auto buffer_C = cuda::memory::managed::make_unique<float[]>(numElements); auto generator = []() { return rand() / (float) RAND_MAX; }; std::generate(buffer_A.get(), buffer_A.get() + numElements, generator); std::generate(buffer_B.get(), buffer_B.get() + numElements, generator); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n"; cuda::launch( vectorAdd, cuda::make_launch_config( blocksPerGrid, threadsPerBlock ), buffer_A.get(), buffer_B.get(), buffer_C.get(), numElements ); // Synchronization is necessary here despite the synchronous nature of the default stream - // since the copying-back of data is not something we've waited 
for cuda::device::current::get().synchronize(); // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (::fabs(buffer_A.get()[i] + buffer_B.get()[i] - buffer_C.get()[i]) > 1e-5) { std::cerr << "Result verification failed at element " << i << "\n"; exit(EXIT_FAILURE); } } std::cout << "Test PASSED\n"; std::cout << "SUCCESS\n"; return 0; }
09a9b48e1d9b5edf1eadda7d599db87aa82f0f4b.cu
/** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the author. * * This version differs from the other vectorAdd example in that managed memory is * used instead of regular host and device memory. */ #include <cuda/api.hpp> #include <iostream> #include <algorithm> #include <cmath> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main() { if (cuda::device::count() == 0) { std::cerr << "No CUDA devices on this system" << "\n"; exit(EXIT_FAILURE); } int numElements = 50000; std::cout << "[Vector addition of " << numElements << " elements]\n"; auto buffer_A = cuda::memory::managed::make_unique<float[]>(numElements); auto buffer_B = cuda::memory::managed::make_unique<float[]>(numElements); auto buffer_C = cuda::memory::managed::make_unique<float[]>(numElements); auto generator = []() { return rand() / (float) RAND_MAX; }; std::generate(buffer_A.get(), buffer_A.get() + numElements, generator); std::generate(buffer_B.get(), buffer_B.get() + numElements, generator); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n"; cuda::launch( vectorAdd, cuda::make_launch_config( blocksPerGrid, threadsPerBlock ), buffer_A.get(), buffer_B.get(), buffer_C.get(), numElements ); // Synchronization is necessary here despite the synchronous nature of the default stream - // since the copying-back of data is not something we've waited for cuda::device::current::get().synchronize(); // Verify that the result vector is 
correct for (int i = 0; i < numElements; ++i) { if (std::fabs(buffer_A.get()[i] + buffer_B.get()[i] - buffer_C.get()[i]) > 1e-5) { std::cerr << "Result verification failed at element " << i << "\n"; exit(EXIT_FAILURE); } } std::cout << "Test PASSED\n"; std::cout << "SUCCESS\n"; return 0; }
6821ff8d9f7ed06d3a37d2dd80cf367bf21006e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <cmath> #include <vector> #include <fstream> #include <iostream> #include <algorithm> #include <hiprand/hiprand.h> #include <string> #define N 9 #define n 3 #define cudaCheckError() { \ hipError_t error = hipGetLastError(); \ if(error != hipSuccess) { \ printf("CUDA error: %s\n", hipGetErrorString(error)); \ exit(-1); \ } \ } void load(char* filename, int* board) { FILE * f = fopen(filename, "r"); if(f == NULL) { printf("Could not open file\n"); return; } char tmpBuff; for(int i=0; i < N; i++) { for(int j=0; j < N; j++) { if(!fscanf(f, "%c\n", &tmpBuff)) { printf("Error reading char\n"); return; } if(tmpBuff >= '1' && tmpBuff <= '9') { board[i*N + j] = (int) (tmpBuff - '0'); } else { board[i*N + j] = 0; } } } fclose(f); } void printBoard(int* board) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%d", board[i*N + j]); } printf("\n"); } } void writeBoard(std::string fn, int* board) { std::ofstream f; std::string filename = fn + ".sol"; // printf(filename.c_str()); f.open(filename.c_str()); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { f << board[i*N + j]; } f << "\n"; } f.close(); } __global__ void genChildBoards(int* frontierBoards, int* childBoards, int numFrontierBoards, int* boardIdx, int* emptySpaces, int* emptySpacesCount) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int offset = idx * N*N; while(idx < numFrontierBoards) { //each thread searches one N*N board for next empty space bool found = false; for(int i = offset; (found == false) && (i < offset + N*N); i++) { if(frontierBoards[i] == 0) { found = true; //now generate a child board with valid input within the constraints int row = (i - offset) / N; int col = (i - offset) % N; //guess integer between [1-N] for(int guess = 1; guess <= N; guess++){ bool valid = true; //check if guess exists in same row for(int c=0; c < N; c++) 
{ if(frontierBoards[row*N + c + offset] == guess) { valid = false; } } //check if guess exists in same column for(int r=0; r < N; r++) { if(frontierBoards[r*N + col + offset] == guess) { valid = false; } } //check if guess exists in same 3x3 box for(int r = n*(row/n); r < n; r++) { for(int c = n*(col/n); c < n; c++) { if(frontierBoards[r*N + c + offset] == guess) { valid = false; } } } //persist the new child board if(valid == true) { int childBoardIdx = atomicAdd(boardIdx, 1); //multiple threads updating this index int emptyIdx = 0; for(int r=0; r < N; r++) { for(int c=0; c < N; c++) { childBoards[childBoardIdx*N*N + r*N + c] = frontierBoards[idx*N*N + r*N + c]; if(frontierBoards[idx*N*N + r*N + c] == 0 && (r != row || c != col)) { emptySpaces[emptyIdx + N*N*childBoardIdx] = r*9 + c; emptyIdx++; } } emptySpacesCount[childBoardIdx] = emptyIdx; //num empty spaces on board childBoards[childBoardIdx*N*N + row*N + col] = guess; } } } } } idx += blockDim.x * gridDim.x; } } __device__ void resetBitmap(bool* map, int mapsize) { for(int i=0; i < mapsize; i++) { map[i] = false; } } //checks if value has been seen earlier //if it has the board is invalid __device__ bool seenIt(int val, bool* seen) { if (val != 0) { if (seen[val-1]) { return true; } seen[val-1] = true; return false; } return false; } //check entire board for value __device__ bool valid(const int* board) { bool seen[N]; resetBitmap(seen, N); //check row for repetitions for(int i=0; i<N; i++) { resetBitmap(seen, N); for(int j=0; j<N; j++) { int v = board[i*N + j]; if(seenIt(v, seen)) { return false; } } } //check col for repetitions for(int j=0; j<N; j++) { resetBitmap(seen, N); for(int i=0; i<N; i++) { int v = board[i*N + j]; if(seenIt(v, seen)) { return false; } } } //check 3x3 for repetitions for(int ridx=0; ridx < n; ridx++) { for(int cidx=0; cidx < n; cidx++) { resetBitmap(seen, N); for(int i=0; i<n; i++) { for(int j=0; j<n; j++) { int v = board[(ridx*n + i)*N + (cidx*n + j)]; if(seenIt(v, seen)) { return 
false; } } } } } return true; } //chech if change is valud __device__ bool valid(const int* board, int changedIdx) { int r = changedIdx / 9; int c = changedIdx % 9; bool seen[N]; resetBitmap(seen, N); if(changedIdx < 0) { return valid(board); } if((board[changedIdx] < 1) || (board[changedIdx] > 9)) { return false; } //check for repetitions in row for(int i=0; i < N; i++) { int v = board[r*N + i]; if(seenIt(v, seen)) { return false; } } //check for repetitions in col resetBitmap(seen, N); for(int i=0; i < N; i++) { int v = board[i*N + c]; if(seenIt(v, seen)) { return false; } } //check 3x3 for repetitions int ridx = r / n; int cidx = c / n; resetBitmap(seen, N); for (int i=0; i < n; i++) { for (int j=0; j < n; j++) { int v = board[(ridx*n + i)*N + (cidx*n + j)]; if(seenIt(v, seen)) { return false; } } } return true; } __global__ void findSolution(int* boards, const int numBoards, int* emptySpaces, int* emptySpacesCount, int* found, int* solution) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int* curBoard; int* curEmptySpace; int curEmptySpaceCount; while ((*found == 0) && (idx < numBoards)) { int emptyIdx = 0; curBoard = boards + idx*N*N; curEmptySpace = emptySpaces + idx*N*N; curEmptySpaceCount = emptySpacesCount[idx]; while((emptyIdx >= 0) && (emptyIdx < curEmptySpaceCount)) { curBoard[curEmptySpace[emptyIdx]]++; if(!valid(curBoard, curEmptySpace[emptyIdx])) { //backtrack if(curBoard[curEmptySpace[emptyIdx]] >= 9) { //reset curBoard[curEmptySpace[emptyIdx]] = 0; emptyIdx--; } } else { emptyIdx++; } } // # if __CUDA_ARCH__>=200 // printf("emptyIdx = %d \n", emptyIdx); // printf("curEmptySpaceCount = %d \n", curEmptySpaceCount); // #endif if(emptyIdx == curEmptySpaceCount) { // # if __CUDA_ARCH__>=200 // printf("found solution \n"); // #endif *found = 1; for(int i=0; i < N*N; i++) { solution[i] = curBoard[i]; } } idx += gridDim.x * blockDim.x; } } // magic starts here int main(int argc, char* argv[]) { if (argc < 2){ printf("Usage: sudokusolver 
(filename.in)\n"); exit(-1); } char* filename = argv[1]; char* delim = "."; std::string fn = strtok(strdup(filename), delim); //used for output //store board as flattened 9*9 int array int* board = new int[N*N]; load(filename, board); int* d_frontierBoards; //start BFS from these boards int* d_childBoards; //boards generated from iteration of BFS int* d_emptySpaces; //location of empty spaces in the boards int* d_emptySpacesCount; //number of empty spaces in each board int* d_boardIdx; //index within child boards // amount of space to allocate for boards generated from BFS const int maxBoardsGen = pow(2,26); hipMalloc(&d_emptySpaces, maxBoardsGen*sizeof(int)); hipMalloc(&d_emptySpacesCount, (maxBoardsGen/81 + 1) * sizeof(int)); hipMalloc(&d_frontierBoards, maxBoardsGen*sizeof(int)); hipMalloc(&d_childBoards, maxBoardsGen*sizeof(int)); hipMalloc(&d_boardIdx, sizeof(int)); hipMemset(d_boardIdx, 0, sizeof(int)); hipMemset(d_frontierBoards, 0, maxBoardsGen*sizeof(int)); hipMemset(d_childBoards, 0, maxBoardsGen*sizeof(int)); //ToDo: optimize block sizes int numBlocks = 512; int threadsPerBlock = 256; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(threadsPerBlock, 1, 1); /*** * Generate BFS of guesses, * each guess generates a child board ***/ //copy given board to frontier hipMemcpy(d_frontierBoards, board, N*N*sizeof(int), hipMemcpyHostToDevice); int totalBoards = 1; //call once as set up for the loop hipLaunchKernelGGL(( genChildBoards), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frontierBoards, d_childBoards, totalBoards, d_boardIdx, d_emptySpaces, d_emptySpacesCount); int BFSiterations = 18; for(int i=0; i < BFSiterations; i++) { hipMemcpy(&totalBoards, d_boardIdx, sizeof(int), hipMemcpyDeviceToHost); // printf("total boards after an iteration %d: %d\n", i, totalBoards); hipMemset(d_boardIdx, 0, sizeof(int)); if(i%2 == 0) { hipLaunchKernelGGL(( genChildBoards), dim3(dimGrid), dim3(dimBlock), 0, 0, d_childBoards, d_frontierBoards, totalBoards, d_boardIdx, d_emptySpaces, 
d_emptySpacesCount); } else { hipLaunchKernelGGL(( genChildBoards), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frontierBoards, d_childBoards, totalBoards, d_boardIdx, d_emptySpaces, d_emptySpacesCount); } } hipMemcpy(&totalBoards, d_boardIdx, sizeof(int), hipMemcpyDeviceToHost); // printf("total boards: %d\n", totalBoards); int* d_found; int* d_solution; //solved board hipMalloc(&d_found, sizeof(int)); hipMalloc(&d_solution, N*N*sizeof(int)); hipMemset(d_found, false, sizeof(int)); hipMemcpy(d_solution, board, N*N*sizeof(int), hipMemcpyHostToDevice); if(BFSiterations % 2 == 1) { d_childBoards = d_frontierBoards; } /*** * Check generated boards for a solution * in separate threads using backtracking ***/ hipLaunchKernelGGL(( findSolution), dim3(dimGrid), dim3(dimBlock), 0, 0, d_childBoards, totalBoards, d_emptySpaces, d_emptySpacesCount, d_found, d_solution); hipDeviceSynchronize(); cudaCheckError(); //copy solution back to host int* h_solution = new int[N*N]; hipMemset(h_solution, 0, N*N*sizeof(int)); hipMemcpy(h_solution, d_solution, N*N*sizeof(int), hipMemcpyDeviceToHost); // printBoard(h_solution); writeBoard(fn, h_solution); delete[] board; delete[] h_solution; hipFree(d_emptySpaces); hipFree(d_emptySpacesCount); hipFree(d_childBoards); hipFree(d_frontierBoards); hipFree(d_boardIdx); hipFree(d_found); hipFree(d_solution); return 0; }
6821ff8d9f7ed06d3a37d2dd80cf367bf21006e8.cu
#include <cstdio> #include <cstdlib> #include <cmath> #include <vector> #include <fstream> #include <iostream> #include <algorithm> #include <curand.h> #include <string> #define N 9 #define n 3 #define cudaCheckError() { \ cudaError_t error = cudaGetLastError(); \ if(error != cudaSuccess) { \ printf("CUDA error: %s\n", cudaGetErrorString(error)); \ exit(-1); \ } \ } void load(char* filename, int* board) { FILE * f = fopen(filename, "r"); if(f == NULL) { printf("Could not open file\n"); return; } char tmpBuff; for(int i=0; i < N; i++) { for(int j=0; j < N; j++) { if(!fscanf(f, "%c\n", &tmpBuff)) { printf("Error reading char\n"); return; } if(tmpBuff >= '1' && tmpBuff <= '9') { board[i*N + j] = (int) (tmpBuff - '0'); } else { board[i*N + j] = 0; } } } fclose(f); } void printBoard(int* board) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%d", board[i*N + j]); } printf("\n"); } } void writeBoard(std::string fn, int* board) { std::ofstream f; std::string filename = fn + ".sol"; // printf(filename.c_str()); f.open(filename.c_str()); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { f << board[i*N + j]; } f << "\n"; } f.close(); } __global__ void genChildBoards(int* frontierBoards, int* childBoards, int numFrontierBoards, int* boardIdx, int* emptySpaces, int* emptySpacesCount) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int offset = idx * N*N; while(idx < numFrontierBoards) { //each thread searches one N*N board for next empty space bool found = false; for(int i = offset; (found == false) && (i < offset + N*N); i++) { if(frontierBoards[i] == 0) { found = true; //now generate a child board with valid input within the constraints int row = (i - offset) / N; int col = (i - offset) % N; //guess integer between [1-N] for(int guess = 1; guess <= N; guess++){ bool valid = true; //check if guess exists in same row for(int c=0; c < N; c++) { if(frontierBoards[row*N + c + offset] == guess) { valid = false; } } //check if guess 
exists in same column for(int r=0; r < N; r++) { if(frontierBoards[r*N + col + offset] == guess) { valid = false; } } //check if guess exists in same 3x3 box for(int r = n*(row/n); r < n; r++) { for(int c = n*(col/n); c < n; c++) { if(frontierBoards[r*N + c + offset] == guess) { valid = false; } } } //persist the new child board if(valid == true) { int childBoardIdx = atomicAdd(boardIdx, 1); //multiple threads updating this index int emptyIdx = 0; for(int r=0; r < N; r++) { for(int c=0; c < N; c++) { childBoards[childBoardIdx*N*N + r*N + c] = frontierBoards[idx*N*N + r*N + c]; if(frontierBoards[idx*N*N + r*N + c] == 0 && (r != row || c != col)) { emptySpaces[emptyIdx + N*N*childBoardIdx] = r*9 + c; emptyIdx++; } } emptySpacesCount[childBoardIdx] = emptyIdx; //num empty spaces on board childBoards[childBoardIdx*N*N + row*N + col] = guess; } } } } } idx += blockDim.x * gridDim.x; } } __device__ void resetBitmap(bool* map, int mapsize) { for(int i=0; i < mapsize; i++) { map[i] = false; } } //checks if value has been seen earlier //if it has the board is invalid __device__ bool seenIt(int val, bool* seen) { if (val != 0) { if (seen[val-1]) { return true; } seen[val-1] = true; return false; } return false; } //check entire board for value __device__ bool valid(const int* board) { bool seen[N]; resetBitmap(seen, N); //check row for repetitions for(int i=0; i<N; i++) { resetBitmap(seen, N); for(int j=0; j<N; j++) { int v = board[i*N + j]; if(seenIt(v, seen)) { return false; } } } //check col for repetitions for(int j=0; j<N; j++) { resetBitmap(seen, N); for(int i=0; i<N; i++) { int v = board[i*N + j]; if(seenIt(v, seen)) { return false; } } } //check 3x3 for repetitions for(int ridx=0; ridx < n; ridx++) { for(int cidx=0; cidx < n; cidx++) { resetBitmap(seen, N); for(int i=0; i<n; i++) { for(int j=0; j<n; j++) { int v = board[(ridx*n + i)*N + (cidx*n + j)]; if(seenIt(v, seen)) { return false; } } } } } return true; } //chech if change is valud __device__ bool valid(const 
int* board, int changedIdx) { int r = changedIdx / 9; int c = changedIdx % 9; bool seen[N]; resetBitmap(seen, N); if(changedIdx < 0) { return valid(board); } if((board[changedIdx] < 1) || (board[changedIdx] > 9)) { return false; } //check for repetitions in row for(int i=0; i < N; i++) { int v = board[r*N + i]; if(seenIt(v, seen)) { return false; } } //check for repetitions in col resetBitmap(seen, N); for(int i=0; i < N; i++) { int v = board[i*N + c]; if(seenIt(v, seen)) { return false; } } //check 3x3 for repetitions int ridx = r / n; int cidx = c / n; resetBitmap(seen, N); for (int i=0; i < n; i++) { for (int j=0; j < n; j++) { int v = board[(ridx*n + i)*N + (cidx*n + j)]; if(seenIt(v, seen)) { return false; } } } return true; } __global__ void findSolution(int* boards, const int numBoards, int* emptySpaces, int* emptySpacesCount, int* found, int* solution) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int* curBoard; int* curEmptySpace; int curEmptySpaceCount; while ((*found == 0) && (idx < numBoards)) { int emptyIdx = 0; curBoard = boards + idx*N*N; curEmptySpace = emptySpaces + idx*N*N; curEmptySpaceCount = emptySpacesCount[idx]; while((emptyIdx >= 0) && (emptyIdx < curEmptySpaceCount)) { curBoard[curEmptySpace[emptyIdx]]++; if(!valid(curBoard, curEmptySpace[emptyIdx])) { //backtrack if(curBoard[curEmptySpace[emptyIdx]] >= 9) { //reset curBoard[curEmptySpace[emptyIdx]] = 0; emptyIdx--; } } else { emptyIdx++; } } // # if __CUDA_ARCH__>=200 // printf("emptyIdx = %d \n", emptyIdx); // printf("curEmptySpaceCount = %d \n", curEmptySpaceCount); // #endif if(emptyIdx == curEmptySpaceCount) { // # if __CUDA_ARCH__>=200 // printf("found solution \n"); // #endif *found = 1; for(int i=0; i < N*N; i++) { solution[i] = curBoard[i]; } } idx += gridDim.x * blockDim.x; } } // magic starts here int main(int argc, char* argv[]) { if (argc < 2){ printf("Usage: sudokusolver (filename.in)\n"); exit(-1); } char* filename = argv[1]; char* delim = "."; std::string fn = 
strtok(strdup(filename), delim); //used for output //store board as flattened 9*9 int array int* board = new int[N*N]; load(filename, board); int* d_frontierBoards; //start BFS from these boards int* d_childBoards; //boards generated from iteration of BFS int* d_emptySpaces; //location of empty spaces in the boards int* d_emptySpacesCount; //number of empty spaces in each board int* d_boardIdx; //index within child boards // amount of space to allocate for boards generated from BFS const int maxBoardsGen = pow(2,26); cudaMalloc(&d_emptySpaces, maxBoardsGen*sizeof(int)); cudaMalloc(&d_emptySpacesCount, (maxBoardsGen/81 + 1) * sizeof(int)); cudaMalloc(&d_frontierBoards, maxBoardsGen*sizeof(int)); cudaMalloc(&d_childBoards, maxBoardsGen*sizeof(int)); cudaMalloc(&d_boardIdx, sizeof(int)); cudaMemset(d_boardIdx, 0, sizeof(int)); cudaMemset(d_frontierBoards, 0, maxBoardsGen*sizeof(int)); cudaMemset(d_childBoards, 0, maxBoardsGen*sizeof(int)); //ToDo: optimize block sizes int numBlocks = 512; int threadsPerBlock = 256; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(threadsPerBlock, 1, 1); /*** * Generate BFS of guesses, * each guess generates a child board ***/ //copy given board to frontier cudaMemcpy(d_frontierBoards, board, N*N*sizeof(int), cudaMemcpyHostToDevice); int totalBoards = 1; //call once as set up for the loop genChildBoards<<<dimGrid, dimBlock>>> (d_frontierBoards, d_childBoards, totalBoards, d_boardIdx, d_emptySpaces, d_emptySpacesCount); int BFSiterations = 18; for(int i=0; i < BFSiterations; i++) { cudaMemcpy(&totalBoards, d_boardIdx, sizeof(int), cudaMemcpyDeviceToHost); // printf("total boards after an iteration %d: %d\n", i, totalBoards); cudaMemset(d_boardIdx, 0, sizeof(int)); if(i%2 == 0) { genChildBoards<<<dimGrid, dimBlock>>> (d_childBoards, d_frontierBoards, totalBoards, d_boardIdx, d_emptySpaces, d_emptySpacesCount); } else { genChildBoards<<<dimGrid, dimBlock>>> (d_frontierBoards, d_childBoards, totalBoards, d_boardIdx, d_emptySpaces, 
d_emptySpacesCount); } } cudaMemcpy(&totalBoards, d_boardIdx, sizeof(int), cudaMemcpyDeviceToHost); // printf("total boards: %d\n", totalBoards); int* d_found; int* d_solution; //solved board cudaMalloc(&d_found, sizeof(int)); cudaMalloc(&d_solution, N*N*sizeof(int)); cudaMemset(d_found, false, sizeof(int)); cudaMemcpy(d_solution, board, N*N*sizeof(int), cudaMemcpyHostToDevice); if(BFSiterations % 2 == 1) { d_childBoards = d_frontierBoards; } /*** * Check generated boards for a solution * in separate threads using backtracking ***/ findSolution<<<dimGrid, dimBlock>>> (d_childBoards, totalBoards, d_emptySpaces, d_emptySpacesCount, d_found, d_solution); cudaDeviceSynchronize(); cudaCheckError(); //copy solution back to host int* h_solution = new int[N*N]; cudaMemset(h_solution, 0, N*N*sizeof(int)); cudaMemcpy(h_solution, d_solution, N*N*sizeof(int), cudaMemcpyDeviceToHost); // printBoard(h_solution); writeBoard(fn, h_solution); delete[] board; delete[] h_solution; cudaFree(d_emptySpaces); cudaFree(d_emptySpacesCount); cudaFree(d_childBoards); cudaFree(d_frontierBoards); cudaFree(d_boardIdx); cudaFree(d_found); cudaFree(d_solution); return 0; }
67540ddaf2e22634be703dba7c39ff93350bd1c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <utility> #include "amir_cuda_util/cuda_util.h" #include "reduceUtils_hip.cuh" namespace amirstan { namespace cuda { template <typename T> struct unary_linear_index_to_reduce_index : public thrust::unary_function<T, T> { int nCols; __host__ __device__ unary_linear_index_to_reduce_index(int nCols) : nCols(nCols) {} __host__ __device__ T operator()(T i) { return i / nCols; } }; template <typename T> struct unary_divide_by_scalar : public thrust::unary_function<T, T> { const T a; unary_divide_by_scalar(T _a) : a(_a) {} __host__ __device__ T operator()(const T &x) const { return x / a; } }; template <typename T> void tensorMean(T *dst, T *src, int *src_size, bool *reduce_dims, int dims, hipStream_t stream, void *workspace) { size_t src_length = 1; size_t dst_length = 1; size_t mean_length = 1; int num_keep = 0; for (int i = 0; i < dims; ++i) { src_length *= src_size[i]; if (!reduce_dims[i]) { dst_length *= src_size[i]; num_keep += 1; } else { mean_length *= src_size[i]; } } // compute permute dims bool need_permute = false; int keep_start = 0; int reduce_start = num_keep; int *permute = new int(dims); for (int i = 0; i < dims; ++i) { if (!reduce_dims[i]) { permute[keep_start++] = i; if (i >= num_keep) { need_permute = true; } } else { permute[reduce_start++] = i; } } // create working memory T *permute_src; if (need_permute) { if (workspace) { permute_src = (T *)workspace; } else { hipMalloc(&permute_src, src_length * sizeof(T)); } memcpyPermute<T>(permute_src, src, src_size, permute, dims, stream); } else { permute_src = src; } reduce2DContigous<T>(dst, permute_src, (T)0., src_length, dst_length, thrust::identity<T>{}, 
reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); if (need_permute && workspace == nullptr) { hipFree(permute_src); } delete[] permute; } template void tensorMean<float>(float *dst, float *src, int *src_size, bool *reduce_dims, int dims, hipStream_t stream, void *workspace); // tensorMeanVar template <typename T> struct subMeanSquareOp { const T *mean; subMeanSquareOp(const T *mean) : mean(mean) {} inline __device__ T operator()(const T &x) { const size_t slice_id = getLinearBlockId(); const T sub_mean = x - mean[slice_id]; return sub_mean * sub_mean; } }; // template <typename T> // __global__ void subMeanSquareInplace(T *src, const T* mean, int src_size, int // nCols){ // CUDA_KERNEL_LOOP(i, src_size){ // const int mean_id = i/nCols; // const T sub_mean = src[i] - mean[mean_id]; // src[i] = sub_mean*sub_mean; // } // } template <typename T> void tensorMeanVar(T *mean_dst, T *var_dst, const T *src, int *src_size, bool *reduce_dims, int dims, hipStream_t stream, void *workspace) { size_t src_length = 1; size_t dst_length = 1; size_t mean_length = 1; int num_keep = 0; for (int i = 0; i < dims; ++i) { src_length *= src_size[i]; if (!reduce_dims[i]) { dst_length *= src_size[i]; num_keep += 1; } else { mean_length *= src_size[i]; } } // compute permute dims bool need_permute = false; int keep_start = 0; int reduce_start = num_keep; int *permute = new int(dims); for (int i = 0; i < dims; ++i) { if (!reduce_dims[i]) { permute[keep_start++] = i; if (i >= num_keep) { need_permute = true; } } else { permute[reduce_start++] = i; } } // create working memory T *permute_src; if (workspace) { permute_src = (T *)workspace; } else { hipMalloc(&permute_src, src_length * sizeof(T)); } if (need_permute) { memcpyPermute<T>(permute_src, src, src_size, permute, dims, stream); reduce2DContigous<T>(mean_dst, permute_src, (T)0., src_length, dst_length, thrust::identity<T>{}, reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); reduce2DContigous<T>(var_dst, permute_src, (T)0., 
src_length, dst_length, subMeanSquareOp<T>(mean_dst), reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); } else { reduce2DContigous<T>(mean_dst, src, (T)0., src_length, dst_length, thrust::identity<T>{}, reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); reduce2DContigous<T>(var_dst, src, (T)0., src_length, dst_length, subMeanSquareOp<T>(mean_dst), reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); // hipMemcpyAsync(permute_src, src, src_length*sizeof(T), // hipMemcpyDeviceToDevice, stream); permute_src = src; } if (need_permute && workspace == nullptr) { hipFree(permute_src); } delete[] permute; } template void tensorMeanVar<float>(float *mean_dst, float *vat_dst, const float *src, int *src_size, bool *reduce_dims, int dims, hipStream_t stream, void *workspace); } // namespace cuda } // namespace amirstan
67540ddaf2e22634be703dba7c39ff93350bd1c3.cu
#include <stdio.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <utility> #include "amir_cuda_util/cuda_util.h" #include "reduceUtils.cuh" namespace amirstan { namespace cuda { template <typename T> struct unary_linear_index_to_reduce_index : public thrust::unary_function<T, T> { int nCols; __host__ __device__ unary_linear_index_to_reduce_index(int nCols) : nCols(nCols) {} __host__ __device__ T operator()(T i) { return i / nCols; } }; template <typename T> struct unary_divide_by_scalar : public thrust::unary_function<T, T> { const T a; unary_divide_by_scalar(T _a) : a(_a) {} __host__ __device__ T operator()(const T &x) const { return x / a; } }; template <typename T> void tensorMean(T *dst, T *src, int *src_size, bool *reduce_dims, int dims, cudaStream_t stream, void *workspace) { size_t src_length = 1; size_t dst_length = 1; size_t mean_length = 1; int num_keep = 0; for (int i = 0; i < dims; ++i) { src_length *= src_size[i]; if (!reduce_dims[i]) { dst_length *= src_size[i]; num_keep += 1; } else { mean_length *= src_size[i]; } } // compute permute dims bool need_permute = false; int keep_start = 0; int reduce_start = num_keep; int *permute = new int(dims); for (int i = 0; i < dims; ++i) { if (!reduce_dims[i]) { permute[keep_start++] = i; if (i >= num_keep) { need_permute = true; } } else { permute[reduce_start++] = i; } } // create working memory T *permute_src; if (need_permute) { if (workspace) { permute_src = (T *)workspace; } else { cudaMalloc(&permute_src, src_length * sizeof(T)); } memcpyPermute<T>(permute_src, src, src_size, permute, dims, stream); } else { permute_src = src; } reduce2DContigous<T>(dst, permute_src, (T)0., src_length, dst_length, thrust::identity<T>{}, reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); if (need_permute && workspace == 
nullptr) { cudaFree(permute_src); } delete[] permute; } template void tensorMean<float>(float *dst, float *src, int *src_size, bool *reduce_dims, int dims, cudaStream_t stream, void *workspace); // tensorMeanVar template <typename T> struct subMeanSquareOp { const T *mean; subMeanSquareOp(const T *mean) : mean(mean) {} inline __device__ T operator()(const T &x) { const size_t slice_id = getLinearBlockId(); const T sub_mean = x - mean[slice_id]; return sub_mean * sub_mean; } }; // template <typename T> // __global__ void subMeanSquareInplace(T *src, const T* mean, int src_size, int // nCols){ // CUDA_KERNEL_LOOP(i, src_size){ // const int mean_id = i/nCols; // const T sub_mean = src[i] - mean[mean_id]; // src[i] = sub_mean*sub_mean; // } // } template <typename T> void tensorMeanVar(T *mean_dst, T *var_dst, const T *src, int *src_size, bool *reduce_dims, int dims, cudaStream_t stream, void *workspace) { size_t src_length = 1; size_t dst_length = 1; size_t mean_length = 1; int num_keep = 0; for (int i = 0; i < dims; ++i) { src_length *= src_size[i]; if (!reduce_dims[i]) { dst_length *= src_size[i]; num_keep += 1; } else { mean_length *= src_size[i]; } } // compute permute dims bool need_permute = false; int keep_start = 0; int reduce_start = num_keep; int *permute = new int(dims); for (int i = 0; i < dims; ++i) { if (!reduce_dims[i]) { permute[keep_start++] = i; if (i >= num_keep) { need_permute = true; } } else { permute[reduce_start++] = i; } } // create working memory T *permute_src; if (workspace) { permute_src = (T *)workspace; } else { cudaMalloc(&permute_src, src_length * sizeof(T)); } if (need_permute) { memcpyPermute<T>(permute_src, src, src_size, permute, dims, stream); reduce2DContigous<T>(mean_dst, permute_src, (T)0., src_length, dst_length, thrust::identity<T>{}, reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); reduce2DContigous<T>(var_dst, permute_src, (T)0., src_length, dst_length, subMeanSquareOp<T>(mean_dst), reduceSumOp<T>(), 
divScalarOp<T>(mean_length), stream); } else { reduce2DContigous<T>(mean_dst, src, (T)0., src_length, dst_length, thrust::identity<T>{}, reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); reduce2DContigous<T>(var_dst, src, (T)0., src_length, dst_length, subMeanSquareOp<T>(mean_dst), reduceSumOp<T>(), divScalarOp<T>(mean_length), stream); // cudaMemcpyAsync(permute_src, src, src_length*sizeof(T), // cudaMemcpyDeviceToDevice, stream); permute_src = src; } if (need_permute && workspace == nullptr) { cudaFree(permute_src); } delete[] permute; } template void tensorMeanVar<float>(float *mean_dst, float *vat_dst, const float *src, int *src_size, bool *reduce_dims, int dims, cudaStream_t stream, void *workspace); } // namespace cuda } // namespace amirstan
676723213d8410d639636f33e8797d5079e6699b.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example is almost the same as example 27 which uses 3xTF32 to run GEMM. 
The only difference is that this example uses 3xtf32 on complex gemm. To enable this feature, the only change needs to make is to change OpMultiplyAddComplex to OpMultiplyAddComplexFastF32. */ #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; hipError_t error; int m, n, k; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; // ctor Result( int m, int n, int k, double runtime_ms, double gflops, double l2_norm_3xtf32_vs_fp64, double l2_norm_1xtf32_vs_fp64, double l2_norm_fp32_vs_fp64) : m(m), n(n), k(k), runtime_ms(runtime_ms), gflops(gflops), l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64), l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64), l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {} Result() {} // // Methods // static void print_csv_header() { std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl; } void print_csv_row() { std::cout << m << "," << n << "," << k << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64 << std::endl; } }; std::vector<Result> results; 
/////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int iterations; int seed; bool benchmark; Options(): help(false), problem_size({3456, 4096, 4096}), iterations(20), seed(1), alpha(1), beta(), rand_mode("uniform"), benchmark(false) { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm example\n\n" << " This example uses the CUTLASS Library to emulate FP32 complex GEMM computations with TF32 tensor cores.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_ampere_3xtf32_fast_accurate_complex_gemm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product(); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<64, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<32, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< cutlass::complex<float>, // <- data type of output matrix 1, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too cutlass::complex<float>, // <- data type of accumulator cutlass::complex<float>>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Transform constexpr cutlass::ComplexTransform TransformA = cutlass::ComplexTransform::kNone; constexpr cutlass::ComplexTransform TransformB = cutlass::ComplexTransform::kNone; // // Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64) // // Gemm_3xTF32 using Gemm_3xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplexFastF32>; // Gemm_1xTF32 using Gemm_1xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplex>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. 
Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<cutlass::complex<float>, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<float>, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // 
<- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // <- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F64 tensors using the same values used for F32 //////////////////////////////////////////////////////////////////////////////// // Gemm input operands (A, B, C) cutlass::HostTensor<cutlass::complex<double>, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<double>, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Gemm output (D) for GEMM_F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_3xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_1xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); 
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // Initialize alpha and beta for dot product computation cutlass::complex<float> alpha = cutlass::complex<float>(options.alpha); cutlass::complex<float> beta = cutlass::complex<float>(options.beta); // Split K dimension into 1 partitions int split_k_slices = 1; //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_3xTF32 gemm_op; // Check the problem size is supported or not cutlass::Status status_3xtf32 = gemm_op.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = gemm_op.initialize(arguments_3xtf32, 
workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Result structure Result result; // // Construct events // hipEvent_t events[2]; for (auto & event : events) { result.error = hipEventCreate(&event); if (result.error != hipSuccess) { std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl; return false; } } // Record an event at the start of a series of GEMMs result.error = hipEventRecord(events[0]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // Launch initialized CUTLASS kernel status_3xtf32 = gemm_op(); CUTLASS_CHECK(status_3xtf32); } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = hipEventRecord(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Wait for work on the device to complete. result.error = hipEventSynchronize(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != hipSuccess) { std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl; return false; } // Compute average runtime and GFLOPs. result.m = problem_size.m(); result.n = problem_size.n(); result.k = problem_size.k(); result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)hipEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. 
Run TF32 kernel without profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_1xTF32 gemm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = gemm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F64.device_ref(), TransformA, tensor_b_F64.device_ref(), TransformB, beta, tensor_c_F64.device_ref(), tensor_d_F64.device_ref(), 
cutlass::complex<double>(0.f)); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F32.device_ref(), TransformA, tensor_b_F32.device_ref(), TransformB, beta, tensor_c_F32.device_ref(), tensor_d_F32.device_ref(), cutlass::complex<float>(0.f)); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); result.l2_norm_fp32_vs_fp64 
= cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); results.push_back(result); /////////////////////////////////////////////////////////////////////////////// // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << std::fixed; std::cout.precision(4); std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout.precision(2); std::cout << "GFLOPs: " << result.gflops << std::endl; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific << " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl << " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl << " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return false; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (options.benchmark) { for (int k = 4; k <= 65536; k *= 2) { options.problem_size[2] = k; printf("Gemm problem size: %d x %d x %d\n", \ options.problem_size.m(), options.problem_size.n(), options.problem_size.k()); if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result &= run(options); } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); } if (!result) return -1; std::cout << std::endl << "CSV results" << std::endl; Result::print_csv_header(); for(auto &r : results) r.print_csv_row(); return 0; }
676723213d8410d639636f33e8797d5079e6699b.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example is almost the same as example 27 which uses 3xTF32 to run GEMM. The only difference is that this example uses 3xtf32 on complex gemm. 
To enable this feature, the only change needs to make is to change OpMultiplyAddComplex to OpMultiplyAddComplexFastF32. */ #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; int m, n, k; double l2_norm_3xtf32_vs_fp64; double l2_norm_1xtf32_vs_fp64; double l2_norm_fp32_vs_fp64; // ctor Result( int m, int n, int k, double runtime_ms, double gflops, double l2_norm_3xtf32_vs_fp64, double l2_norm_1xtf32_vs_fp64, double l2_norm_fp32_vs_fp64) : m(m), n(n), k(k), runtime_ms(runtime_ms), gflops(gflops), l2_norm_3xtf32_vs_fp64(l2_norm_3xtf32_vs_fp64), l2_norm_1xtf32_vs_fp64(l2_norm_1xtf32_vs_fp64), l2_norm_fp32_vs_fp64(l2_norm_fp32_vs_fp64) {} Result() {} // // Methods // static void print_csv_header() { std::cout << "M,N,K,Runtime(ms),GFLOPS,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64" << std::endl; } void print_csv_row() { std::cout << m << "," << n << "," << k << "," << runtime_ms << "," << gflops << "," << l2_norm_3xtf32_vs_fp64 << "," << l2_norm_1xtf32_vs_fp64 << "," << l2_norm_fp32_vs_fp64 << std::endl; } }; std::vector<Result> results; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct 
Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int iterations; int seed; bool benchmark; Options(): help(false), problem_size({3456, 4096, 4096}), iterations(20), seed(1), alpha(1), beta(), rand_mode("uniform"), benchmark(false) { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm example\n\n" << " This example uses the CUTLASS Library to emulate FP32 complex GEMM computations with TF32 tensor cores.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_ampere_3xtf32_fast_accurate_complex_gemm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double 
gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product(); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<64, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<32, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< cutlass::complex<float>, // <- data type of output matrix 1, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too cutlass::complex<float>, // <- data type of accumulator cutlass::complex<float>>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Transform constexpr cutlass::ComplexTransform TransformA = cutlass::ComplexTransform::kNone; constexpr cutlass::ComplexTransform TransformB = cutlass::ComplexTransform::kNone; // // Gemm Operators (Gemm_3xTF32, Gemm_1xTF32, GEMM_F32, GEMM_F64) // // Gemm_3xTF32 using Gemm_3xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplexFastF32>; // Gemm_1xTF32 using Gemm_1xTF32 = cutlass::gemm::device::GemmComplex< cutlass::complex<float>, LayoutInputA, cutlass::complex<float>, LayoutInputB, cutlass::complex<float>, LayoutOutput, cutlass::complex<float>, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, TransformA, TransformB, cutlass::arch::OpMultiplyAddComplex>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. 
Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<cutlass::complex<float>, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<float>, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // 
<- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // <- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. Initialize F64 tensors using the same values used for F32 //////////////////////////////////////////////////////////////////////////////// // Gemm input operands (A, B, C) cutlass::HostTensor<cutlass::complex<double>, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<cutlass::complex<double>, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Gemm output (D) for GEMM_F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_3xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Gemm output (D) for GEMM_1xTF32 cutlass::HostTensor<cutlass::complex<float>, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); 
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); // Initialize alpha and beta for dot product computation cutlass::complex<float> alpha = cutlass::complex<float>(options.alpha); cutlass::complex<float> beta = cutlass::complex<float>(options.beta); // Split K dimension into 1 partitions int split_k_slices = 1; //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel within a profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_3xTF32::Arguments arguments_3xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_3xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Gemm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_3xTF32 gemm_op; // Check the problem size is supported or not cutlass::Status status_3xtf32 = gemm_op.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = gemm_op.initialize(arguments_3xtf32, 
workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Result structure Result result; // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } } // Record an event at the start of a series of GEMMs result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // Launch initialized CUTLASS kernel status_3xtf32 = gemm_op(); CUTLASS_CHECK(status_3xtf32); } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return false; } // Compute average runtime and GFLOPs. result.m = problem_size.m(); result.n = problem_size.n(); result.k = problem_size.k(); result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. 
Run TF32 kernel without profiling loop //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm_1xTF32::Arguments arguments_1xtf32{problem_size, // <- problem size of matrix multiplication tensor_a_F32.device_ref(), // <- reference to matrix A on device tensor_b_F32.device_ref(), // <- reference to matrix B on device tensor_c_F32.device_ref(), // <- reference to matrix C on device tensor_d_1xTF32.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Gemm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Gemm_1xTF32 gemm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = gemm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = gemm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = gemm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F64) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F64.device_ref(), TransformA, tensor_b_F64.device_ref(), TransformB, beta, tensor_c_F64.device_ref(), tensor_d_F64.device_ref(), 
cutlass::complex<double>(0.f)); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F64.sync_host(); //////////////////////////////////////////////////////////////////////////////// // Run reference kernel (F32) //////////////////////////////////////////////////////////////////////////////// // Launch device reference gemm kernel cutlass::reference::device::GemmComplex( problem_size, alpha, tensor_a_F32.device_ref(), TransformA, tensor_b_F32.device_ref(), TransformB, beta, tensor_c_F32.device_ref(), tensor_d_F32.device_ref(), cutlass::complex<float>(0.f)); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d_F32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /////// Compute l2 norms //////////////////////////////////////////////////////////////////////////////// // l2 norm 3xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm F32 vs F64 cutlass::HostTensor<cutlass::complex<double>, LayoutOutput> tensor_d_F32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view()); 
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view()); results.push_back(result); /////////////////////////////////////////////////////////////////////////////// // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << std::fixed; std::cout.precision(4); std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout.precision(2); std::cout << "GFLOPs: " << result.gflops << std::endl; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific << " - 3xTF32 error with FP64 reference : " << result.l2_norm_3xtf32_vs_fp64 << std::endl << " - 1xTF32 error with FP64 reference : " << result.l2_norm_1xtf32_vs_fp64 << std::endl << " - FP32 error with FP64 reference : " << result.l2_norm_fp32_vs_fp64 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return false; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (options.benchmark) { for (int k = 4; k <= 65536; k *= 2) { options.problem_size[2] = k; printf("Gemm problem size: %d x %d x %d\n", \ options.problem_size.m(), options.problem_size.n(), options.problem_size.k()); if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result &= run(options); } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); } if (!result) return -1; std::cout << std::endl << "CSV results" << std::endl; Result::print_csv_header(); for(auto &r : results) r.print_csv_row(); return 0; }
1ce06ac0c7fc5d95545199e333b3ad35197b93ae.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8:: do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width< PerChannelBiasVisitor, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
1ce06ac0c7fc5d95545199e333b3ad35197b93ae.cu
// generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8:: do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width< PerChannelBiasVisitor, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
2ced364e630981d2b560af542b36ded632bca1a5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cublasLt.h> #include "helpers.h" #include "sample_cublasLt_LtFp8Matmul.h" /// Sample wrapper executing fp8 matmul with cublasLtMatmul, with addition of per-tensor scaling, amax calculations, and /// the workspace to support split-K algorithms. 
/// /// pointer mode is for alpha and beta is always host, to change it configure the appropriate matmul descriptor /// attribute matmul is not using cublas handle's configuration of math mode, here tensor ops are implicitly allowed; to /// change this configure appropriate attribute in the preference handle void LtFp8Matmul(cublasLtHandle_t ltHandle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float *alpha, /* host pointer */ const float *a_scale, /* device pointer */ const __nv_fp8_e4m3 *A, int lda, const float *b_scale, /* device pointer */ const __nv_fp8_e4m3 *B, int ldb, const float *beta, /* host pointer */ const float *c_scale, /* device pointer */ __nv_fp8_e4m3 *C, int ldc, const float *d_scale, /* device pointer */ float *amax_d, /* device pointer */ void *workspace, size_t workspaceSize) { cublasLtMatmulDesc_t operationDesc = NULL; cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL, Ddesc = NULL; cublasLtMatmulPreference_t preference = NULL; int returnedResults = 0; cublasLtMatmulHeuristicResult_t heuristicResult = {}; // create operation desciriptor; see cublasLtMatmulDescAttributes_t for details about defaults; here we just need to // set the transforms for A and B checkCublasStatus(cublasLtMatmulDescCreate(&operationDesc, CUBLAS_COMPUTE_32F, HIP_R_32F)); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa))); // set scaling factors checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_A_SCALE_POINTER, &a_scale, sizeof(a_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_B_SCALE_POINTER, &b_scale, sizeof(b_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_C_SCALE_POINTER, &c_scale, sizeof(c_scale))); 
checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_D_SCALE_POINTER, &d_scale, sizeof(d_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_AMAX_D_POINTER, &amax_d, sizeof(amax_d))); // create matrix descriptors, we are good with the details here so no need to set any extra attributes checkCublasStatus(cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_8F_E4M3, transa == HIPBLAS_OP_N ? m : k, transa == HIPBLAS_OP_N ? k : m, lda)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_8F_E4M3, transb == HIPBLAS_OP_N ? k : n, transb == HIPBLAS_OP_N ? n : k, ldb)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Cdesc, CUDA_R_16BF, m, n, ldc)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Ddesc, CUDA_R_8F_E4M3, m, n, ldc)); // create preference handle; here we could use extra attributes to disable tensor ops or to make sure algo selected // will work with badly aligned A, B, C; here for simplicity we just assume A,B,C are always well aligned (e.g. // directly come from hipMalloc) checkCublasStatus(cublasLtMatmulPreferenceCreate(&preference)); checkCublasStatus(cublasLtMatmulPreferenceSetAttribute(preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize))); // we just need the best available heuristic to try and run matmul. There is no guarantee this will work, e.g. if A // is badly aligned, you can request more (e.g. 
32) algos and try to run them one by one until something works checkCublasStatus(cublasLtMatmulAlgoGetHeuristic(ltHandle, operationDesc, Adesc, Bdesc, Cdesc, Ddesc, preference, 1, &heuristicResult, &returnedResults)); if (returnedResults == 0) { checkCublasStatus(HIPBLAS_STATUS_NOT_SUPPORTED); } checkCublasStatus(cublasLtMatmul(ltHandle, operationDesc, alpha, A, Adesc, B, Bdesc, beta, C, Cdesc, C, Cdesc, &heuristicResult.algo, workspace, workspaceSize, 0)); // descriptors are no longer needed as all GPU work was already enqueued if (preference) checkCublasStatus(cublasLtMatmulPreferenceDestroy(preference)); if (Cdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Cdesc)); if (Bdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Bdesc)); if (Adesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Adesc)); if (operationDesc) checkCublasStatus(cublasLtMatmulDescDestroy(operationDesc)); }
2ced364e630981d2b560af542b36ded632bca1a5.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cublasLt.h> #include "helpers.h" #include "sample_cublasLt_LtFp8Matmul.h" /// Sample wrapper executing fp8 matmul with cublasLtMatmul, with addition of per-tensor scaling, amax calculations, and /// the workspace to support split-K algorithms. 
/// /// pointer mode is for alpha and beta is always host, to change it configure the appropriate matmul descriptor /// attribute matmul is not using cublas handle's configuration of math mode, here tensor ops are implicitly allowed; to /// change this configure appropriate attribute in the preference handle void LtFp8Matmul(cublasLtHandle_t ltHandle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, /* host pointer */ const float *a_scale, /* device pointer */ const __nv_fp8_e4m3 *A, int lda, const float *b_scale, /* device pointer */ const __nv_fp8_e4m3 *B, int ldb, const float *beta, /* host pointer */ const float *c_scale, /* device pointer */ __nv_fp8_e4m3 *C, int ldc, const float *d_scale, /* device pointer */ float *amax_d, /* device pointer */ void *workspace, size_t workspaceSize) { cublasLtMatmulDesc_t operationDesc = NULL; cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL, Ddesc = NULL; cublasLtMatmulPreference_t preference = NULL; int returnedResults = 0; cublasLtMatmulHeuristicResult_t heuristicResult = {}; // create operation desciriptor; see cublasLtMatmulDescAttributes_t for details about defaults; here we just need to // set the transforms for A and B checkCublasStatus(cublasLtMatmulDescCreate(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F)); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa))); // set scaling factors checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_A_SCALE_POINTER, &a_scale, sizeof(a_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_B_SCALE_POINTER, &b_scale, sizeof(b_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_C_SCALE_POINTER, &c_scale, sizeof(c_scale))); 
checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_D_SCALE_POINTER, &d_scale, sizeof(d_scale))); checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_AMAX_D_POINTER, &amax_d, sizeof(amax_d))); // create matrix descriptors, we are good with the details here so no need to set any extra attributes checkCublasStatus(cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_8F_E4M3, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_8F_E4M3, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Cdesc, CUDA_R_16BF, m, n, ldc)); checkCublasStatus(cublasLtMatrixLayoutCreate(&Ddesc, CUDA_R_8F_E4M3, m, n, ldc)); // create preference handle; here we could use extra attributes to disable tensor ops or to make sure algo selected // will work with badly aligned A, B, C; here for simplicity we just assume A,B,C are always well aligned (e.g. // directly come from cudaMalloc) checkCublasStatus(cublasLtMatmulPreferenceCreate(&preference)); checkCublasStatus(cublasLtMatmulPreferenceSetAttribute(preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize))); // we just need the best available heuristic to try and run matmul. There is no guarantee this will work, e.g. if A // is badly aligned, you can request more (e.g. 
32) algos and try to run them one by one until something works checkCublasStatus(cublasLtMatmulAlgoGetHeuristic(ltHandle, operationDesc, Adesc, Bdesc, Cdesc, Ddesc, preference, 1, &heuristicResult, &returnedResults)); if (returnedResults == 0) { checkCublasStatus(CUBLAS_STATUS_NOT_SUPPORTED); } checkCublasStatus(cublasLtMatmul(ltHandle, operationDesc, alpha, A, Adesc, B, Bdesc, beta, C, Cdesc, C, Cdesc, &heuristicResult.algo, workspace, workspaceSize, 0)); // descriptors are no longer needed as all GPU work was already enqueued if (preference) checkCublasStatus(cublasLtMatmulPreferenceDestroy(preference)); if (Cdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Cdesc)); if (Bdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Bdesc)); if (Adesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Adesc)); if (operationDesc) checkCublasStatus(cublasLtMatmulDescDestroy(operationDesc)); }
7989a7edaa02f9b9580a46fc9769a5fca0849c0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cmath> #include <assert.h> #include <time.h> #include "init.h" /// surface reference surface<void, 2> surfRef5; /// Device array binded to surface hipArray* cuInArray5; /// Used 5x5 mask in constant memory on GPU __constant__ float mask5[5][5]; /// optimal values for the mask float a5 = 1; float b5 = 1.4; float c5 = 2.1969; /// Used 5x5 mask which will be copied to GPU float tmpMask5[][5] = { {-1,c5,-1,c5,-1}, {c5,b5,a5,b5,c5}, {-1,a5,0,a5,-1},{c5,b5,a5,b5,c5}, {-1,c5,-1,c5,-1}}; /// Compute local maxima using surface memory. /** Compute local maxima using surface memory. \param[out] dst Output 8bit matrix with positive numbers (here 255) indicating local maxima. \param[in] w Image widht \param[in] h Image height \return void */ __global__ void calcLM_5x5(BYTE *dst, int w, int h){ int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; int ind = row * w + col; if(row < h && col < w) { float eps = 0; float data; surf2Dread(&data, surfRef5, col* 4, row); bool islm = false; if(data >= 1.5) { float data1; surf2Dread(&data1, surfRef5, (col-1) * 4, row-1, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col-1) * 4, row+0, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col-1) * 4, row+1, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+0) * 4, row-1, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+0) * 4, row+1, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row-1, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row+0, hipBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row+1, hipBoundaryModeClamp); if(data - 
data1 >= eps) islm = true; }}}}}}} } if(islm) { dst[ind] = 255; } else { dst[ind] = 0; } } } /// Init distance transform. /** Distance transform is set to zero for zero pixels and to infinity for positive pixels. \param[in] src Source array with 8bit binary image. \param[in] w Image widht \param[in] h Image height \return void */ __global__ void initDT_5x5(BYTE *src, int w, int h){ int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; int ind = row * w + col; if(row < h && col < w) { float data = 0.f; if(src[ind] != 0) data = (float)w*h; surf2Dwrite(data, surfRef5, col*4, row); } } /// Computer one iteration of distance transform. /** Apply mask until all threads in one block converge. If at least one thread changed it value, set done to zero. \param[in] w Image widht \param[in] h Image height \param[out] done Binary variable indicating if blocks converged \return void */ __global__ void calcDT_5x5(int w, int h, int *done){ __shared__ int found; bool written = true; int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; bool mainthread = (threadIdx.x + threadIdx.y == 0); if(row < h && col < w) { if(mainthread) { written = false; atomicExch(&found, 1); } __syncthreads(); int inf = 2147483647; float data; float eps = 0; surf2Dread(&data, surfRef5, col* 4, row); if(data > 0 || mainthread) { float newData, oldData; newData = data; oldData = data; while(found > 0) { if(mainthread) { atomicExch(&found, 0); } __syncthreads(); oldData = newData; newData = inf; surf2Dread(&data, surfRef5, (col-1) * 4, row-1, hipBoundaryModeClamp); data += mask5[-1+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+0, hipBoundaryModeClamp); data += mask5[-1+2][0+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+1, hipBoundaryModeClamp); data += mask5[-1+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, 
(col+0) * 4, row-1, hipBoundaryModeClamp); data += mask5[0+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+0) * 4, row+1, hipBoundaryModeClamp); data += mask5[0+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row-1, hipBoundaryModeClamp); data += mask5[1+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+0, hipBoundaryModeClamp); data += mask5[1+2][0+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+1, hipBoundaryModeClamp); data += mask5[1+2][1+2]; if(newData - data > eps) newData = data; /// for c's surf2Dread(&data, surfRef5, (col+2) * 4, row-1, hipBoundaryModeClamp); data += mask5[2+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+2) * 4, row+1, hipBoundaryModeClamp); data += mask5[2+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-2) * 4, row-1, hipBoundaryModeClamp); data += mask5[-2+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-2) * 4, row+1, hipBoundaryModeClamp); data += mask5[-2+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row-2, hipBoundaryModeClamp); data += mask5[-1+2][-2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row-2, hipBoundaryModeClamp); data += mask5[1+2][-2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+2, hipBoundaryModeClamp); data += mask5[-1+2][2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+2, hipBoundaryModeClamp); data += mask5[1+2][2+2]; if(newData - data > eps) newData = data; if(newData < oldData) { surf2Dwrite(newData, surfRef5, col * 4, row); atomicExch(&found, 1); } __syncthreads(); if(mainthread && found > 0 && !written) { atomicExch(done, 0); written = true; } } } } } /// Compute distance 
transform and its local maxima on GPU with 5x5 mask. /** The mask is applied in parallel to all pixels until no one changes its value. Distance tranform is stored in surface memory. calcDT is called iteratively until there is no thread with changed value. \param[in] diffData Source array with 8bit binary image. \param[out] dtData Output float array with Euclidean distance transform values. \param[out] lmData Output 8bit array with positive numbers (here 255) indicating local maxima. \param[in] w Image widht \param[in] h Image height \return void */ void gpuDTLM_5x5(const BYTE *diffData, float *dtData, BYTE *lmData, int w, int h) { /// number of threads per blocks in one dimention int TH = 32; dim3 dimBlock(TH,TH); int DW = (int) ceil(w/(float)TH); int DH = (int) ceil(h/(float)TH); dim3 dimGrid(DW,DH); int ARRAY_SIZE = w*h; BYTE *devSrc, *devLM; int *done; int doneCpu = 1; hipMalloc((void **) &done, sizeof(int)); hipMalloc((void **) &devSrc, ARRAY_SIZE * sizeof(BYTE)); hipMalloc((void **) &devLM, ARRAY_SIZE * sizeof(BYTE)); hipMemcpy(devSrc, diffData, ARRAY_SIZE*sizeof(BYTE), hipMemcpyHostToDevice); hipMemcpyToSymbol( mask5, tmpMask5, sizeof(float)*5*5) ; // surface hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipMallocArray(&cuInArray5, &channelDesc, w, h, hipArraySurfaceLoadStore); hipBindSurfaceToArray(surfRef5, cuInArray5, channelDesc); hipLaunchKernelGGL(( initDT_5x5), dim3(dimGrid), dim3(dimBlock), 0, 0, devSrc, w, h); hipDeviceSynchronize(); int i = 0; while(true) { i++; hipMemset(done,1,sizeof(int)); hipLaunchKernelGGL(( calcDT_5x5), dim3(dimGrid), dim3(dimBlock), sizeof(int) , 0, w,h,done); hipDeviceSynchronize(); hipMemcpy(&doneCpu, done, sizeof(int), hipMemcpyDeviceToHost); if(doneCpu > 0) break; } hipDeviceSynchronize(); hipLaunchKernelGGL(( calcLM_5x5), dim3(dimGrid), dim3(dimBlock), 0, 0, devLM, w, h); cudaMemcpyFromArray(dtData, cuInArray5, 0, 0, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost ); 
hipMemcpy(lmData, devLM, ARRAY_SIZE * sizeof(BYTE), hipMemcpyDeviceToHost ); printf("Finished after %d iterations\n", i); CHECK_ERROR(hipGetLastError()); hipFreeArray(cuInArray5); hipFree(devSrc); hipFree(devLM); hipFree(done); }
7989a7edaa02f9b9580a46fc9769a5fca0849c0e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cmath> #include <assert.h> #include <time.h> #include "init.h" /// surface reference surface<void, 2> surfRef5; /// Device array binded to surface cudaArray* cuInArray5; /// Used 5x5 mask in constant memory on GPU __constant__ float mask5[5][5]; /// optimal values for the mask float a5 = 1; float b5 = 1.4; float c5 = 2.1969; /// Used 5x5 mask which will be copied to GPU float tmpMask5[][5] = { {-1,c5,-1,c5,-1}, {c5,b5,a5,b5,c5}, {-1,a5,0,a5,-1},{c5,b5,a5,b5,c5}, {-1,c5,-1,c5,-1}}; /// Compute local maxima using surface memory. /** Compute local maxima using surface memory. \param[out] dst Output 8bit matrix with positive numbers (here 255) indicating local maxima. \param[in] w Image widht \param[in] h Image height \return void */ __global__ void calcLM_5x5(BYTE *dst, int w, int h){ int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; int ind = row * w + col; if(row < h && col < w) { float eps = 0; float data; surf2Dread(&data, surfRef5, col* 4, row); bool islm = false; if(data >= 1.5) { float data1; surf2Dread(&data1, surfRef5, (col-1) * 4, row-1, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col-1) * 4, row+0, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col-1) * 4, row+1, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+0) * 4, row-1, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+0) * 4, row+1, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row-1, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row+0, cudaBoundaryModeClamp); if(data - data1 >= eps) { surf2Dread(&data1, surfRef5, (col+1) * 4, row+1, cudaBoundaryModeClamp); if(data - data1 >= eps) islm = true; }}}}}}} } if(islm) { 
dst[ind] = 255; } else { dst[ind] = 0; } } } /// Init distance transform. /** Distance transform is set to zero for zero pixels and to infinity for positive pixels. \param[in] src Source array with 8bit binary image. \param[in] w Image widht \param[in] h Image height \return void */ __global__ void initDT_5x5(BYTE *src, int w, int h){ int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; int ind = row * w + col; if(row < h && col < w) { float data = 0.f; if(src[ind] != 0) data = (float)w*h; surf2Dwrite(data, surfRef5, col*4, row); } } /// Computer one iteration of distance transform. /** Apply mask until all threads in one block converge. If at least one thread changed it value, set done to zero. \param[in] w Image widht \param[in] h Image height \param[out] done Binary variable indicating if blocks converged \return void */ __global__ void calcDT_5x5(int w, int h, int *done){ __shared__ int found; bool written = true; int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; bool mainthread = (threadIdx.x + threadIdx.y == 0); if(row < h && col < w) { if(mainthread) { written = false; atomicExch(&found, 1); } __syncthreads(); int inf = 2147483647; float data; float eps = 0; surf2Dread(&data, surfRef5, col* 4, row); if(data > 0 || mainthread) { float newData, oldData; newData = data; oldData = data; while(found > 0) { if(mainthread) { atomicExch(&found, 0); } __syncthreads(); oldData = newData; newData = inf; surf2Dread(&data, surfRef5, (col-1) * 4, row-1, cudaBoundaryModeClamp); data += mask5[-1+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+0, cudaBoundaryModeClamp); data += mask5[-1+2][0+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+1, cudaBoundaryModeClamp); data += mask5[-1+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+0) * 4, row-1, cudaBoundaryModeClamp); 
data += mask5[0+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+0) * 4, row+1, cudaBoundaryModeClamp); data += mask5[0+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row-1, cudaBoundaryModeClamp); data += mask5[1+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+0, cudaBoundaryModeClamp); data += mask5[1+2][0+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+1, cudaBoundaryModeClamp); data += mask5[1+2][1+2]; if(newData - data > eps) newData = data; /// for c's surf2Dread(&data, surfRef5, (col+2) * 4, row-1, cudaBoundaryModeClamp); data += mask5[2+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+2) * 4, row+1, cudaBoundaryModeClamp); data += mask5[2+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-2) * 4, row-1, cudaBoundaryModeClamp); data += mask5[-2+2][-1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-2) * 4, row+1, cudaBoundaryModeClamp); data += mask5[-2+2][1+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row-2, cudaBoundaryModeClamp); data += mask5[-1+2][-2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row-2, cudaBoundaryModeClamp); data += mask5[1+2][-2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col-1) * 4, row+2, cudaBoundaryModeClamp); data += mask5[-1+2][2+2]; if(newData - data > eps) newData = data; surf2Dread(&data, surfRef5, (col+1) * 4, row+2, cudaBoundaryModeClamp); data += mask5[1+2][2+2]; if(newData - data > eps) newData = data; if(newData < oldData) { surf2Dwrite(newData, surfRef5, col * 4, row); atomicExch(&found, 1); } __syncthreads(); if(mainthread && found > 0 && !written) { atomicExch(done, 0); written = true; } } } } } /// Compute distance transform and its local maxima on 
GPU with 5x5 mask. /** The mask is applied in parallel to all pixels until no one changes its value. Distance tranform is stored in surface memory. calcDT is called iteratively until there is no thread with changed value. \param[in] diffData Source array with 8bit binary image. \param[out] dtData Output float array with Euclidean distance transform values. \param[out] lmData Output 8bit array with positive numbers (here 255) indicating local maxima. \param[in] w Image widht \param[in] h Image height \return void */ void gpuDTLM_5x5(const BYTE *diffData, float *dtData, BYTE *lmData, int w, int h) { /// number of threads per blocks in one dimention int TH = 32; dim3 dimBlock(TH,TH); int DW = (int) ceil(w/(float)TH); int DH = (int) ceil(h/(float)TH); dim3 dimGrid(DW,DH); int ARRAY_SIZE = w*h; BYTE *devSrc, *devLM; int *done; int doneCpu = 1; cudaMalloc((void **) &done, sizeof(int)); cudaMalloc((void **) &devSrc, ARRAY_SIZE * sizeof(BYTE)); cudaMalloc((void **) &devLM, ARRAY_SIZE * sizeof(BYTE)); cudaMemcpy(devSrc, diffData, ARRAY_SIZE*sizeof(BYTE), cudaMemcpyHostToDevice); cudaMemcpyToSymbol( mask5, tmpMask5, sizeof(float)*5*5) ; // surface cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaMallocArray(&cuInArray5, &channelDesc, w, h, cudaArraySurfaceLoadStore); cudaBindSurfaceToArray(surfRef5, cuInArray5, channelDesc); initDT_5x5<<<dimGrid, dimBlock>>>(devSrc, w, h); cudaDeviceSynchronize(); int i = 0; while(true) { i++; cudaMemset(done,1,sizeof(int)); calcDT_5x5<<<dimGrid, dimBlock, sizeof(int) >>>(w,h,done); cudaDeviceSynchronize(); cudaMemcpy(&doneCpu, done, sizeof(int), cudaMemcpyDeviceToHost); if(doneCpu > 0) break; } cudaDeviceSynchronize(); calcLM_5x5<<<dimGrid, dimBlock>>>(devLM, w, h); cudaMemcpyFromArray(dtData, cuInArray5, 0, 0, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy(lmData, devLM, ARRAY_SIZE * sizeof(BYTE), cudaMemcpyDeviceToHost ); printf("Finished after %d iterations\n", i); 
CHECK_ERROR(cudaGetLastError()); cudaFreeArray(cuInArray5); cudaFree(devSrc); cudaFree(devLM); cudaFree(done); }
c06e24ad2490eff4b88d58dd6edcb7b52c911623.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "IndiceTools.h" #include "DomaineMath.h" #include "cudaTools.h" #include "Device.h" #include "MandelbrotMath.h" #include "JuliaMath.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n) { // hirarchie de classe et polymorphisme (donc en utilisant des pointeurs) fonctionnels float c1 = -0.12; float c2 = 0.85; //TODO PROF: commenter ou dcommenter pour passer de Mandelbrot Julia ou vice versa //TODO PROF: voir version OpenMP pour tous les autres reprsentations de fractales // MandelbrotMathBase* mandelbrotMath = new JuliaMath(n, c1, c2); MandelbrotMathBase* mandelbrotMath = new MandelbrotMath(n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math 
domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) mandelbrotMath->colorXY(&color, x, y); // update color ptrDevPixels[s] = color; s += NB_THREAD; } // must be present ! delete mandelbrotMath; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
c06e24ad2490eff4b88d58dd6edcb7b52c911623.cu
#include "Indice2D.h" #include "IndiceTools.h" #include "DomaineMath.h" #include "cudaTools.h" #include "Device.h" #include "MandelbrotMath.h" #include "JuliaMath.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void mandelbrot(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n) { // hiérarchie de classe et polymorphisme (donc en utilisant des pointeurs) fonctionnels float c1 = -0.12; float c2 = 0.85; //TODO PROF: commenter ou décommenter pour passer de Mandelbrot à Julia ou vice versa //TODO PROF: voir version OpenMP pour tous les autres représentations de fractales // MandelbrotMathBase* mandelbrotMath = new JuliaMath(n, c1, c2); MandelbrotMathBase* mandelbrotMath = new MandelbrotMath(n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) 
mandelbrotMath->colorXY(&color, x, y); // update color ptrDevPixels[s] = color; s += NB_THREAD; } // must be present ! delete mandelbrotMath; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
105e8abdc3882aef6bafd18ff5250598d4ed04a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define N 256 #include<stdio.h> __global__ void vecAdd(float *a, float *b, float *c){ c[threadIdx.x] = a[threadIdx.x]+b[threadIdx.x]; } int main(void){ // number of bytes to alloc for arrays size_t numBytes = N*sizeof(float); // init host and device pointers float *ha, *hb, *hc; // alloc host memory/arrays hipMallocManaged(&ha,numBytes); hipMallocManaged(&hb,numBytes); hipMallocManaged(&hc,numBytes); // init host arrays for(int i=0; i<N; i++){ ha[i]=(float)i; hb[i]=(float)i; } // launch configuration dim3 gridSz(1,1,1), blockSz(N,1,1); // launch CUDA kernel hipLaunchKernelGGL(( vecAdd), dim3(gridSz),dim3(blockSz), 0, 0, ha,hb,hc); // wait for kernel to finish hipDeviceSynchronize(); // kernel result (no memcpy!) for (int i=1; i<N; i++){ printf("c[%d]: %f\n",i,hc[i]);} // free host memory hipHostFree(ha); hipHostFree(hb); hipHostFree(hc); }
105e8abdc3882aef6bafd18ff5250598d4ed04a2.cu
#define N 256 #include<stdio.h> __global__ void vecAdd(float *a, float *b, float *c){ c[threadIdx.x] = a[threadIdx.x]+b[threadIdx.x]; } int main(void){ // number of bytes to alloc for arrays size_t numBytes = N*sizeof(float); // init host and device pointers float *ha, *hb, *hc; // alloc host memory/arrays cudaMallocManaged(&ha,numBytes); cudaMallocManaged(&hb,numBytes); cudaMallocManaged(&hc,numBytes); // init host arrays for(int i=0; i<N; i++){ ha[i]=(float)i; hb[i]=(float)i; } // launch configuration dim3 gridSz(1,1,1), blockSz(N,1,1); // launch CUDA kernel vecAdd<<<gridSz,blockSz>>>(ha,hb,hc); // wait for kernel to finish cudaDeviceSynchronize(); // kernel result (no memcpy!) for (int i=1; i<N; i++){ printf("c[%d]: %f\n",i,hc[i]);} // free host memory cudaFreeHost(ha); cudaFreeHost(hb); cudaFreeHost(hc); }
8fe19f7794afcf5cda2ffa29225582707246f792.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ float in1[ 1000 ]; __device__ __constant__ float in2[ 1000 ]; __global__ void vecadd( float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } } __global__ void vecadd( float *in1, float *in2, float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } }
8fe19f7794afcf5cda2ffa29225582707246f792.cu
#include "includes.h" __device__ float in1[ 1000 ]; __device__ __constant__ float in2[ 1000 ]; __global__ void vecadd( float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } } __global__ void vecadd( float *in1, float *in2, float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } }
837c5a0192c5eb357337084cc9e7a6bfa96ed6c6.hip
// !!! This is a file automatically generated by hipify!!! #include "graph.cuh" #include "vector2_hip.cuh" #include <iostream> Graph::Graph() { this->nodes = nullptr; this->displacement = nullptr; this->distinctEdges = nullptr; this->adjacencyMatrix = nullptr; this->numberOfNodes = 0; this->numberOfEdges = 0; }; Graph::Graph(float2* nodes, float2* displacement, int2* distinctEdges, int* adjacencyMatrix, unsigned int numberOfNodes, unsigned int numberOfEdges) : nodes(nodes), displacement(displacement), distinctEdges(distinctEdges), adjacencyMatrix(adjacencyMatrix), numberOfNodes(numberOfNodes), numberOfEdges(numberOfEdges) {}; void Graph::destroy() { delete[] this->nodes; delete[] this->displacement; delete[] this->distinctEdges; delete[] this->adjacencyMatrix; } void Graph::printNodes() { for (unsigned int i = 0; i < numberOfNodes; ++i) { std::cout << i << ":\t" << this->nodes[i].x << ", " << this->nodes[i].y << std::endl; } } void Graph::printNodesAndConnections() { for (unsigned int i = 0; i < numberOfNodes; ++i) { std::cout << i << "\t- "; for (unsigned int v = 0; v < numberOfNodes; ++v) { std::cout << this->adjacencyMatrix[i * numberOfNodes + v] << " "; } std::cout << std::endl; } }
837c5a0192c5eb357337084cc9e7a6bfa96ed6c6.cu
#include "graph.cuh" #include "vector2.cuh" #include <iostream> Graph::Graph() { this->nodes = nullptr; this->displacement = nullptr; this->distinctEdges = nullptr; this->adjacencyMatrix = nullptr; this->numberOfNodes = 0; this->numberOfEdges = 0; }; Graph::Graph(float2* nodes, float2* displacement, int2* distinctEdges, int* adjacencyMatrix, unsigned int numberOfNodes, unsigned int numberOfEdges) : nodes(nodes), displacement(displacement), distinctEdges(distinctEdges), adjacencyMatrix(adjacencyMatrix), numberOfNodes(numberOfNodes), numberOfEdges(numberOfEdges) {}; void Graph::destroy() { delete[] this->nodes; delete[] this->displacement; delete[] this->distinctEdges; delete[] this->adjacencyMatrix; } void Graph::printNodes() { for (unsigned int i = 0; i < numberOfNodes; ++i) { std::cout << i << ":\t" << this->nodes[i].x << ", " << this->nodes[i].y << std::endl; } } void Graph::printNodesAndConnections() { for (unsigned int i = 0; i < numberOfNodes; ++i) { std::cout << i << "\t- "; for (unsigned int v = 0; v < numberOfNodes; ++v) { std::cout << this->adjacencyMatrix[i * numberOfNodes + v] << " "; } std::cout << std::endl; } }
70d0a29ef8db47079b1c870ef12b61e50afead4b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __global__ void gpuAdd(int *d_a, int *d_b, int *d_c) { *d_c = *d_a + *d_b; } int main(void){ // define int h_a, h_b, h_c; int *d_a, *d_b, *d_c; // init h_a = 1; h_b = 4; // alloc hipMalloc((void**)&d_a, sizeof(int)); hipMalloc((void**)&d_b, sizeof(int)); hipMalloc((void**)&d_c, sizeof(int)); // copy value of host variable in device memory hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice); gpuAdd << <1, 1>> > (d_a, d_b, d_c); hipMemcpy(&h_c, d_c, sizeof(int), hipMemcpyDeviceToHost); printf("Passing Parameter by Reference Output: %d + %d = %d\n", h_a, h_b, h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
70d0a29ef8db47079b1c870ef12b61e50afead4b.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> __global__ void gpuAdd(int *d_a, int *d_b, int *d_c) { *d_c = *d_a + *d_b; } int main(void){ // define int h_a, h_b, h_c; int *d_a, *d_b, *d_c; // init h_a = 1; h_b = 4; // alloc cudaMalloc((void**)&d_a, sizeof(int)); cudaMalloc((void**)&d_b, sizeof(int)); cudaMalloc((void**)&d_c, sizeof(int)); // copy value of host variable in device memory cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice); gpuAdd << <1, 1>> > (d_a, d_b, d_c); cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost); printf("Passing Parameter by Reference Output: %d + %d = %d\n", h_a, h_b, h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
f4738b94d7950fbeec5bc5c2fab570606c55ff4d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <algorithm> #include <iostream> #include <vector> #include <hip/hip_runtime.h> using namespace std; #define MAX_KERNEL_THREADS 256 // float or double typedef float vtype; typedef vector<vector<vtype>> matrix; template<typename T> __device__ T parallel_prefix_sum(const int n, const int *ind, const T *w) { T sum = 0.0; T last; int mn =(((n+blockDim.x-1)/blockDim.x)*blockDim.x); //n in multiple of blockDim.x for (int i=threadIdx.x; i<mn; i+=blockDim.x) { //All threads (especially the last one) must always participate //in the shfl instruction, otherwise their sum will be undefined. //So, the loop stopping condition is based on multiple of n in loop increments, //so that all threads enter into the loop and inside we make sure we do not //read out of bounds memory checking for the actual size n. //check if the thread is valid bool valid = i<n; //Notice that the last thread is used to propagate the prefix sum. //For all the threads, in the first iteration the last is 0, in the following //iterations it is the value at the last thread of the previous iterations. //get the value of the last thread last = __shfl(sum, blockDim.x-1, blockDim.x); //if you are valid read the value from memory, otherwise set your value to 0 sum = (valid) ? 
w[ind[i]] : 0.0; //do prefix sum (of size warpSize=blockDim.x =< 32) for (int j=1; j<blockDim.x; j*=2) { T v = __shfl_up(sum, j, blockDim.x); if (threadIdx.x >= j) sum += v; } //shift by last sum += last; //notice that no __threadfence or __syncthreads are needed in this implementation } //get the value of the last thread (to all threads) last = __shfl(sum, blockDim.x-1, blockDim.x); return last; } // Volume of neighboors (*weight_s) template<bool weighted, typename T> __global__ void jaccard_row_sum(const int n, const int *__restrict__ csrPtr, const int *__restrict__ csrInd, const T *__restrict__ w, T *__restrict__ work) { for (int row=threadIdx.y+blockIdx.y*blockDim.y; row<n; row+=gridDim.y*blockDim.y) { int start = csrPtr[row]; int end = csrPtr[row+1]; int length= end-start; //compute row sums if (weighted) { T sum = parallel_prefix_sum(length, csrInd + start, w); if (threadIdx.x == 0) work[row] = sum; } else { work[row] = (T)length; } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // Note the number of columns is constrained by the number of rows template<bool weighted, typename T> __global__ void jaccard_is(const int n, const int e, const int *__restrict__ csrPtr, const int *__restrict__ csrInd, const T *__restrict__ v, const T *__restrict__ work, T *__restrict__ weight_i, T *__restrict__ weight_s) { for (int row=threadIdx.z+blockIdx.z*blockDim.z; row<n; row+=gridDim.z*blockDim.z) { for (int j=csrPtr[row]+threadIdx.y+blockIdx.y*blockDim.y; j<csrPtr[row+1]; j+=gridDim.y*blockDim.y) { int col = csrInd[j]; //find which row has least elements (and call it reference row) int Ni = csrPtr[row+1] - csrPtr[row]; int Nj = csrPtr[col+1] - csrPtr[col]; int ref= (Ni < Nj) ? row : col; int cur= (Ni < Nj) ? 
col : row; //compute new sum weights weight_s[j] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (int i=csrPtr[ref]+threadIdx.x+blockIdx.x*blockDim.x; i<csrPtr[ref+1]; i+=gridDim.x*blockDim.x) { int match =-1; int ref_col = csrInd[i]; T ref_val = weighted ? v[ref_col] : (T)1.0; //binary search (column indices are sorted within each row) int left = csrPtr[cur]; int right= csrPtr[cur+1]-1; while(left <= right){ int middle = (left+right)>>1; int cur_col= csrInd[middle]; if (cur_col > ref_col) { right=middle-1; } else if (cur_col < ref_col) { left=middle+1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1){ atomicAdd(&weight_i[j],ref_val); } } } } } template<bool weighted, typename T> __global__ void jaccard_jw(const int e, const T *__restrict__ csrVal, const T gamma, const T *__restrict__ weight_i, const T *__restrict__ weight_s, T *__restrict__ weight_j) { for (int j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { T Wi = weight_i[j]; T Ws = weight_s[j]; weight_j[j] = (gamma*csrVal[j])* (Wi/(Ws-Wi)); } } template <bool weighted, typename T> __global__ void fill(const int e, T* w, const T value) { for (int j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { // e.g. w[0] is the weight of a non-zeron element when csr_ind[i] equals 0. // So multiple non-zero elements on different rows of a matrix may share // the same weight value w[j] = weighted ? 
(T)(j+1)/e : value; } } template <bool weighted, typename T> void jaccard_weight (const int iteration, const int n, const int e, int* csr_ptr, int* csr_ind, T* csr_val) { const T gamma = (T)0.46; // arbitrary T *d_weight_i, *d_weight_s, *d_weight_j, *d_work; int *d_csrInd; int *d_csrPtr; T *d_csrVal; #ifdef DEBUG T* weight_i = (T*) malloc (sizeof(T) * e); T* weight_s = (T*) malloc (sizeof(T) * e); T* work = (T*) malloc (sizeof(T) * n); #endif T* weight_j = (T*) malloc (sizeof(T) * e); hipMalloc ((void**)&d_work, sizeof(T) * n); hipMalloc ((void**)&d_weight_i, sizeof(T) * e); hipMalloc ((void**)&d_weight_s, sizeof(T) * e); hipMalloc ((void**)&d_weight_j, sizeof(T) * e); hipMalloc ((void**)&d_csrVal, sizeof(T) * e); hipMalloc ((void**)&d_csrPtr, sizeof(int) * (n+1)); hipMalloc ((void**)&d_csrInd, sizeof(int) * e); hipMemcpy(d_csrPtr, csr_ptr, sizeof(int) * (n+1), hipMemcpyHostToDevice); hipMemcpy(d_csrInd, csr_ind, sizeof(int) * e, hipMemcpyHostToDevice); hipMemcpy(d_csrVal, csr_val, sizeof(T) * e, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iteration; i++) { dim3 nthreads, nblocks; // reuse for multiple kernels nthreads.x = MAX_KERNEL_THREADS; nthreads.y = 1; nthreads.z = 1; nblocks.x = (e+MAX_KERNEL_THREADS-1) / MAX_KERNEL_THREADS; nblocks.y = 1; nblocks.z = 1; hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<weighted, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_j, (T)1.0); #ifdef DEBUG hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost); for (int i = 0; i < e; i++) printf("wj: %d %f\n", i, weight_j[i]); #endif // initialize volume of intersections hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<false, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_i, (T)0.0); // compute row sum with prefix sum const int y = 4; nthreads.x = 64/y; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = (n + nthreads.y - 1) / nthreads.y; // less than MAX CUDA BLOCKs nblocks.z = 1; 
hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_row_sum<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, d_csrPtr, d_csrInd, d_weight_j, d_work); #ifdef DEBUG hipMemcpy(work, d_work, sizeof(T) * n, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) printf("work: %d %f\n", i, work[i]); #endif // compute volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // nthreads.x * nthreads.y * nthreads.z <= 256 nthreads.x = 32/y; nthreads.y = y; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = (n + nthreads.z - 1)/nthreads.z; // less than CUDA_MAX_BLOCKS); hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_is<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, e, d_csrPtr, d_csrInd, d_weight_j, d_work, d_weight_i, d_weight_s); #ifdef DEBUG hipMemcpy(weight_i, d_weight_i, sizeof(T) * e, hipMemcpyDeviceToHost); hipMemcpy(weight_s, d_weight_s, sizeof(T) * e, hipMemcpyDeviceToHost); for (int i = 0; i < e; i++) printf("wi: %d %f\n", i, weight_i[i]); for (int i = 0; i < e; i++) printf("ws: %d %f\n", i, weight_s[i]); #endif // compute jaccard weights nthreads.x = ::min(e, MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = (e + nthreads.x - 1)/nthreads.x; // less than MAX CUDA BLOCKs nblocks.y = 1; nblocks.z = 1; hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_jw<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_csrVal, gamma, d_weight_i, d_weight_s, d_weight_j); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); cout << "Average execution time of kernels: " << (time * 1e-9f) / iteration << " (s)\n"; hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost); #ifdef DEBUG // verify using known values when weighted is true float error; if (weighted) error = ::fabs(weight_j[0] - 0.306667) + ::fabs(weight_j[1] - 0.000000) + ::fabs(weight_j[2] - 3.680000) + ::fabs(weight_j[3] - 1.380000) + ::fabs(weight_j[4] - 0.788571) + 
::fabs(weight_j[5] - 0.460000); else error = ::fabs(weight_j[0] - 0.230000) + ::fabs(weight_j[1] - 0.000000) + ::fabs(weight_j[2] - 3.680000) + ::fabs(weight_j[3] - 1.380000) + ::fabs(weight_j[4] - 0.920000) + ::fabs(weight_j[5] - 0.460000); if (error > 1e-5) { for (int i = 0; i < e; i++) printf("wj: %d %f\n", i, weight_j[i]); printf("FAILED"); } else { printf("PASSED"); } printf("\n"); #endif hipFree (d_work); hipFree (d_weight_i); hipFree (d_weight_s); hipFree (d_weight_j); hipFree (d_csrInd); hipFree (d_csrVal); hipFree (d_csrPtr); free(weight_j); #ifdef DEBUG free(weight_i); free(weight_s); free(work); #endif } // Utilities void printMatrix(const matrix& M) { int m = M.size(); int n = M[0].size(); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) cout << M[i][j] << " "; cout << endl; } } template <typename T> void printVector(const vector<T>& V, char* msg) { cout << msg << "[ "; for_each(V.begin(), V.end(), [](int a) { cout << a << " "; }); cout << "]" << endl; } // Reference: https://www.geeksforgeeks.org/sparse-matrix-representations-set-3-csr/ int main(int argc, char** argv) { int iteration = 10; #ifdef DEBUG matrix M = { { 0, 0, 0, 1}, { 5, 8, 0, 0}, { 0, 0, 3, 0}, { 0, 6, 0, 1} }; #else int numRow = atoi(argv[1]); int numCol = atoi(argv[2]); iteration = atoi(argv[3]); srand(2); matrix M; vector<vtype> rowElems(numCol); for (int r = 0; r < numRow; r++) { for (int c = 0; c < numCol; c++) rowElems[c] = rand() % 10; M.push_back(rowElems); } #endif int row = M.size(); int col = M[0].size(); printf("Number of matrix rows and cols: %d %d\n", row, col); vector<vtype> csr_val; vector<int> csr_ptr = { 0 }; // require -std=c++11 vector<int> csr_ind; int nnz = 0; // count Number of non-zero elements in each row for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { if (M[i][j] != (vtype)0) { csr_val.push_back(M[i][j]); csr_ind.push_back(j); nnz++; } } csr_ptr.push_back(nnz); } // print when the matrix is small if (row <= 16 && col <= 16) { 
printMatrix(M); printVector(csr_val, (char*)"values = "); printVector(csr_ptr, (char*)"row pointer = "); printVector(csr_ind, (char*)"col indices = "); } jaccard_weight<true, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data()); jaccard_weight<false, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data()); return 0; }
f4738b94d7950fbeec5bc5c2fab570606c55ff4d.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <algorithm> #include <iostream> #include <vector> #include <hip/hip_runtime.h> using namespace std; #define MAX_KERNEL_THREADS 256 // float or double typedef float vtype; typedef vector<vector<vtype>> matrix; template<typename T> __device__ T parallel_prefix_sum(const int n, const int *ind, const T *w) { T sum = 0.0; T last; int mn =(((n+blockDim.x-1)/blockDim.x)*blockDim.x); //n in multiple of blockDim.x for (int i=threadIdx.x; i<mn; i+=blockDim.x) { //All threads (especially the last one) must always participate //in the shfl instruction, otherwise their sum will be undefined. //So, the loop stopping condition is based on multiple of n in loop increments, //so that all threads enter into the loop and inside we make sure we do not //read out of bounds memory checking for the actual size n. //check if the thread is valid bool valid = i<n; //Notice that the last thread is used to propagate the prefix sum. //For all the threads, in the first iteration the last is 0, in the following //iterations it is the value at the last thread of the previous iterations. //get the value of the last thread last = __shfl(sum, blockDim.x-1, blockDim.x); //if you are valid read the value from memory, otherwise set your value to 0 sum = (valid) ? 
w[ind[i]] : 0.0; //do prefix sum (of size warpSize=blockDim.x =< 32) for (int j=1; j<blockDim.x; j*=2) { T v = __shfl_up(sum, j, blockDim.x); if (threadIdx.x >= j) sum += v; } //shift by last sum += last; //notice that no __threadfence or __syncthreads are needed in this implementation } //get the value of the last thread (to all threads) last = __shfl(sum, blockDim.x-1, blockDim.x); return last; } // Volume of neighboors (*weight_s) template<bool weighted, typename T> __global__ void jaccard_row_sum(const int n, const int *__restrict__ csrPtr, const int *__restrict__ csrInd, const T *__restrict__ w, T *__restrict__ work) { for (int row=threadIdx.y+blockIdx.y*blockDim.y; row<n; row+=gridDim.y*blockDim.y) { int start = csrPtr[row]; int end = csrPtr[row+1]; int length= end-start; //compute row sums if (weighted) { T sum = parallel_prefix_sum(length, csrInd + start, w); if (threadIdx.x == 0) work[row] = sum; } else { work[row] = (T)length; } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // Note the number of columns is constrained by the number of rows template<bool weighted, typename T> __global__ void jaccard_is(const int n, const int e, const int *__restrict__ csrPtr, const int *__restrict__ csrInd, const T *__restrict__ v, const T *__restrict__ work, T *__restrict__ weight_i, T *__restrict__ weight_s) { for (int row=threadIdx.z+blockIdx.z*blockDim.z; row<n; row+=gridDim.z*blockDim.z) { for (int j=csrPtr[row]+threadIdx.y+blockIdx.y*blockDim.y; j<csrPtr[row+1]; j+=gridDim.y*blockDim.y) { int col = csrInd[j]; //find which row has least elements (and call it reference row) int Ni = csrPtr[row+1] - csrPtr[row]; int Nj = csrPtr[col+1] - csrPtr[col]; int ref= (Ni < Nj) ? row : col; int cur= (Ni < Nj) ? 
col : row; //compute new sum weights weight_s[j] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (int i=csrPtr[ref]+threadIdx.x+blockIdx.x*blockDim.x; i<csrPtr[ref+1]; i+=gridDim.x*blockDim.x) { int match =-1; int ref_col = csrInd[i]; T ref_val = weighted ? v[ref_col] : (T)1.0; //binary search (column indices are sorted within each row) int left = csrPtr[cur]; int right= csrPtr[cur+1]-1; while(left <= right){ int middle = (left+right)>>1; int cur_col= csrInd[middle]; if (cur_col > ref_col) { right=middle-1; } else if (cur_col < ref_col) { left=middle+1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1){ atomicAdd(&weight_i[j],ref_val); } } } } } template<bool weighted, typename T> __global__ void jaccard_jw(const int e, const T *__restrict__ csrVal, const T gamma, const T *__restrict__ weight_i, const T *__restrict__ weight_s, T *__restrict__ weight_j) { for (int j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { T Wi = weight_i[j]; T Ws = weight_s[j]; weight_j[j] = (gamma*csrVal[j])* (Wi/(Ws-Wi)); } } template <bool weighted, typename T> __global__ void fill(const int e, T* w, const T value) { for (int j=threadIdx.x+blockIdx.x*blockDim.x; j<e; j+=gridDim.x*blockDim.x) { // e.g. w[0] is the weight of a non-zeron element when csr_ind[i] equals 0. // So multiple non-zero elements on different rows of a matrix may share // the same weight value w[j] = weighted ? 
(T)(j+1)/e : value; } } template <bool weighted, typename T> void jaccard_weight (const int iteration, const int n, const int e, int* csr_ptr, int* csr_ind, T* csr_val) { const T gamma = (T)0.46; // arbitrary T *d_weight_i, *d_weight_s, *d_weight_j, *d_work; int *d_csrInd; int *d_csrPtr; T *d_csrVal; #ifdef DEBUG T* weight_i = (T*) malloc (sizeof(T) * e); T* weight_s = (T*) malloc (sizeof(T) * e); T* work = (T*) malloc (sizeof(T) * n); #endif T* weight_j = (T*) malloc (sizeof(T) * e); hipMalloc ((void**)&d_work, sizeof(T) * n); hipMalloc ((void**)&d_weight_i, sizeof(T) * e); hipMalloc ((void**)&d_weight_s, sizeof(T) * e); hipMalloc ((void**)&d_weight_j, sizeof(T) * e); hipMalloc ((void**)&d_csrVal, sizeof(T) * e); hipMalloc ((void**)&d_csrPtr, sizeof(int) * (n+1)); hipMalloc ((void**)&d_csrInd, sizeof(int) * e); hipMemcpy(d_csrPtr, csr_ptr, sizeof(int) * (n+1), hipMemcpyHostToDevice); hipMemcpy(d_csrInd, csr_ind, sizeof(int) * e, hipMemcpyHostToDevice); hipMemcpy(d_csrVal, csr_val, sizeof(T) * e, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iteration; i++) { dim3 nthreads, nblocks; // reuse for multiple kernels nthreads.x = MAX_KERNEL_THREADS; nthreads.y = 1; nthreads.z = 1; nblocks.x = (e+MAX_KERNEL_THREADS-1) / MAX_KERNEL_THREADS; nblocks.y = 1; nblocks.z = 1; hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<weighted, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_j, (T)1.0); #ifdef DEBUG hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost); for (int i = 0; i < e; i++) printf("wj: %d %f\n", i, weight_j[i]); #endif // initialize volume of intersections hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<false, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_i, (T)0.0); // compute row sum with prefix sum const int y = 4; nthreads.x = 64/y; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = (n + nthreads.y - 1) / nthreads.y; // less than MAX CUDA BLOCKs nblocks.z = 1; 
hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_row_sum<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, d_csrPtr, d_csrInd, d_weight_j, d_work); #ifdef DEBUG hipMemcpy(work, d_work, sizeof(T) * n, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) printf("work: %d %f\n", i, work[i]); #endif // compute volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // nthreads.x * nthreads.y * nthreads.z <= 256 nthreads.x = 32/y; nthreads.y = y; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = (n + nthreads.z - 1)/nthreads.z; // less than CUDA_MAX_BLOCKS); hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_is<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, e, d_csrPtr, d_csrInd, d_weight_j, d_work, d_weight_i, d_weight_s); #ifdef DEBUG hipMemcpy(weight_i, d_weight_i, sizeof(T) * e, hipMemcpyDeviceToHost); hipMemcpy(weight_s, d_weight_s, sizeof(T) * e, hipMemcpyDeviceToHost); for (int i = 0; i < e; i++) printf("wi: %d %f\n", i, weight_i[i]); for (int i = 0; i < e; i++) printf("ws: %d %f\n", i, weight_s[i]); #endif // compute jaccard weights nthreads.x = std::min(e, MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = (e + nthreads.x - 1)/nthreads.x; // less than MAX CUDA BLOCKs nblocks.y = 1; nblocks.z = 1; hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_jw<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_csrVal, gamma, d_weight_i, d_weight_s, d_weight_j); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); cout << "Average execution time of kernels: " << (time * 1e-9f) / iteration << " (s)\n"; hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost); #ifdef DEBUG // verify using known values when weighted is true float error; if (weighted) error = std::fabs(weight_j[0] - 0.306667) + std::fabs(weight_j[1] - 0.000000) + std::fabs(weight_j[2] - 3.680000) + std::fabs(weight_j[3] - 1.380000) + 
std::fabs(weight_j[4] - 0.788571) + std::fabs(weight_j[5] - 0.460000); else error = std::fabs(weight_j[0] - 0.230000) + std::fabs(weight_j[1] - 0.000000) + std::fabs(weight_j[2] - 3.680000) + std::fabs(weight_j[3] - 1.380000) + std::fabs(weight_j[4] - 0.920000) + std::fabs(weight_j[5] - 0.460000); if (error > 1e-5) { for (int i = 0; i < e; i++) printf("wj: %d %f\n", i, weight_j[i]); printf("FAILED"); } else { printf("PASSED"); } printf("\n"); #endif hipFree (d_work); hipFree (d_weight_i); hipFree (d_weight_s); hipFree (d_weight_j); hipFree (d_csrInd); hipFree (d_csrVal); hipFree (d_csrPtr); free(weight_j); #ifdef DEBUG free(weight_i); free(weight_s); free(work); #endif } // Utilities void printMatrix(const matrix& M) { int m = M.size(); int n = M[0].size(); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) cout << M[i][j] << " "; cout << endl; } } template <typename T> void printVector(const vector<T>& V, char* msg) { cout << msg << "[ "; for_each(V.begin(), V.end(), [](int a) { cout << a << " "; }); cout << "]" << endl; } // Reference: https://www.geeksforgeeks.org/sparse-matrix-representations-set-3-csr/ int main(int argc, char** argv) { int iteration = 10; #ifdef DEBUG matrix M = { { 0, 0, 0, 1}, { 5, 8, 0, 0}, { 0, 0, 3, 0}, { 0, 6, 0, 1} }; #else int numRow = atoi(argv[1]); int numCol = atoi(argv[2]); iteration = atoi(argv[3]); srand(2); matrix M; vector<vtype> rowElems(numCol); for (int r = 0; r < numRow; r++) { for (int c = 0; c < numCol; c++) rowElems[c] = rand() % 10; M.push_back(rowElems); } #endif int row = M.size(); int col = M[0].size(); printf("Number of matrix rows and cols: %d %d\n", row, col); vector<vtype> csr_val; vector<int> csr_ptr = { 0 }; // require -std=c++11 vector<int> csr_ind; int nnz = 0; // count Number of non-zero elements in each row for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { if (M[i][j] != (vtype)0) { csr_val.push_back(M[i][j]); csr_ind.push_back(j); nnz++; } } csr_ptr.push_back(nnz); } // print when the 
matrix is small if (row <= 16 && col <= 16) { printMatrix(M); printVector(csr_val, (char*)"values = "); printVector(csr_ptr, (char*)"row pointer = "); printVector(csr_ind, (char*)"col indices = "); } jaccard_weight<true, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data()); jaccard_weight<false, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data()); return 0; }
df9f7b747205412e5d83e35a06f35533afafa4d0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vector_exp10.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vector_exp10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vector_exp10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vector_exp10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); 
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
df9f7b747205412e5d83e35a06f35533afafa4d0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vector_exp10.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vector_exp10<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vector_exp10<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vector_exp10<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
718f63caa0db767e7050932f6fd4ed72e526a9dc.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <stdint.h> #include <stdio.h> void performExperiment(int size ) { thrust::host_vector<float> values(size); //can I fread directly into values ? for (int i = 0; i < size; ++i) { values[i] = 1.0; } thrust::device_vector<float> dvalues = values; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); thrust::inclusive_scan(dvalues.begin(),dvalues.end(),dvalues.begin()); hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); // std::cout << std::endl; printf("%f\n", elapsedTime); //for (int i = 0; i < size; ++i) { values[0] = dvalues[size-1]; //} //for (int i = 0; i < 512; ++i) { printf("%f ",values[0]); //} } int main(int argc, char **argv){ performExperiment(4096*4096); return 0; }
718f63caa0db767e7050932f6fd4ed72e526a9dc.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <stdint.h> #include <stdio.h> void performExperiment(int size ) { thrust::host_vector<float> values(size); //can I fread directly into values ? for (int i = 0; i < size; ++i) { values[i] = 1.0; } thrust::device_vector<float> dvalues = values; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); thrust::inclusive_scan(dvalues.begin(),dvalues.end(),dvalues.begin()); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // std::cout << std::endl; printf("%f\n", elapsedTime); //for (int i = 0; i < size; ++i) { values[0] = dvalues[size-1]; //} //for (int i = 0; i < 512; ++i) { printf("%f ",values[0]); //} } int main(int argc, char **argv){ performExperiment(4096*4096); return 0; }
55c4ddede9aa369b210bd20a5afd2bb8d9a28c9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_gpu.hpp" #include <io/utilities/block_utils.cuh> #include <io/utilities/column_buffer.hpp> #include <cuda/std/tuple> #include <cudf/detail/utilities/assert.cuh> #include <cudf/detail/utilities/hash_functions.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/strings/string_view.hpp> #include <cudf/utilities/bit.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/functional.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/tuple.h> constexpr int block_size = 128; constexpr int non_zero_buffer_size = block_size * 2; inline __device__ uint32_t rotl32(uint32_t x, uint32_t r) { return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r)); } inline __device__ int rolling_index(int index) { return index & (non_zero_buffer_size - 1); } namespace cudf { namespace io { namespace parquet { namespace gpu { namespace { struct page_state_s { const uint8_t* data_start; const uint8_t* data_end; const uint8_t* lvl_end; const uint8_t* dict_base; // ptr to dictionary page data int32_t 
dict_size; // size of dictionary data int32_t first_row; // First row in page to output int32_t num_rows; // Rows in page to decode (including rows to be skipped) int32_t first_output_value; // First value in page to output int32_t num_input_values; // total # of input/level values in the page int32_t dtype_len; // Output data type length int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit int32_t dict_bits; // # of bits to store dictionary indices uint32_t dict_run; int32_t dict_val; uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] int32_t error; PageInfo page; ColumnChunkDesc col; // (leaf) value decoding int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) int32_t dict_pos; // write position of dictionary indices int32_t src_pos; // input read position of final output value int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale uint32_t nz_idx[non_zero_buffer_size]; // circular buffer of non-null value positions uint32_t dict_idx[non_zero_buffer_size]; // Dictionary index, boolean, or string offset values uint32_t str_len[non_zero_buffer_size]; // String length for plain encoding of strings // repetition/definition level decoding int32_t input_value_count; // how many values of the input we've processed int32_t input_row_count; // how many rows of the input we've processed int32_t input_leaf_count; // how many leaf values of the input we've processed uint32_t rep[non_zero_buffer_size]; // circular buffer of repetition level values uint32_t def[non_zero_buffer_size]; // circular buffer of definition level values const uint8_t* lvl_start[NUM_LEVEL_TYPES]; // [def,rep] int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded int32_t row_index_lower_bound; // lower bound of row indices we should process }; /** * @brief Read a 32-bit varint integer * * @param[in,out] cur 
The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(const uint8_t*& cur, const uint8_t* end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } /** * @brief Parse the beginning of the level section (definition or repetition), * initializes the initial RLE run & value, and returns the section length * * @param[in,out] s The page state * @param[in] cur The current data position * @param[in] end The end of the data * @param[in] level_bits The bits required * * @return The length of the section */ __device__ uint32_t InitLevelSection(page_state_s* s, const uint8_t* cur, const uint8_t* end, level_type lvl) { int32_t len; int level_bits = s->col.level_bits[lvl]; Encoding encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding : s->page.repetition_level_encoding; if (level_bits == 0) { len = 0; s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else if (encoding == Encoding::RLE) { // V2 only uses RLE encoding, so only perform check here if (s->page.def_lvl_bytes || s->page.rep_lvl_bytes) { len = lvl == level_type::DEFINITION ? s->page.def_lvl_bytes : s->page.rep_lvl_bytes; } else if (cur + 4 < end) { len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; } else { len = 0; s->error = 2; } if (!s->error) { uint32_t run = get_vlq32(cur, end); s->initial_rle_run[lvl] = run; if (!(run & 1)) { int v = (cur < end) ? cur[0] : 0; cur++; if (level_bits > 8) { v |= ((cur < end) ? 
cur[0] : 0) << 8; cur++; } s->initial_rle_value[lvl] = v; } s->lvl_start[lvl] = cur; if (cur > end) { s->error = 2; } } } else if (encoding == Encoding::BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else { s->error = 3; len = 0; } return static_cast<uint32_t>(len); } /** * @brief Decode values out of a definition or repetition stream * * @param[in,out] s Page state input/output * @param[in] t target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION */ __device__ void gpuDecodeStream( uint32_t* output, page_state_s* s, int32_t target_count, int t, level_type lvl) { const uint8_t* cur_def = s->lvl_start[lvl]; const uint8_t* end = s->lvl_end; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { const uint8_t* cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end || level_run <= 1) { s->error = 0x10; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = shuffle(sym_len); level_val = shuffle(level_val); level_run = shuffle(level_run); cur_def += sym_len; } if (s->error) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); 
batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; const uint8_t* cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[idx & (non_zero_buffer_size - 1)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * * @return A pair containing the new output position, and the total length of strings decoded (this * will only be valid on thread 0 and if sizes_only is true) */ template <bool sizes_only> __device__ cuda::std::pair<int, int> gpuDecodeDictionaryIndices(volatile page_state_s* s, int target_pos, int t) { const uint8_t* end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; int str_len = 0; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t* cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } __syncwarp(); is_literal = shuffle(is_literal); batch_len = shuffle(batch_len); // compute dictionary index. int dict_idx = 0; if (t < batch_len) { dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t* p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } // if we're not computing sizes, store off the dictionary index if constexpr (!sizes_only) { s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx; } } // if we're computing sizes, add the length(s) if constexpr (sizes_only) { int const len = [&]() { if (t >= batch_len) { return 0; } // we may end up decoding more indices than we asked for. so don't include those in the // size calculation if (pos + t >= target_pos) { return 0; } // TODO: refactor this with gpuGetStringData / gpuGetStringSize uint32_t const dict_pos = (s->dict_bits > 0) ? 
dict_idx * sizeof(string_index_pair) : 0; if (target_pos && dict_pos < (uint32_t)s->dict_size) { const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos); return src->second; } return 0; }(); using WarpReduce = hipcub::WarpReduce<size_type>; __shared__ typename WarpReduce::TempStorage temp_storage; // note: str_len will only be valid on thread 0. str_len += WarpReduce(temp_storage).Sum(len); } pos += batch_len; } return {pos, str_len}; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[in] target_pos Target write position * @param[in] t Thread ID * * @return The new output position */ __device__ int gpuDecodeRleBooleans(volatile page_state_s* s, int target_pos, int t) { const uint8_t* end = s->data_end; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t* cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } __syncwarp(); is_literal = shuffle(is_literal); batch_len = shuffle(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); const uint8_t* p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings and returns total length of all strings * processed * * @param[in,out] s Page state input/output * @param[in] target_pos Target output position * @param[in] t Thread ID * * @return Total length of strings processed */ __device__ size_type gpuInitStringDescriptors(volatile page_state_s* s, int target_pos, int t) { int pos = s->dict_pos; int total_len = 0; // This step is purely serial if (!t) { const uint8_t* cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len; if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } else { len = 0; } s->dict_idx[pos & (non_zero_buffer_size - 1)] = k; s->str_len[pos & (non_zero_buffer_size - 1)] = len; k += len; total_len += len; pos++; } s->dict_val = k; __threadfence_block(); } return total_len; } /** * @brief Retrieves string information for a string at the specified source position * * @param[in] s Page state input * @param[in] src_pos Source position * * @return A pair containing a pointer to the string and its length */ inline __device__ cuda::std::pair<const char*, size_t> gpuGetStringData(volatile page_state_s* s, int src_pos) { const char* ptr = nullptr; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? 
s->dict_idx[src_pos & (non_zero_buffer_size - 1)] * sizeof(string_index_pair) : 0; if (dict_pos < (uint32_t)s->dict_size) { const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos); ptr = src->first; len = src->second; } } else { // Plain encoding uint32_t dict_pos = s->dict_idx[src_pos & (non_zero_buffer_size - 1)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<const char*>(s->data_start + dict_pos); len = s->str_len[src_pos & (non_zero_buffer_size - 1)]; } } return {ptr, len}; } /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ inline __device__ void gpuOutputString(volatile page_state_s* s, int src_pos, void* dstv) { auto [ptr, len] = gpuGetStringData(s, src_pos); if (s->dtype_len == 4) { // Output hash. This hash value is used if the option to convert strings to // categoricals is enabled. The seed value is chosen arbitrarily. 
uint32_t constexpr hash_seed = 33; cudf::string_view const sv{ptr, static_cast<size_type>(len)}; *static_cast<uint32_t*>(dstv) = cudf::detail::MurmurHash3_32<cudf::string_view>{hash_seed}(sv); } else { // Output string descriptor auto* dst = static_cast<string_index_pair*>(dstv); dst->first = ptr; dst->second = len; } } /** * @brief Output a boolean * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputBoolean(volatile page_state_s* s, int src_pos, uint8_t* dst) { *dst = s->dict_idx[src_pos & (non_zero_buffer_size - 1)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t* dst, const uint8_t* src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2* dst, const uint8_t* src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4); if (ofs) { uint32_t 
next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[out] dst Pointer to row output data */ inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst) { using cuda::std::chrono::duration_cast; const uint8_t* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 >= dict_size) { *dst = 0; return; } uint3 v; int64_t nanos, days; v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4); v.z = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); cudf::duration_D d_d{ days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow *dst = [&]() { switch (s->col.ts_clock_rate) { case 1: // seconds return duration_cast<duration_s>(d_d).count() + duration_cast<duration_s>(duration_ns{nanos}).count(); case 1'000: // milliseconds return duration_cast<duration_ms>(d_d).count() + duration_cast<duration_ms>(duration_ns{nanos}).count(); case 1'000'000: // microseconds return 
duration_cast<duration_us>(d_d).count() + duration_cast<duration_us>(duration_ns{nanos}).count(); case 1'000'000'000: // nanoseconds default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos; } }(); } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst) { const uint8_t* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Output a byte array as int. 
* * @param[in] ptr Pointer to the byte array * @param[in] len Byte array length * @param[out] dst Pointer to row output data */ template <typename T> __device__ void gpuOutputByteArrayAsInt(char const* ptr, int32_t len, T* dst) { T unscaled = 0; for (auto i = 0; i < len; i++) { uint8_t v = ptr[i]; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. unscaled <<= (sizeof(T) - len) * 8; unscaled >>= (sizeof(T) - len) * 8; *dst = unscaled; } /** * @brief Output a fixed-length byte array as int. * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> __device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, int src_pos, T* dst) { uint32_t const dtype_len_in = s->dtype_len_in; uint8_t const* data = s->dict_base ? s->dict_base : s->data_start; uint32_t const pos = (s->dict_base ? ((s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0) : src_pos) * dtype_len_in; uint32_t const dict_size = s->dict_size; T unscaled = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. 
if (dtype_len_in < sizeof(T)) { unscaled <<= (sizeof(T) - dtype_len_in) * 8; unscaled >>= (sizeof(T) - dtype_len_in) * 8; } *dst = unscaled; } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> inline __device__ void gpuOutputFast(volatile page_state_s* s, int src_pos, T* dst) { const uint8_t* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ static __device__ void gpuOutputGeneric(volatile page_state_s* s, int src_pos, uint8_t* dst8, int len) { const uint8_t* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? 
dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time const uint8_t* src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf; } } } /** * @brief Sets up block-local page state information from the global pages. * * @param[in, out] s The local page state to be filled in * @param[in] p The global page to be copied from * @param[in] chunks The global list of chunks * @param[in] min_row Crop all rows below min_row * @param[in] num_rows Maximum number of rows to read * @param[in] is_decode_step If we are setting up for the decode step (instead of the preprocess * step) */ static __device__ bool setupLocalPageInfo(page_state_s* const s, PageInfo const* p, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, bool is_decode_step) { int t = threadIdx.x; int chunk_idx; // Fetch page info if (t == 0) s->page = *p; __syncthreads(); if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; } // Fetch column chunk info chunk_idx = s->page.chunk_idx; if (t == 0) { s->col = chunks[chunk_idx]; } // zero nested value and valid counts int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].valid_count = 0; s->page.nesting[d + t].value_count = 0; s->page.nesting[d + t].null_count = 0; } d += blockDim.x; } __syncthreads(); if (!t) { s->error = 0; // our starting row (absolute index) is // col.start_row == absolute row index // page.chunk-row == relative row index within the chunk size_t page_start_row = s->col.start_row + 
s->page.chunk_row; // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: // - On page N-1, the last row starts, with 2/6 values encoded // - On page N, the remaining 4/6 values are encoded, but there are no new rows. // if (s->page.num_input_values > 0 && s->page.num_rows > 0) { if (s->page.num_input_values > 0) { uint8_t* cur = s->page.page_data; uint8_t* end = cur + s->page.uncompressed_page_size; uint32_t dtype_len_out = s->col.data_type >> 3; s->ts_scale = 0; // Validate data type auto const data_type = s->col.data_type & 7; switch (data_type) { case BOOLEAN: s->dtype_len = 1; // Boolean are stored as 1 byte on the output break; case INT32: [[fallthrough]]; case FLOAT: s->dtype_len = 4; break; case INT64: if (s->col.ts_clock_rate) { int32_t units = 0; // Duration types are not included because no scaling is done when reading if (s->col.converted_type == TIMESTAMP_MILLIS) { units = cudf::timestamp_ms::period::den; } else if (s->col.converted_type == TIMESTAMP_MICROS) { units = cudf::timestamp_us::period::den; } else if (s->col.logical_type.TIMESTAMP.unit.isset.NANOS) { units = cudf::timestamp_ns::period::den; } if (units and units != s->col.ts_clock_rate) { s->ts_scale = (s->col.ts_clock_rate < units) ? 
-(units / s->col.ts_clock_rate) : (s->col.ts_clock_rate / units); } } [[fallthrough]]; case DOUBLE: s->dtype_len = 8; break; case INT96: s->dtype_len = 12; break; case BYTE_ARRAY: if (s->col.converted_type == DECIMAL) { auto const decimal_precision = s->col.decimal_precision; s->dtype_len = [decimal_precision]() { if (decimal_precision <= MAX_DECIMAL32_PRECISION) { return sizeof(int32_t); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { return sizeof(int64_t); } else { return sizeof(__int128_t); } }(); } else { s->dtype_len = sizeof(string_index_pair); } break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; s->error |= (s->dtype_len <= 0); break; } // Special check for downconversions s->dtype_len_in = s->dtype_len; if (s->col.converted_type == DECIMAL && data_type == FIXED_LEN_BYTE_ARRAY) { s->dtype_len = [dtype_len = s->dtype_len]() { if (dtype_len <= sizeof(int32_t)) { return sizeof(int32_t); } else if (dtype_len <= sizeof(int64_t)) { return sizeof(int64_t); } else { return sizeof(__int128_t); } }(); } else if (data_type == INT32) { if (dtype_len_out == 1) { // INT8 output s->dtype_len = 1; } else if (dtype_len_out == 2) { // INT16 output s->dtype_len = 2; } else if (s->col.converted_type == TIME_MILLIS) { // INT64 output s->dtype_len = 8; } } else if (data_type == BYTE_ARRAY && dtype_len_out == 4) { s->dtype_len = 4; // HASH32 output } else if (data_type == INT96) { s->dtype_len = 8; // Convert to 64-bit timestamp } // NOTE: s->page.num_rows, s->col.chunk_row, s->first_row and s->num_rows will be // invalid/bogus during first pass of the preprocess step for nested types. this is ok // because we ignore these values in that stage. { auto const max_row = min_row + num_rows; // if we are totally outside the range of the input, do nothing if ((page_start_row > max_row) || (page_start_row + s->page.num_rows < min_row)) { s->first_row = 0; s->num_rows = 0; } // otherwise else { s->first_row = page_start_row >= min_row ? 
0 : min_row - page_start_row; auto const max_page_rows = s->page.num_rows - s->first_row; s->num_rows = (page_start_row + s->first_row) + max_page_rows <= max_row ? max_page_rows : max_row - (page_start_row + s->first_row); } } // during the decoding step we need to offset the global output buffers // for each level of nesting so that we write to the section this page // is responsible for. // - for flat schemas, we can do this directly by using row counts // - for nested schemas, these offsets are computed during the preprocess step // // NOTE: in a chunked read situation, s->col.column_data_base and s->col.valid_map_base // will be aliased to memory that has been freed when we get here in the non-decode step, so // we cannot check against nullptr. we'll just check a flag directly. if (is_decode_step) { int max_depth = s->col.max_nesting_depth; for (int idx = 0; idx < max_depth; idx++) { PageNestingInfo* pni = &s->page.nesting[idx]; size_t output_offset; // schemas without lists if (s->col.max_level[level_type::REPETITION] == 0) { output_offset = page_start_row >= min_row ? page_start_row - min_row : 0; } // for schemas with lists, we've already got the exact value precomputed else { output_offset = pni->page_start_value; } pni->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]); if (pni->data_out != nullptr) { // anything below max depth with a valid data pointer must be a list, so the // element size is the size of the offset type. uint32_t len = idx < max_depth - 1 ? 
sizeof(cudf::size_type) : s->dtype_len; pni->data_out += (output_offset * len); } pni->valid_map = s->col.valid_map_base[idx]; if (pni->valid_map != nullptr) { pni->valid_map += output_offset >> 5; pni->valid_map_offset = (int32_t)(output_offset & 0x1f); } } } s->first_output_value = 0; // Find the compressed size of repetition levels cur += InitLevelSection(s, cur, end, level_type::REPETITION); // Find the compressed size of definition levels cur += InitLevelSection(s, cur, end, level_type::DEFINITION); s->dict_bits = 0; s->dict_base = nullptr; s->dict_size = 0; switch (s->page.encoding) { case Encoding::PLAIN_DICTIONARY: case Encoding::RLE_DICTIONARY: // RLE-packed dictionary indices, first byte indicates index length in bits if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) { // String dictionary: use index s->dict_base = reinterpret_cast<const uint8_t*>(s->col.str_dict_index); s->dict_size = s->col.page_info[0].num_input_values * sizeof(string_index_pair); } else { s->dict_base = s->col.page_info[0].page_data; // dictionary is always stored in the first page s->dict_size = s->col.page_info[0].uncompressed_page_size; } s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } break; case Encoding::PLAIN: s->dict_size = static_cast<int32_t>(end - cur); s->dict_val = 0; if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; } break; case Encoding::RLE: s->dict_run = 0; break; default: s->error = 1; // Unsupported encoding break; } if (cur > end) { s->error = 1; } s->lvl_end = cur; s->data_start = cur; s->data_end = end; } else { s->error = 1; } s->lvl_count[level_type::REPETITION] = 0; s->lvl_count[level_type::DEFINITION] = 0; s->nz_count = 0; s->num_input_values = s->page.num_input_values; s->dict_pos = 0; s->src_pos = 0; // for flat hierarchies, we can't know how many leaf values to skip unless we do a full // preprocess of the definition levels (since nulls will have no actual decodable value, there // is no direct correlation between # of rows and # of decodable values). so we will start // processing at the beginning of the value stream and disregard any indices that start // before the first row. if (s->col.max_level[level_type::REPETITION] == 0) { s->page.skipped_values = 0; s->page.skipped_leaf_values = 0; s->input_value_count = 0; s->input_row_count = 0; s->input_leaf_count = 0; s->row_index_lower_bound = -1; } // for nested hierarchies, we have run a preprocess that lets us skip directly to the values // we need to start decoding at else { // input_row_count translates to "how many rows we have processed so far", so since we are // skipping directly to where we want to start decoding, set it to first_row s->input_row_count = s->first_row; // return the lower bound to compare (page-relative) thread row index against. Explanation: // In the case of nested schemas, rows can span page boundaries. That is to say, // we can encounter the first value for row X on page M, but the last value for page M // might not be the last value for row X. page M+1 (or further) may contain the last value. 
// // This means that the first values we encounter for a given page (M+1) may not belong to the // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that // previous row is within the overall row bounds, include the values by allowing relative row // index -1 int const max_row = (min_row + num_rows) - 1; if (min_row < page_start_row && max_row >= page_start_row - 1) { s->row_index_lower_bound = -1; } else { s->row_index_lower_bound = s->first_row; } // if we're in the decoding step, jump directly to the first // value we care about if (is_decode_step) { s->input_value_count = s->page.skipped_values > -1 ? s->page.skipped_values : 0; } else { s->input_value_count = 0; s->input_leaf_count = 0; s->page.skipped_values = -1; // magic number to indicate it hasn't been set for use inside UpdatePageSizes s->page.skipped_leaf_values = 0; } } __threadfence_block(); } __syncthreads(); return true; } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is * also updated * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ static __device__ void store_validity(PageNestingInfo* pni, uint32_t valid_mask, int32_t value_count) { int word_offset = pni->valid_map_offset / 32; int bit_offset = pni->valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { auto relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { pni->valid_map[word_offset] = valid_mask; } else { atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. 
we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left); } pni->valid_map_offset += value_count; } /** * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level * D to which we should considered them null or not. * * @param[out] start_depth The start nesting depth * @param[out] end_depth The end nesting depth (inclusive) * @param[out] d The definition level up to which added values are not-null. 
if t is out of bounds, * d will be -1 * @param[in] s Local page information * @param[in] input_value_count The current count of input level values we have processed * @param[in] target_input_value_count The desired # of input level values we want to process * @param[in] t Thread index */ inline __device__ void get_nesting_bounds(int& start_depth, int& end_depth, int& d, page_state_s* s, int input_value_count, int32_t target_input_value_count, int t) { start_depth = -1; end_depth = -1; d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); d = s->def[index]; // if we have repetition (there are list columns involved) we have to // bound what nesting levels we apply values to if (s->col.max_level[level_type::REPETITION] > 0) { int r = s->rep[index]; start_depth = s->page.nesting[r].start_depth; end_depth = s->page.nesting[d].end_depth; } // for columns without repetition (even ones involving structs) we always // traverse the entire hierarchy. else { start_depth = 0; end_depth = s->col.max_nesting_depth - 1; } } } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. 
* * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s* s, int t) { // max nesting depth of the column int const max_depth = s->col.max_nesting_depth; bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread (the range of nesting depths we // will generate new value indices and validity bits for) int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int const is_new_row = start_depth == 0 ? 1 : 0; uint32_t const warp_row_count_mask = ballot(is_new_row); int32_t const thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within read row bounds? int const in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
1 : 0; // compute warp and thread value counts uint32_t const warp_count_mask = ballot((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // walk from 0 to max_depth uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo* pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int const in_nesting_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a non-null value uint32_t const is_valid = d >= pni->max_def_level && in_nesting_bounds ? 1 : 0; // compute warp and thread valid counts uint32_t const warp_valid_mask = // for flat schemas, a simple ballot_sync gives us the correct count and bit positions // because every value in the input matches to a value in the output !has_repetition ? ballot(is_valid) : // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so // the validity bit for thread t might actually represent output value t-6. the correct // position for thread t's bit is cur_value_count. for cuda 11 we could use // __reduce_or_sync(), but until then we have to do a warp reduce. 
WarpReduceOr32(is_valid << thread_value_count); thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index for value decoding if (is_valid && s_idx == max_depth - 1) { int const src_pos = pni->valid_count + thread_valid_count; int const dst_pos = pni->value_count + thread_value_count; // nz_idx is a mapping of src buffer indices to destination buffer indices s->nz_idx[rolling_index(src_pos)] = dst_pos; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth - 1) { uint32_t const next_warp_count_mask = ballot((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column and we're within nesting/row bounds // and we have a valid data_out pointer, it implies this is a list column, so // emit an offset. if (in_nesting_bounds && pni->data_out != nullptr) { int const idx = pni->value_count + thread_value_count; cudf::size_type const ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type*>(pni->data_out))[idx] = ofs; } } // nested schemas always read and write to the same bounds (that is, read and write positions // are already pre-bounded by first_row/num_rows). flat schemas will start reading at the // first value, even if that is before first_row, because we cannot trivially jump to // the correct position to start reading. 
since we are about to write the validity vector here // we need to adjust our computed mask to take into account the write row bounds. int const in_write_row_bounds = !has_repetition ? thread_row_index >= s->first_row && thread_row_index < (s->first_row + s->num_rows) : in_row_bounds; int const first_thread_in_write_range = !has_repetition ? __ffs(ballot(in_write_row_bounds)) - 1 : 0; // # of bits to of the validity mask to write out int const warp_valid_mask_bit_count = first_thread_in_write_range < 0 ? 0 : warp_value_count - first_thread_in_write_range; // increment count of valid values, count of total values, and update validity mask if (!t) { if (pni->valid_map != nullptr && warp_valid_mask_bit_count > 0) { uint32_t const warp_output_valid_mask = warp_valid_mask >> first_thread_in_write_range; store_validity(pni, warp_output_valid_mask, warp_valid_mask_bit_count); pni->null_count += warp_valid_mask_bit_count - __popc(warp_output_valid_mask); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); __syncwarp(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth - 1].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. 
Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. * * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s* s, int32_t target_leaf_count, int t) { bool has_repetition = s->col.max_level[level_type::REPETITION] > 0; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); __syncwarp(); // because the rep and def streams are encoded separately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; __syncwarp(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). 
*/ static __device__ void gpuUpdatePageSizes(page_state_s* s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int const max_depth = s->col.max_nesting_depth; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // count rows and leaf values int const is_new_row = start_depth == 0 ? 1 : 0; uint32_t const warp_row_count_mask = ballot(is_new_row); int const is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0; uint32_t const warp_leaf_count_mask = ballot(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
1 : 0; uint32_t const row_bounds_mask = ballot(in_row_bounds); int const first_thread_in_range = __ffs(row_bounds_mask) - 1; // if we've found the beginning of the first row, mark down the position // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values) if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) { // how many values we've skipped in the rep/def levels s->page.skipped_values = input_value_count + first_thread_in_range; // how many values we've skipped in the actual data stream s->page.skipped_leaf_values = input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1)); } } // increment value counts across all nesting depths for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo* pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int const in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0; uint32_t const count_mask = ballot(in_nesting_bounds); if (!t) { pni->batch_size += __popc(count_mask); } } input_value_count += min(32, (target_input_value_count - input_value_count)); input_row_count += __popc(warp_row_count_mask); input_leaf_count += __popc(warp_leaf_count_mask); } // update final page value count if (!t) { s->input_value_count = target_input_value_count; s->input_leaf_count = input_leaf_count; s->input_row_count = input_row_count; } } __device__ size_type gpuGetStringSize(page_state_s* s, int target_count, int t) { auto dict_target_pos = target_count; size_type str_len = 0; if (s->dict_base) { auto const [new_target_pos, len] = gpuDecodeDictionaryIndices<true>(s, target_count, t); dict_target_pos = new_target_pos; str_len = len; } else if ((s->col.data_type & 7) == BYTE_ARRAY) { str_len = gpuInitStringDescriptors(s, target_count, t); } if (!t) { *(volatile int32_t*)&s->dict_pos = dict_target_pos; } return str_len; } /** * @brief Kernel for computing per-page column 
size information for all nesting levels. * * This function will write out the size field for each level of nesting. * * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read. Pass as INT_MAX to guarantee reading all rows * @param is_base_pass Whether or not this is the base pass. We first have to compute * the full size information of every page before we come through in a second (trim) pass * to determine what subset of rows in this page we should be reading * @param compute_string_sizes Whether or not we should be computing string sizes * (PageInfo::str_bytes) as part of the pass */ __global__ void __launch_bounds__(block_size) gpuComputePageSizes(PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, bool is_base_pass, bool compute_string_sizes) { __shared__ __align__(16) page_state_s state_g; page_state_s* const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; PageInfo* pp = &pages[page_idx]; if (!setupLocalPageInfo(s, pp, chunks, min_row, num_rows, false)) { return; } if (!t) { s->page.skipped_values = -1; s->page.skipped_leaf_values = 0; s->page.str_bytes = 0; s->input_row_count = 0; s->input_value_count = 0; // in the base pass, we're computing the number of rows, make sure we visit absolutely // everything if (is_base_pass) { s->first_row = 0; s->num_rows = INT_MAX; s->row_index_lower_bound = -1; } } // we only need to preprocess hierarchies with repetition in them (ie, hierarchies // containing lists anywhere within). bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0; compute_string_sizes = compute_string_sizes && ((s->col.data_type & 7) == BYTE_ARRAY && s->dtype_len != 4); // various early out optimizations: // - if this is a flat hierarchy (no lists) and is not a string column. 
in this case we don't need // to do // the expensive work of traversing the level data to determine sizes. we can just compute it // directly. if (!has_repetition && !compute_string_sizes) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { if (is_base_pass) { pp->nesting[i].size = pp->num_input_values; } pp->nesting[i].batch_size = pp->num_input_values; } d += blockDim.x; } return; } // - if this page is not at the beginning or end of the trim bounds, the batch size is // the full page size if (!is_base_pass && s->num_rows == s->page.num_rows) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].batch_size = pp->nesting[i].size; } d += blockDim.x; } return; } // - if this page is completely trimmed, zero out sizes. if (!is_base_pass && s->num_rows == 0) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].batch_size = 0; } d += blockDim.x; } return; } // at this point we are going to be fully recomputing batch information // zero sizes int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].batch_size = 0; } d += blockDim.x; } __syncthreads(); // optimization : it might be useful to have a version of gpuDecodeStream that could go wider than // 1 warp. Currently it only uses 1 warp so that it can overlap work with the value decoding step // when in the actual value decoding kernel. However, during this preprocess step we have no such // limits - we could go as wide as block_size if (t < 32) { constexpr int batch_size = 32; int target_input_count = batch_size; while (!s->error && s->input_value_count < s->num_input_values) { // decode repetition and definition levels. these will attempt to decode at // least up to the target, but may decode a few more. 
if (has_repetition) { gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION); __syncwarp(); // we may have decoded different amounts from each stream, so only process what we've been int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdatePageSizes(s, actual_input_count, t, !is_base_pass); if (compute_string_sizes) { auto const str_len = gpuGetStringSize(s, s->input_leaf_count, t); if (!t) { s->page.str_bytes += str_len; } } target_input_count = actual_input_count + batch_size; __syncwarp(); } } // update output results: // - real number of rows for the whole page // - nesting sizes for the whole page // - skipped value information for trimmed pages // - string bytes if (is_base_pass) { // nesting level 0 is the root column, so the size is also the # of rows if (!t) { pp->num_rows = s->page.nesting[0].batch_size; } // store off this batch size as the "full" size int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].size = pp->nesting[i].batch_size; } d += blockDim.x; } } if (!t) { pp->skipped_values = s->page.skipped_values; pp->skipped_leaf_values = s->page.skipped_leaf_values; pp->str_bytes = s->page.str_bytes; } } /** * @brief Kernel for co the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). 
* * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read */ __global__ void __launch_bounds__(block_size) gpuDecodePageData( PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows) { __shared__ __align__(16) page_state_s state_g; page_state_s* const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, true)) { return; } // if we have no rows to do (eg, in a skip_rows/num_rows case) if (s->num_rows == 0) { return; } if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { out_thread0 = ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; if (t < out_thread0) { target_pos = min(src_pos + 2 * (block_size - out_thread0), s->nz_count + (block_size - out_thread0)); } else { target_pos = min(s->nz_count, src_pos + block_size - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices<false>(s, src_target_pos, t & 0x1f).first; } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; src_pos += t - out_thread0; // the position in the output column/buffer int dst_pos = s->nz_idx[rolling_index(src_pos)]; // for the flat hierarchy case we will be reading from the beginning of the value stream, // regardless of the value of first_row. so adjust our destination offset accordingly. // example: // - user has passed skip_rows = 2, so our first_row to output is 2 // - the row values we get from nz_idx will be // 0, 1, 2, 3, 4 .... // - by shifting these values by first_row, the sequence becomes // -1, -2, 0, 1, 2 ... // - so we will end up ignoring the first two input rows, and input rows 2..n will // get written to the output starting at position 0. // if (!has_repetition) { dst_pos -= s->first_row; } // target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values // before first_row) in the flat hierarchy case. if (src_pos < target_pos && dst_pos >= 0) { // src_pos represents the logical row position we want to read from. But in the case of // nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position // has to take into account the # of values we have to skip in the page to get to the // desired logical row. For flat hierarchies, skipped_leaf_values will always be 0. 
uint32_t val_src_pos = src_pos + skipped_leaf_values; // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void* dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len; if (dtype == BYTE_ARRAY) { if (s->col.converted_type == DECIMAL) { auto const [ptr, len] = gpuGetStringData(s, val_src_pos); auto const decimal_precision = s->col.decimal_precision; if (decimal_precision <= MAX_DECIMAL32_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int32_t*>(dst)); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int64_t*>(dst)); } else { gpuOutputByteArrayAsInt(ptr, len, static_cast<__int128_t*>(dst)); } } else { gpuOutputString(s, val_src_pos, dst); } } else if (dtype == BOOLEAN) { gpuOutputBoolean(s, val_src_pos, static_cast<uint8_t*>(dst)); } else if (s->col.converted_type == DECIMAL) { switch (dtype) { case INT32: gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); break; case INT64: gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); break; default: if (s->dtype_len_in <= sizeof(int32_t)) { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int32_t*>(dst)); } else if (s->dtype_len_in <= sizeof(int64_t)) { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<__int128_t*>(dst)); } break; } } else if (dtype == INT96) { gpuOutputInt96Timestamp(s, val_src_pos, static_cast<int64_t*>(dst)); } else if (dtype_len == 8) { if (s->dtype_len_in == 4) { // Reading INT32 TIME_MILLIS into 64-bit DURATION_MILLISECONDS // TIME_MILLIS is the only duration type stored as int32: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#deprecated-time-convertedtype gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); } else if (s->ts_scale) { gpuOutputInt64Timestamp(s, 
val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); } } else if (dtype_len == 4) { gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); } else { gpuOutputGeneric(s, val_src_pos, static_cast<uint8_t*>(dst), dtype_len); } } if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; } } __syncthreads(); } } } // anonymous namespace /** * @copydoc cudf::io::parquet::gpu::ComputePageSizes */ void ComputePageSizes(hostdevice_vector<PageInfo>& pages, hostdevice_vector<ColumnChunkDesc> const& chunks, size_t min_row, size_t num_rows, bool compute_num_rows, bool compute_string_sizes, rmm::cuda_stream_view stream) { dim3 dim_block(block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // This computes the size for the entire page, not taking row bounds into account. // If uses_custom_row_bounds is set to true, we have to do a second pass later that "trims" // the starting and ending read values to account for these bounds. hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows, compute_num_rows, compute_string_sizes); } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ void __host__ DecodePageData(hostdevice_vector<PageInfo>& pages, hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page hipLaunchKernelGGL(( gpuDecodePageData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), pages.device_ptr(), chunks, min_row, num_rows); } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
55c4ddede9aa369b210bd20a5afd2bb8d9a28c9b.cu
/*
 * Copyright (c) 2018-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parquet_gpu.hpp"

#include <io/utilities/block_utils.cuh>
#include <io/utilities/column_buffer.hpp>

#include <cuda/std/tuple>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/strings/string_view.hpp>
#include <cudf/utilities/bit.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/functional.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

// Threads per decode block; the circular value buffers below hold two blocks'
// worth of entries so one batch can be staged while another is consumed.
constexpr int block_size           = 128;
constexpr int non_zero_buffer_size = block_size * 2;

// Rotate-left of a 32-bit value by r bits, implemented with the funnel-shift
// intrinsic (equivalent to (x << r) | (x >> (32 - r))).
inline __device__ uint32_t rotl32(uint32_t x, uint32_t r)
{
  return __funnelshift_l(x, x, r);  // (x << r) | (x >> (32 - r));
}

// Map a monotonically increasing index into the circular value buffers
// (non_zero_buffer_size is a power of two, so masking == modulo).
inline __device__ int rolling_index(int index) { return index & (non_zero_buffer_size - 1); }

namespace cudf {
namespace io {
namespace parquet {
namespace gpu {

namespace {

// Per-page decode state, kept in shared memory by the decode kernels.
// One instance describes the page currently being decoded by a thread block.
struct page_state_s {
  const uint8_t* data_start;   // start of the value data section of the page
  const uint8_t* data_end;     // one past the end of the page data
  const uint8_t* lvl_end;      // end of the level data / start of value data
  const uint8_t* dict_base;    // ptr to dictionary page data
  int32_t dict_size;           // size of dictionary data
  int32_t first_row;           // First row in page to output
  int32_t num_rows;            // Rows in page to decode (including rows to be skipped)
  int32_t first_output_value;  // First value in page to output
  int32_t num_input_values;    // total # of input/level values in the page
  int32_t dtype_len;           // Output data type length
  int32_t dtype_len_in;        // Can be larger than dtype_len if truncating 32-bit into 8-bit
  int32_t dict_bits;           // # of bits to store dictionary indices
  uint32_t dict_run;           // remaining length of current dictionary RLE run (LSB = literal flag)
  int32_t dict_val;            // value of the current repeated dictionary run
  uint32_t initial_rle_run[NUM_LEVEL_TYPES];   // [def,rep]
  int32_t initial_rle_value[NUM_LEVEL_TYPES];  // [def,rep]
  int32_t error;               // non-zero if a decode error was encountered
  PageInfo page;               // copy of the global PageInfo for this page
  ColumnChunkDesc col;         // copy of the owning column chunk descriptor

  // (leaf) value decoding
  int32_t nz_count;  // number of valid entries in nz_idx (write position in circular buffer)
  int32_t dict_pos;  // write position of dictionary indices
  int32_t src_pos;   // input read position of final output value
  int32_t ts_scale;  // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale
  uint32_t nz_idx[non_zero_buffer_size];    // circular buffer of non-null value positions
  uint32_t dict_idx[non_zero_buffer_size];  // Dictionary index, boolean, or string offset values
  uint32_t str_len[non_zero_buffer_size];   // String length for plain encoding of strings

  // repetition/definition level decoding
  int32_t input_value_count;                  // how many values of the input we've processed
  int32_t input_row_count;                    // how many rows of the input we've processed
  int32_t input_leaf_count;                   // how many leaf values of the input we've processed
  uint32_t rep[non_zero_buffer_size];         // circular buffer of repetition level values
  uint32_t def[non_zero_buffer_size];         // circular buffer of definition level values
  const uint8_t* lvl_start[NUM_LEVEL_TYPES];  // [def,rep]
  int32_t lvl_count[NUM_LEVEL_TYPES];         // how many of each of the streams we've decoded
  int32_t row_index_lower_bound;              // lower bound of row indices we should process
};

/**
 * @brief Read a 32-bit varint integer
 *
 * @param[in,out] cur The current data position, updated after the read
 * @param[in] end The end data position
 *
 * @return The 32-bit value read
 */
inline __device__ uint32_t get_vlq32(const uint8_t*& cur, const uint8_t* end)
{
  // LEB128-style: 7 payload bits per byte, high bit set means "more bytes
  // follow"; at most 5 bytes are consumed for a 32-bit value.
  uint32_t v = *cur++;
  if (v >= 0x80 && cur < end) {
    v = (v & 0x7f) | ((*cur++) << 7);
    if (v >= (0x80 << 7) && cur < end) {
      v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14);
      if (v >= (0x80 << 14) && cur < end) {
        v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21);
        if (v >= (0x80 << 21) && cur < end) {
          v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28);
        }
      }
    }
  }
  return v;
}

/**
 * @brief Parse the beginning of the level section (definition or repetition),
 * initializes the initial RLE run & value, and returns the section length
 *
 * Sets s->error (2 = truncated data, 3 = unsupported encoding) on failure.
 *
 * @param[in,out] s The page state
 * @param[in] cur The current data position
 * @param[in] end The end of the data
 * @param[in] lvl The level type to initialize (DEFINITION or REPETITION)
 *
 * @return The length of the section
 */
__device__ uint32_t InitLevelSection(page_state_s* s,
                                     const uint8_t* cur,
                                     const uint8_t* end,
                                     level_type lvl)
{
  int32_t len;
  int level_bits    = s->col.level_bits[lvl];
  Encoding encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding
                                                    : s->page.repetition_level_encoding;

  if (level_bits == 0) {
    // Levels not stored: treat as one repeated-zero RLE run covering the page.
    len                       = 0;
    s->initial_rle_run[lvl]   = s->page.num_input_values * 2;  // repeated value
    s->initial_rle_value[lvl] = 0;
    s->lvl_start[lvl]         = cur;
  } else if (encoding == Encoding::RLE) {
    // V2 only uses RLE encoding, so only perform check here
    if (s->page.def_lvl_bytes || s->page.rep_lvl_bytes) {
      len = lvl == level_type::DEFINITION ? s->page.def_lvl_bytes : s->page.rep_lvl_bytes;
    } else if (cur + 4 < end) {
      // V1 pages prefix the level data with a 4-byte little-endian length.
      len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24);
      cur += 4;
    } else {
      len      = 0;
      s->error = 2;
    }
    if (!s->error) {
      uint32_t run            = get_vlq32(cur, end);
      s->initial_rle_run[lvl] = run;
      if (!(run & 1)) {
        // Repeated-value run: the value itself follows, 1 or 2 bytes wide.
        int v = (cur < end) ? cur[0] : 0;
        cur++;
        if (level_bits > 8) {
          v |= ((cur < end) ? cur[0] : 0) << 8;
          cur++;
        }
        s->initial_rle_value[lvl] = v;
      }
      s->lvl_start[lvl] = cur;
      if (cur > end) { s->error = 2; }
    }
  } else if (encoding == Encoding::BIT_PACKED) {
    len                       = (s->page.num_input_values * level_bits + 7) >> 3;
    s->initial_rle_run[lvl]   = ((s->page.num_input_values + 7) >> 3) * 2 + 1;  // literal run
    s->initial_rle_value[lvl] = 0;
    s->lvl_start[lvl]         = cur;
  } else {
    s->error = 3;
    len      = 0;
  }
  return static_cast<uint32_t>(len);
}

/**
 * @brief Decode values out of a definition or repetition stream
 *
 * Decodes the RLE/bit-packed hybrid level stream into the given circular
 * output buffer until at least target_count values have been produced (or the
 * input is exhausted / an error is hit). Cooperative across one warp: thread 0
 * parses run headers and the result is broadcast via shuffle().
 *
 * @param[out] output Circular buffer (non_zero_buffer_size entries) receiving level values
 * @param[in,out] s Page state input/output
 * @param[in] target_count Target count of stream values on output
 * @param[in] t Warp0 thread ID (0..31)
 * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION
 */
__device__ void gpuDecodeStream(
  uint32_t* output, page_state_s* s, int32_t target_count, int t, level_type lvl)
{
  const uint8_t* cur_def    = s->lvl_start[lvl];
  const uint8_t* end        = s->lvl_end;
  uint32_t level_run        = s->initial_rle_run[lvl];
  int32_t level_val         = s->initial_rle_value[lvl];
  int level_bits            = s->col.level_bits[lvl];
  int32_t num_input_values  = s->num_input_values;
  int32_t value_count       = s->lvl_count[lvl];
  int32_t batch_coded_count = 0;

  while (value_count < target_count && value_count < num_input_values) {
    int batch_len;
    if (level_run <= 1) {
      // Get a new run symbol from the byte stream
      int sym_len = 0;
      if (!t) {
        const uint8_t* cur = cur_def;
        if (cur < end) { level_run = get_vlq32(cur, end); }
        if (!(level_run & 1)) {
          // Repeated-value run: read the 1- or 2-byte value.
          if (cur < end) level_val = cur[0];
          cur++;
          if (level_bits > 8) {
            if (cur < end) level_val |= cur[0] << 8;
            cur++;
          }
        }
        if (cur > end || level_run <= 1) { s->error = 0x10; }
        sym_len = (int32_t)(cur - cur_def);
        __threadfence_block();
      }
      // Broadcast thread 0's parse results to the rest of the warp.
      sym_len   = shuffle(sym_len);
      level_val = shuffle(level_val);
      level_run = shuffle(level_run);
      cur_def += sym_len;
    }
    if (s->error) { break; }

    batch_len = min(num_input_values - value_count, 32);
    if (level_run & 1) {
      // Literal run: each thread bit-unpacks its own level value.
      int batch_len8;
      batch_len  = min(batch_len, (level_run >> 1) * 8);
      batch_len8 = (batch_len + 7) >> 3;
      if (t < batch_len) {
        int bitpos         = t * level_bits;
        const uint8_t* cur = cur_def + (bitpos >> 3);
        bitpos &= 7;
        if (cur < end) level_val = cur[0];
        cur++;
        if (level_bits > 8 - bitpos && cur < end) {
          level_val |= cur[0] << 8;
          cur++;
          if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16;
        }
        level_val = (level_val >> bitpos) & ((1 << level_bits) - 1);
      }
      level_run -= batch_len8 * 2;
      cur_def += batch_len8 * level_bits;
    } else {
      // Repeated value
      batch_len = min(batch_len, level_run >> 1);
      level_run -= batch_len * 2;
    }
    if (t < batch_len) {
      int idx                                  = value_count + t;
      output[idx & (non_zero_buffer_size - 1)] = level_val;
    }
    batch_coded_count += batch_len;
    value_count += batch_len;
  }

  // update the stream info
  if (!t) {
    s->lvl_start[lvl]         = cur_def;
    s->initial_rle_run[lvl]   = level_run;
    s->initial_rle_value[lvl] = level_val;
    s->lvl_count[lvl]         = value_count;
  }
}

/**
 * @brief Performs RLE decoding of dictionary indexes
 *
 * @param[in,out] s Page state input/output
 * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to
 * 31)
 * @param[in] t Warp1 thread ID (0..31)
 *
 * @return A pair containing the new output position, and the total length of strings decoded (this
 * will only be valid on thread 0 and if sizes_only is true)
 */
template <bool sizes_only>
__device__ cuda::std::pair<int, int> gpuDecodeDictionaryIndices(volatile page_state_s* s,
                                                                int target_pos,
                                                                int t)
{
  const uint8_t* end = s->data_end;
  int dict_bits      = s->dict_bits;
  int pos            = s->dict_pos;
  int str_len        = 0;

  while (pos < target_pos) {
    int is_literal, batch_len;
    if (!t) {
      // Thread 0 parses the next RLE run header and advances the stream.
      uint32_t run       = s->dict_run;
      const uint8_t* cur = s->data_start;
      if (run <= 1) {
        run = (cur < end) ? get_vlq32(cur, end) : 0;
        if (!(run & 1)) {
          // Repeated value
          int bytecnt = (dict_bits + 7) >> 3;
          if (cur + bytecnt <= end) {
            int32_t run_val = cur[0];
            if (bytecnt > 1) {
              run_val |= cur[1] << 8;
              if (bytecnt > 2) {
                run_val |= cur[2] << 16;
                if (bytecnt > 3) { run_val |= cur[3] << 24; }
              }
            }
            s->dict_val = run_val & ((1 << dict_bits) - 1);
          }
          cur += bytecnt;
        }
      }
      if (run & 1) {
        // Literal batch: must output a multiple of 8, except for the last batch
        int batch_len_div8;
        batch_len      = max(min(32, (int)(run >> 1) * 8), 1);
        batch_len_div8 = (batch_len + 7) >> 3;
        run -= batch_len_div8 * 2;
        cur += batch_len_div8 * dict_bits;
      } else {
        batch_len = max(min(32, (int)(run >> 1)), 1);
        run -= batch_len * 2;
      }
      s->dict_run   = run;
      s->data_start = cur;
      is_literal    = run & 1;
      __threadfence_block();
    }
    __syncwarp();
    is_literal = shuffle(is_literal);
    batch_len  = shuffle(batch_len);

    // compute dictionary index.
    int dict_idx = 0;
    if (t < batch_len) {
      dict_idx = s->dict_val;
      if (is_literal) {
        // Each thread unpacks its own dict_bits-wide index from the bit stream.
        // NOTE: ofs is negative for lanes past the 8-aligned batch end.
        int32_t ofs      = (t - ((batch_len + 7) & ~7)) * dict_bits;
        const uint8_t* p = s->data_start + (ofs >> 3);
        ofs &= 7;
        if (p < end) {
          uint32_t c = 8 - ofs;
          dict_idx   = (*p++) >> ofs;
          if (c < dict_bits && p < end) {
            dict_idx |= (*p++) << c;
            c += 8;
            if (c < dict_bits && p < end) {
              dict_idx |= (*p++) << c;
              c += 8;
              if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; }
            }
          }
          dict_idx &= (1 << dict_bits) - 1;
        }
      }

      // if we're not computing sizes, store off the dictionary index
      if constexpr (!sizes_only) {
        s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
      }
    }

    // if we're computing sizes, add the length(s)
    if constexpr (sizes_only) {
      int const len = [&]() {
        if (t >= batch_len) { return 0; }
        // we may end up decoding more indices than we asked for. so don't include those in the
        // size calculation
        if (pos + t >= target_pos) { return 0; }
        // TODO: refactor this with gpuGetStringData / gpuGetStringSize
        uint32_t const dict_pos = (s->dict_bits > 0) ? dict_idx * sizeof(string_index_pair) : 0;
        if (target_pos && dict_pos < (uint32_t)s->dict_size) {
          const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos);
          return src->second;
        }
        return 0;
      }();

      using WarpReduce = cub::WarpReduce<size_type>;
      __shared__ typename WarpReduce::TempStorage temp_storage;
      // note: str_len will only be valid on thread 0.
      str_len += WarpReduce(temp_storage).Sum(len);
    }

    pos += batch_len;
  }
  return {pos, str_len};
}

/**
 * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1
 *
 * @param[in,out] s Page state input/output
 * @param[in] target_pos Target write position
 * @param[in] t Thread ID
 *
 * @return The new output position
 */
__device__ int gpuDecodeRleBooleans(volatile page_state_s* s, int target_pos, int t)
{
  const uint8_t* end = s->data_end;
  int pos            = s->dict_pos;

  while (pos < target_pos) {
    int is_literal, batch_len;
    if (!t) {
      uint32_t run       = s->dict_run;
      const uint8_t* cur = s->data_start;
      if (run <= 1) {
        run = (cur < end) ? get_vlq32(cur, end) : 0;
        if (!(run & 1)) {
          // Repeated value
          s->dict_val = (cur < end) ? cur[0] & 1 : 0;
          cur++;
        }
      }
      if (run & 1) {
        // Literal batch: must output a multiple of 8, except for the last batch
        int batch_len_div8;
        batch_len = max(min(32, (int)(run >> 1) * 8), 1);
        if (batch_len >= 8) { batch_len &= ~7; }
        batch_len_div8 = (batch_len + 7) >> 3;
        run -= batch_len_div8 * 2;
        cur += batch_len_div8;
      } else {
        batch_len = max(min(32, (int)(run >> 1)), 1);
        run -= batch_len * 2;
      }
      s->dict_run   = run;
      s->data_start = cur;
      is_literal    = run & 1;
      __threadfence_block();
    }
    __syncwarp();
    is_literal = shuffle(is_literal);
    batch_len  = shuffle(batch_len);
    if (t < batch_len) {
      int dict_idx;
      if (is_literal) {
        // One bit per value; ofs is this lane's bit position within the batch.
        int32_t ofs      = t - ((batch_len + 7) & ~7);
        const uint8_t* p = s->data_start + (ofs >> 3);
        dict_idx         = (p < end) ? (p[0] >> (ofs & 7u)) & 1 : 0;
      } else {
        dict_idx = s->dict_val;
      }
      s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
    }
    pos += batch_len;
  }
  return pos;
}

/**
 * @brief Parses the length and position of strings and returns total length of all strings
 * processed
 *
 * @param[in,out] s Page state input/output
 * @param[in] target_pos Target output position
 * @param[in] t Thread ID
 *
 * @return Total length of strings processed
 */
__device__ size_type gpuInitStringDescriptors(volatile page_state_s* s, int target_pos, int t)
{
  int pos       = s->dict_pos;
  int total_len = 0;

  // This step is purely serial
  if (!t) {
    const uint8_t* cur = s->data_start;
    int dict_size      = s->dict_size;
    int k              = s->dict_val;  // running byte offset into the plain-encoded data

    while (pos < target_pos) {
      int len;
      if (k + 4 <= dict_size) {
        // Each string is prefixed by a 4-byte little-endian length.
        len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24);
        k += 4;
        if (k + len > dict_size) { len = 0; }
      } else {
        len = 0;
      }
      s->dict_idx[pos & (non_zero_buffer_size - 1)] = k;
      s->str_len[pos & (non_zero_buffer_size - 1)]  = len;
      k += len;
      total_len += len;
      pos++;
    }
    s->dict_val = k;
    __threadfence_block();
  }

  return total_len;
}

/**
 * @brief Retrieves string information for a string at the specified source position
 *
 * @param[in] s Page state input
 * @param[in] src_pos Source position
 *
 * @return A pair containing a pointer to the string and its length
 */
inline __device__ cuda::std::pair<const char*, size_t> gpuGetStringData(volatile page_state_s* s,
                                                                        int src_pos)
{
  const char* ptr = nullptr;
  size_t len      = 0;

  if (s->dict_base) {
    // String dictionary
    uint32_t dict_pos =
      (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] * sizeof(string_index_pair) : 0;
    if (dict_pos < (uint32_t)s->dict_size) {
      const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos);
      ptr             = src->first;
      len             = src->second;
    }
  } else {
    // Plain encoding
    uint32_t dict_pos = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
    if (dict_pos <= (uint32_t)s->dict_size) {
      ptr = reinterpret_cast<const char*>(s->data_start + dict_pos);
      len = s->str_len[src_pos & (non_zero_buffer_size - 1)];
    }
  }

  return {ptr, len};
}

/**
 * @brief Output a string descriptor
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash)
 */
inline __device__ void gpuOutputString(volatile page_state_s* s, int src_pos, void* dstv)
{
  auto [ptr, len] = gpuGetStringData(s, src_pos);
  if (s->dtype_len == 4) {
    // Output hash. This hash value is used if the option to convert strings to
    // categoricals is enabled. The seed value is chosen arbitrarily.
    uint32_t constexpr hash_seed = 33;
    cudf::string_view const sv{ptr, static_cast<size_type>(len)};
    *static_cast<uint32_t*>(dstv) = cudf::detail::MurmurHash3_32<cudf::string_view>{hash_seed}(sv);
  } else {
    // Output string descriptor
    auto* dst   = static_cast<string_index_pair*>(dstv);
    dst->first  = ptr;
    dst->second = len;
  }
}

/**
 * @brief Output a boolean
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dst Pointer to row output data
 */
inline __device__ void gpuOutputBoolean(volatile page_state_s* s, int src_pos, uint8_t* dst)
{
  *dst = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
}

/**
 * @brief Store a 32-bit data element
 *
 * Performs an aligned 32-bit read (plus a funnel shift when the source is
 * misaligned) rather than byte-wise copies.
 *
 * @param[out] dst ptr to output
 * @param[in] src8 raw input bytes
 * @param[in] dict_pos byte position in dictionary
 * @param[in] dict_size size of dictionary
 */
inline __device__ void gpuStoreOutput(uint32_t* dst,
                                      const uint8_t* src8,
                                      uint32_t dict_pos,
                                      uint32_t dict_size)
{
  uint32_t bytebuf;
  unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
  src8 -= ofs;  // align to 32-bit boundary
  ofs <<= 3;    // bytes -> bits
  if (dict_pos < dict_size) {
    bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
    if (ofs) {
      uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
      bytebuf              = __funnelshift_r(bytebuf, bytebufnext, ofs);
    }
  } else {
    bytebuf = 0;
  }
  *dst = bytebuf;
}

/**
 * @brief Store a 64-bit data element
 *
 * @param[out] dst ptr to output
 * @param[in] src8 raw input bytes
 * @param[in] dict_pos byte position in dictionary
 * @param[in] dict_size size of dictionary
 */
inline __device__ void gpuStoreOutput(uint2* dst,
                                      const uint8_t* src8,
                                      uint32_t dict_pos,
                                      uint32_t dict_size)
{
  uint2 v;
  unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
  src8 -= ofs;  // align to 32-bit boundary
  ofs <<= 3;    // bytes -> bits
  if (dict_pos < dict_size) {
    v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
    v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
    if (ofs) {
      uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
      v.x           = __funnelshift_r(v.x, v.y, ofs);
      v.y           = __funnelshift_r(v.y, next, ofs);
    }
  } else {
    v.x = v.y = 0;
  }
  *dst = v;
}

/**
 * @brief Convert an INT96 Spark timestamp to 64-bit timestamp
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[out] dst Pointer to row output data
 */
inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
  using cuda::std::chrono::duration_cast;

  const uint8_t* src8;
  uint32_t dict_pos, dict_size = s->dict_size, ofs;

  if (s->dict_base) {
    // Dictionary
    dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
    src8     = s->dict_base;
  } else {
    // Plain
    dict_pos = src_pos;
    src8     = s->data_start;
  }
  dict_pos *= (uint32_t)s->dtype_len_in;
  ofs = 3 & reinterpret_cast<size_t>(src8);
  src8 -= ofs;  // align to 32-bit boundary
  ofs <<= 3;    // bytes -> bits

  if (dict_pos + 4 >= dict_size) {
    *dst = 0;
    return;
  }

  // INT96 layout: 8 bytes of nanoseconds-within-day, then 4 bytes Julian day.
  uint3 v;
  int64_t nanos, days;
  v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
  v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
  v.z = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
  if (ofs) {
    uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 12);
    v.x           = __funnelshift_r(v.x, v.y, ofs);
    v.y           = __funnelshift_r(v.y, v.z, ofs);
    v.z           = __funnelshift_r(v.z, next, ofs);
  }
  nanos = v.y;
  nanos <<= 32;
  nanos |= v.x;
  // Convert from Julian day at noon to UTC seconds
  days = static_cast<int32_t>(v.z);
  cudf::duration_D d_d{
    days - 2440588};  // TBD: Should be noon instead of midnight, but this matches pyarrow

  *dst = [&]() {
    switch (s->col.ts_clock_rate) {
      case 1:  // seconds
        return duration_cast<duration_s>(d_d).count() +
               duration_cast<duration_s>(duration_ns{nanos}).count();
      case 1'000:  // milliseconds
        return duration_cast<duration_ms>(d_d).count() +
               duration_cast<duration_ms>(duration_ns{nanos}).count();
      case 1'000'000:  // microseconds
        return duration_cast<duration_us>(d_d).count() +
               duration_cast<duration_us>(duration_ns{nanos}).count();
      case 1'000'000'000:  // nanoseconds
      default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos;
    }
  }();
}

/**
 * @brief Output a 64-bit timestamp
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dst Pointer to row output data
 */
inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
  const uint8_t* src8;
  uint32_t dict_pos, dict_size = s->dict_size, ofs;
  int64_t ts;

  if (s->dict_base) {
    // Dictionary
    dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
    src8     = s->dict_base;
  } else {
    // Plain
    dict_pos = src_pos;
    src8     = s->data_start;
  }
  dict_pos *= (uint32_t)s->dtype_len_in;
  ofs = 3 & reinterpret_cast<size_t>(src8);
  src8 -= ofs;  // align to 32-bit boundary
  ofs <<= 3;    // bytes -> bits
  if (dict_pos + 4 < dict_size) {
    uint2 v;
    int64_t val;
    int32_t ts_scale;
    v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
    v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
    if (ofs) {
      uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
      v.x           = __funnelshift_r(v.x, v.y, ofs);
      v.y           = __funnelshift_r(v.y, next, ofs);
    }
    val = v.y;
    val <<= 32;
    val |= v.x;
    // Output to desired clock rate
    ts_scale = s->ts_scale;
    if (ts_scale < 0) {
      // round towards negative infinity
      int sign = (val < 0);
      ts       = ((val + sign) / -ts_scale) + sign;
    } else {
      ts = val * ts_scale;
    }
  } else {
    ts = 0;
  }
  *dst = ts;
}

/**
 * @brief Output a byte array as int.
 *
 * @param[in] ptr Pointer to the byte array
 * @param[in] len Byte array length
 * @param[out] dst Pointer to row output data
 */
template <typename T>
__device__ void gpuOutputByteArrayAsInt(char const* ptr, int32_t len, T* dst)
{
  // Big-endian byte array -> signed integer of type T.
  T unscaled = 0;
  for (auto i = 0; i < len; i++) {
    uint8_t v = ptr[i];
    unscaled  = (unscaled << 8) | v;
  }
  // Shift the unscaled value up and back down when it isn't all 8 bytes,
  // which sign extend the value for correctly representing negative numbers.
  unscaled <<= (sizeof(T) - len) * 8;
  unscaled >>= (sizeof(T) - len) * 8;
  *dst = unscaled;
}

/**
 * @brief Output a fixed-length byte array as int.
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dst Pointer to row output data
 */
template <typename T>
__device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, int src_pos, T* dst)
{
  uint32_t const dtype_len_in = s->dtype_len_in;
  uint8_t const* data         = s->dict_base ? s->dict_base : s->data_start;
  // Byte offset of this value: dictionary index (if dictionary-encoded) or
  // the source position itself (plain), scaled by the element width.
  uint32_t const pos =
    (s->dict_base ? ((s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0)
                  : src_pos) *
    dtype_len_in;
  uint32_t const dict_size = s->dict_size;

  T unscaled = 0;
  for (unsigned int i = 0; i < dtype_len_in; i++) {
    uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0;
    unscaled   = (unscaled << 8) | v;
  }
  // Shift the unscaled value up and back down when it isn't all 8 bytes,
  // which sign extend the value for correctly representing negative numbers.
  if (dtype_len_in < sizeof(T)) {
    unscaled <<= (sizeof(T) - dtype_len_in) * 8;
    unscaled >>= (sizeof(T) - dtype_len_in) * 8;
  }
  *dst = unscaled;
}

/**
 * @brief Output a small fixed-length value
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dst Pointer to row output data
 */
template <typename T>
inline __device__ void gpuOutputFast(volatile page_state_s* s, int src_pos, T* dst)
{
  const uint8_t* dict;
  uint32_t dict_pos, dict_size = s->dict_size;

  if (s->dict_base) {
    // Dictionary
    dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
    dict     = s->dict_base;
  } else {
    // Plain
    dict_pos = src_pos;
    dict     = s->data_start;
  }
  dict_pos *= (uint32_t)s->dtype_len_in;
  gpuStoreOutput(dst, dict, dict_pos, dict_size);
}

/**
 * @brief Output a N-byte value
 *
 * @param[in,out] s Page state input/output
 * @param[in] src_pos Source position
 * @param[in] dst8 Pointer to row output data
 * @param[in] len Length of element
 */
static __device__ void gpuOutputGeneric(volatile page_state_s* s,
                                        int src_pos,
                                        uint8_t* dst8,
                                        int len)
{
  const uint8_t* dict;
  uint32_t dict_pos, dict_size = s->dict_size;

  if (s->dict_base) {
    // Dictionary
    dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
    dict     = s->dict_base;
  } else {
    // Plain
    dict_pos = src_pos;
    dict     = s->data_start;
  }
  dict_pos *= (uint32_t)s->dtype_len_in;
  if (len & 3) {
    // Generic slow path: byte-wise copy for lengths not a multiple of 4.
    for (unsigned int i = 0; i < len; i++) {
      dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0;
    }
  } else {
    // Copy 4 bytes at a time
    const uint8_t* src8 = dict;
    unsigned int ofs    = 3 & reinterpret_cast<size_t>(src8);
    src8 -= ofs;  // align to 32-bit boundary
    ofs <<= 3;    // bytes -> bits
    for (unsigned int i = 0; i < len; i += 4) {
      uint32_t bytebuf;
      if (dict_pos < dict_size) {
        bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
        if (ofs) {
          uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
          bytebuf              = __funnelshift_r(bytebuf, bytebufnext, ofs);
        }
      } else {
        bytebuf = 0;
      }
      dict_pos += 4;
      *reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf;
    }
  }
}

/**
 * @brief Sets up block-local page state information from the global pages.
 *
 * Called by all threads of a decode block; thread 0 performs most of the
 * initialization while other threads help zero the nesting counters.
 *
 * @param[in, out] s The local page state to be filled in
 * @param[in] p The global page to be copied from
 * @param[in] chunks The global list of chunks
 * @param[in] min_row Crop all rows below min_row
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] is_decode_step If we are setting up for the decode step (instead of the preprocess
 * step)
 *
 * @return false if the page is a dictionary page (nothing to decode), true otherwise
 */
static __device__ bool setupLocalPageInfo(page_state_s* const s,
                                          PageInfo const* p,
                                          device_span<ColumnChunkDesc const> chunks,
                                          size_t min_row,
                                          size_t num_rows,
                                          bool is_decode_step)
{
  int t = threadIdx.x;
  int chunk_idx;

  // Fetch page info
  if (t == 0) s->page = *p;
  __syncthreads();

  if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; }

  // Fetch column chunk info
  chunk_idx = s->page.chunk_idx;
  if (t == 0) { s->col = chunks[chunk_idx]; }

  // zero nested value and valid counts
  int d = 0;
  while (d < s->page.num_nesting_levels) {
    if (d + t < s->page.num_nesting_levels) {
      s->page.nesting[d + t].valid_count = 0;
      s->page.nesting[d + t].value_count = 0;
      s->page.nesting[d + t].null_count  = 0;
    }
    d += blockDim.x;
  }
  __syncthreads();

  if (!t) {
    s->error = 0;

    // our starting row (absolute index) is
    // col.start_row == absolute row index
    // page.chunk-row == relative row index within the chunk
    size_t page_start_row = s->col.start_row + s->page.chunk_row;

    // IMPORTANT : nested schemas can have 0 rows in a page but still have
    // values. The case is:
    // - On page N-1, the last row starts, with 2/6 values encoded
    // - On page N, the remaining 4/6 values are encoded, but there are no new rows.
    // if (s->page.num_input_values > 0 && s->page.num_rows > 0) {
    if (s->page.num_input_values > 0) {
      uint8_t* cur = s->page.page_data;
      uint8_t* end = cur + s->page.uncompressed_page_size;

      uint32_t dtype_len_out = s->col.data_type >> 3;
      s->ts_scale            = 0;
      // Validate data type
      auto const data_type = s->col.data_type & 7;
      switch (data_type) {
        case BOOLEAN:
          s->dtype_len = 1;  // Boolean are stored as 1 byte on the output
          break;
        case INT32: [[fallthrough]];
        case FLOAT: s->dtype_len = 4; break;
        case INT64:
          if (s->col.ts_clock_rate) {
            int32_t units = 0;
            // Duration types are not included because no scaling is done when reading
            if (s->col.converted_type == TIMESTAMP_MILLIS) {
              units = cudf::timestamp_ms::period::den;
            } else if (s->col.converted_type == TIMESTAMP_MICROS) {
              units = cudf::timestamp_us::period::den;
            } else if (s->col.logical_type.TIMESTAMP.unit.isset.NANOS) {
              units = cudf::timestamp_ns::period::den;
            }
            if (units and units != s->col.ts_clock_rate) {
              // <0: divide by -ts_scale; >0: multiply by ts_scale (see page_state_s)
              s->ts_scale = (s->col.ts_clock_rate < units) ? -(units / s->col.ts_clock_rate)
                                                           : (s->col.ts_clock_rate / units);
            }
          }
          [[fallthrough]];
        case DOUBLE: s->dtype_len = 8; break;
        case INT96: s->dtype_len = 12; break;
        case BYTE_ARRAY:
          if (s->col.converted_type == DECIMAL) {
            auto const decimal_precision = s->col.decimal_precision;
            s->dtype_len                 = [decimal_precision]() {
              if (decimal_precision <= MAX_DECIMAL32_PRECISION) {
                return sizeof(int32_t);
              } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) {
                return sizeof(int64_t);
              } else {
                return sizeof(__int128_t);
              }
            }();
          } else {
            s->dtype_len = sizeof(string_index_pair);
          }
          break;
        default:  // FIXED_LEN_BYTE_ARRAY:
          s->dtype_len = dtype_len_out;
          s->error |= (s->dtype_len <= 0);
          break;
      }
      // Special check for downconversions
      s->dtype_len_in = s->dtype_len;
      if (s->col.converted_type == DECIMAL && data_type == FIXED_LEN_BYTE_ARRAY) {
        s->dtype_len = [dtype_len = s->dtype_len]() {
          if (dtype_len <= sizeof(int32_t)) {
            return sizeof(int32_t);
          } else if (dtype_len <= sizeof(int64_t)) {
            return sizeof(int64_t);
          } else {
            return sizeof(__int128_t);
          }
        }();
      } else if (data_type == INT32) {
        if (dtype_len_out == 1) {
          // INT8 output
          s->dtype_len = 1;
        } else if (dtype_len_out == 2) {
          // INT16 output
          s->dtype_len = 2;
        } else if (s->col.converted_type == TIME_MILLIS) {
          // INT64 output
          s->dtype_len = 8;
        }
      } else if (data_type == BYTE_ARRAY && dtype_len_out == 4) {
        s->dtype_len = 4;  // HASH32 output
      } else if (data_type == INT96) {
        s->dtype_len = 8;  // Convert to 64-bit timestamp
      }

      // NOTE: s->page.num_rows, s->col.chunk_row, s->first_row and s->num_rows will be
      // invalid/bogus during first pass of the preprocess step for nested types. this is ok
      // because we ignore these values in that stage.
      {
        auto const max_row = min_row + num_rows;

        // if we are totally outside the range of the input, do nothing
        if ((page_start_row > max_row) || (page_start_row + s->page.num_rows < min_row)) {
          s->first_row = 0;
          s->num_rows  = 0;
        }
        // otherwise
        else {
          s->first_row             = page_start_row >= min_row ? 0 : min_row - page_start_row;
          auto const max_page_rows = s->page.num_rows - s->first_row;
          s->num_rows              = (page_start_row + s->first_row) + max_page_rows <= max_row
                                       ? max_page_rows
                                       : max_row - (page_start_row + s->first_row);
        }
      }

      // during the decoding step we need to offset the global output buffers
      // for each level of nesting so that we write to the section this page
      // is responsible for.
      // - for flat schemas, we can do this directly by using row counts
      // - for nested schemas, these offsets are computed during the preprocess step
      //
      // NOTE: in a chunked read situation, s->col.column_data_base and s->col.valid_map_base
      // will be aliased to memory that has been freed when we get here in the non-decode step, so
      // we cannot check against nullptr. we'll just check a flag directly.
      if (is_decode_step) {
        int max_depth = s->col.max_nesting_depth;
        for (int idx = 0; idx < max_depth; idx++) {
          PageNestingInfo* pni = &s->page.nesting[idx];

          size_t output_offset;
          // schemas without lists
          if (s->col.max_level[level_type::REPETITION] == 0) {
            output_offset = page_start_row >= min_row ? page_start_row - min_row : 0;
          }
          // for schemas with lists, we've already got the exact value precomputed
          else {
            output_offset = pni->page_start_value;
          }

          pni->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]);

          if (pni->data_out != nullptr) {
            // anything below max depth with a valid data pointer must be a list, so the
            // element size is the size of the offset type.
            uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len;
            pni->data_out += (output_offset * len);
          }
          pni->valid_map = s->col.valid_map_base[idx];
          if (pni->valid_map != nullptr) {
            pni->valid_map += output_offset >> 5;
            pni->valid_map_offset = (int32_t)(output_offset & 0x1f);
          }
        }
      }
      s->first_output_value = 0;

      // Find the compressed size of repetition levels
      cur += InitLevelSection(s, cur, end, level_type::REPETITION);
      // Find the compressed size of definition levels
      cur += InitLevelSection(s, cur, end, level_type::DEFINITION);

      s->dict_bits = 0;
      s->dict_base = nullptr;
      s->dict_size = 0;
      switch (s->page.encoding) {
        case Encoding::PLAIN_DICTIONARY:
        case Encoding::RLE_DICTIONARY:
          // RLE-packed dictionary indices, first byte indicates index length in bits
          if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) {
            // String dictionary: use index
            s->dict_base = reinterpret_cast<const uint8_t*>(s->col.str_dict_index);
            s->dict_size = s->col.page_info[0].num_input_values * sizeof(string_index_pair);
          } else {
            s->dict_base =
              s->col.page_info[0].page_data;  // dictionary is always stored in the first page
            s->dict_size = s->col.page_info[0].uncompressed_page_size;
          }
          s->dict_run  = 0;
          s->dict_val  = 0;
          s->dict_bits = (cur < end) ? *cur++ : 0;
          if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; }
          break;
        case Encoding::PLAIN:
          s->dict_size = static_cast<int32_t>(end - cur);
          s->dict_val  = 0;
          if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; }
          break;
        case Encoding::RLE: s->dict_run = 0; break;
        default:
          s->error = 1;  // Unsupported encoding
          break;
      }
      if (cur > end) { s->error = 1; }
      s->lvl_end    = cur;
      s->data_start = cur;
      s->data_end   = end;
    } else {
      s->error = 1;
    }

    s->lvl_count[level_type::REPETITION] = 0;
    s->lvl_count[level_type::DEFINITION] = 0;
    s->nz_count                          = 0;
    s->num_input_values                  = s->page.num_input_values;
    s->dict_pos                          = 0;
    s->src_pos                           = 0;

    // for flat hierarchies, we can't know how many leaf values to skip unless we do a full
    // preprocess of the definition levels (since nulls will have no actual decodable value, there
    // is no direct correlation between # of rows and # of decodable values). so we will start
    // processing at the beginning of the value stream and disregard any indices that start
    // before the first row.
    if (s->col.max_level[level_type::REPETITION] == 0) {
      s->page.skipped_values      = 0;
      s->page.skipped_leaf_values = 0;
      s->input_value_count        = 0;
      s->input_row_count          = 0;
      s->input_leaf_count         = 0;

      s->row_index_lower_bound = -1;
    }
    // for nested hierarchies, we have run a preprocess that lets us skip directly to the values
    // we need to start decoding at
    else {
      // input_row_count translates to "how many rows we have processed so far", so since we are
      // skipping directly to where we want to start decoding, set it to first_row
      s->input_row_count = s->first_row;

      // return the lower bound to compare (page-relative) thread row index against. Explanation:
      // In the case of nested schemas, rows can span page boundaries. That is to say,
      // we can encounter the first value for row X on page M, but the last value for page M
      // might not be the last value for row X. page M+1 (or further) may contain the last value.
      //
      // This means that the first values we encounter for a given page (M+1) may not belong to the
      // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that
      // previous row is within the overall row bounds, include the values by allowing relative row
      // index -1
      int const max_row = (min_row + num_rows) - 1;
      if (min_row < page_start_row && max_row >= page_start_row - 1) {
        s->row_index_lower_bound = -1;
      } else {
        s->row_index_lower_bound = s->first_row;
      }

      // if we're in the decoding step, jump directly to the first
      // value we care about
      if (is_decode_step) {
        s->input_value_count = s->page.skipped_values > -1 ? s->page.skipped_values : 0;
      } else {
        s->input_value_count = 0;
        s->input_leaf_count  = 0;
        s->page.skipped_values =
          -1;  // magic number to indicate it hasn't been set for use inside UpdatePageSizes
        s->page.skipped_leaf_values = 0;
      }
    }

    __threadfence_block();
  }
  __syncthreads();
  return true;
}

/**
 * @brief Store a validity mask containing value_count bits into the output validity buffer of the
 * page.
 *
 * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is
 * also updated
 * @param[in] valid_mask The validity mask to be stored
 * @param[in] value_count # of bits in the validity mask
 */
static __device__ void store_validity(PageNestingInfo* pni,
                                      uint32_t valid_mask,
                                      int32_t value_count)
{
  int word_offset = pni->valid_map_offset / 32;
  int bit_offset  = pni->valid_map_offset % 32;
  // if we fit entirely in the output word
  if (bit_offset + value_count <= 32) {
    auto relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1);

    if (relevant_mask == ~0) {
      // full-word store: no other writer touches this word, plain store is safe
      pni->valid_map[word_offset] = valid_mask;
    } else {
      // partial word: clear then set only our bits, atomically
      atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
      atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset);
    }
  }
  // we're going to spill over into the next word.
  // note : writing both values here is the lazy/slow way. we could be writing just
  // the first word and rolling the remaining bits over into the next call.
  // however, some basic performance tests shows almost no difference between these two
  // methods. More detailed performance testing might be worthwhile here.
  else {
    uint32_t bits_left = 32 - bit_offset;

    // first word. strip bits_left bits off the beginning and store that
    uint32_t relevant_mask = ((1 << bits_left) - 1);
    uint32_t mask_word0    = valid_mask & relevant_mask;
    atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
    atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset);

    // second word. strip the remainder of the bits off the end and store that
    relevant_mask       = ((1 << (value_count - bits_left)) - 1);
    uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left);
    atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask));
    atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left);
  }

  pni->valid_map_offset += value_count;
}

/**
 * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level
 * D up to which we should consider them null or not.
 *
 * @param[out] start_depth The start nesting depth
 * @param[out] end_depth The end nesting depth (inclusive)
 * @param[out] d The definition level up to which added values are not-null.
if t is out of bounds, * d will be -1 * @param[in] s Local page information * @param[in] input_value_count The current count of input level values we have processed * @param[in] target_input_value_count The desired # of input level values we want to process * @param[in] t Thread index */ inline __device__ void get_nesting_bounds(int& start_depth, int& end_depth, int& d, page_state_s* s, int input_value_count, int32_t target_input_value_count, int t) { start_depth = -1; end_depth = -1; d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); d = s->def[index]; // if we have repetition (there are list columns involved) we have to // bound what nesting levels we apply values to if (s->col.max_level[level_type::REPETITION] > 0) { int r = s->rep[index]; start_depth = s->page.nesting[r].start_depth; end_depth = s->page.nesting[d].end_depth; } // for columns without repetition (even ones involving structs) we always // traverse the entire hierarchy. else { start_depth = 0; end_depth = s->col.max_nesting_depth - 1; } } } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. 
* * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s* s, int t) { // max nesting depth of the column int const max_depth = s->col.max_nesting_depth; bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread (the range of nesting depths we // will generate new value indices and validity bits for) int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int const is_new_row = start_depth == 0 ? 1 : 0; uint32_t const warp_row_count_mask = ballot(is_new_row); int32_t const thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within read row bounds? int const in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
1 : 0; // compute warp and thread value counts uint32_t const warp_count_mask = ballot((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // walk from 0 to max_depth uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo* pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int const in_nesting_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a non-null value uint32_t const is_valid = d >= pni->max_def_level && in_nesting_bounds ? 1 : 0; // compute warp and thread valid counts uint32_t const warp_valid_mask = // for flat schemas, a simple ballot_sync gives us the correct count and bit positions // because every value in the input matches to a value in the output !has_repetition ? ballot(is_valid) : // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so // the validity bit for thread t might actually represent output value t-6. the correct // position for thread t's bit is cur_value_count. for cuda 11 we could use // __reduce_or_sync(), but until then we have to do a warp reduce. 
WarpReduceOr32(is_valid << thread_value_count); thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index for value decoding if (is_valid && s_idx == max_depth - 1) { int const src_pos = pni->valid_count + thread_valid_count; int const dst_pos = pni->value_count + thread_value_count; // nz_idx is a mapping of src buffer indices to destination buffer indices s->nz_idx[rolling_index(src_pos)] = dst_pos; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth - 1) { uint32_t const next_warp_count_mask = ballot((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column and we're within nesting/row bounds // and we have a valid data_out pointer, it implies this is a list column, so // emit an offset. if (in_nesting_bounds && pni->data_out != nullptr) { int const idx = pni->value_count + thread_value_count; cudf::size_type const ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type*>(pni->data_out))[idx] = ofs; } } // nested schemas always read and write to the same bounds (that is, read and write positions // are already pre-bounded by first_row/num_rows). flat schemas will start reading at the // first value, even if that is before first_row, because we cannot trivially jump to // the correct position to start reading. 
since we are about to write the validity vector here // we need to adjust our computed mask to take into account the write row bounds. int const in_write_row_bounds = !has_repetition ? thread_row_index >= s->first_row && thread_row_index < (s->first_row + s->num_rows) : in_row_bounds; int const first_thread_in_write_range = !has_repetition ? __ffs(ballot(in_write_row_bounds)) - 1 : 0; // # of bits to of the validity mask to write out int const warp_valid_mask_bit_count = first_thread_in_write_range < 0 ? 0 : warp_value_count - first_thread_in_write_range; // increment count of valid values, count of total values, and update validity mask if (!t) { if (pni->valid_map != nullptr && warp_valid_mask_bit_count > 0) { uint32_t const warp_output_valid_mask = warp_valid_mask >> first_thread_in_write_range; store_validity(pni, warp_output_valid_mask, warp_valid_mask_bit_count); pni->null_count += warp_valid_mask_bit_count - __popc(warp_output_valid_mask); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); __syncwarp(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth - 1].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. 
Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. * * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s* s, int32_t target_leaf_count, int t) { bool has_repetition = s->col.max_level[level_type::REPETITION] > 0; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); __syncwarp(); // because the rep and def streams are encoded separately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; __syncwarp(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). 
*/ static __device__ void gpuUpdatePageSizes(page_state_s* s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int const max_depth = s->col.max_nesting_depth; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { int start_depth, end_depth, d; get_nesting_bounds( start_depth, end_depth, d, s, input_value_count, target_input_value_count, t); // count rows and leaf values int const is_new_row = start_depth == 0 ? 1 : 0; uint32_t const warp_row_count_mask = ballot(is_new_row); int const is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0; uint32_t const warp_leaf_count_mask = ballot(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 
1 : 0; uint32_t const row_bounds_mask = ballot(in_row_bounds); int const first_thread_in_range = __ffs(row_bounds_mask) - 1; // if we've found the beginning of the first row, mark down the position // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values) if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) { // how many values we've skipped in the rep/def levels s->page.skipped_values = input_value_count + first_thread_in_range; // how many values we've skipped in the actual data stream s->page.skipped_leaf_values = input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1)); } } // increment value counts across all nesting depths for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingInfo* pni = &s->page.nesting[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int const in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0; uint32_t const count_mask = ballot(in_nesting_bounds); if (!t) { pni->batch_size += __popc(count_mask); } } input_value_count += min(32, (target_input_value_count - input_value_count)); input_row_count += __popc(warp_row_count_mask); input_leaf_count += __popc(warp_leaf_count_mask); } // update final page value count if (!t) { s->input_value_count = target_input_value_count; s->input_leaf_count = input_leaf_count; s->input_row_count = input_row_count; } } __device__ size_type gpuGetStringSize(page_state_s* s, int target_count, int t) { auto dict_target_pos = target_count; size_type str_len = 0; if (s->dict_base) { auto const [new_target_pos, len] = gpuDecodeDictionaryIndices<true>(s, target_count, t); dict_target_pos = new_target_pos; str_len = len; } else if ((s->col.data_type & 7) == BYTE_ARRAY) { str_len = gpuInitStringDescriptors(s, target_count, t); } if (!t) { *(volatile int32_t*)&s->dict_pos = dict_target_pos; } return str_len; } /** * @brief Kernel for computing per-page column 
size information for all nesting levels. * * This function will write out the size field for each level of nesting. * * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read. Pass as INT_MAX to guarantee reading all rows * @param is_base_pass Whether or not this is the base pass. We first have to compute * the full size information of every page before we come through in a second (trim) pass * to determine what subset of rows in this page we should be reading * @param compute_string_sizes Whether or not we should be computing string sizes * (PageInfo::str_bytes) as part of the pass */ __global__ void __launch_bounds__(block_size) gpuComputePageSizes(PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, bool is_base_pass, bool compute_string_sizes) { __shared__ __align__(16) page_state_s state_g; page_state_s* const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; PageInfo* pp = &pages[page_idx]; if (!setupLocalPageInfo(s, pp, chunks, min_row, num_rows, false)) { return; } if (!t) { s->page.skipped_values = -1; s->page.skipped_leaf_values = 0; s->page.str_bytes = 0; s->input_row_count = 0; s->input_value_count = 0; // in the base pass, we're computing the number of rows, make sure we visit absolutely // everything if (is_base_pass) { s->first_row = 0; s->num_rows = INT_MAX; s->row_index_lower_bound = -1; } } // we only need to preprocess hierarchies with repetition in them (ie, hierarchies // containing lists anywhere within). bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0; compute_string_sizes = compute_string_sizes && ((s->col.data_type & 7) == BYTE_ARRAY && s->dtype_len != 4); // various early out optimizations: // - if this is a flat hierarchy (no lists) and is not a string column. 
in this case we don't need // to do // the expensive work of traversing the level data to determine sizes. we can just compute it // directly. if (!has_repetition && !compute_string_sizes) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { if (is_base_pass) { pp->nesting[i].size = pp->num_input_values; } pp->nesting[i].batch_size = pp->num_input_values; } d += blockDim.x; } return; } // - if this page is not at the beginning or end of the trim bounds, the batch size is // the full page size if (!is_base_pass && s->num_rows == s->page.num_rows) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].batch_size = pp->nesting[i].size; } d += blockDim.x; } return; } // - if this page is completely trimmed, zero out sizes. if (!is_base_pass && s->num_rows == 0) { int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].batch_size = 0; } d += blockDim.x; } return; } // at this point we are going to be fully recomputing batch information // zero sizes int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].batch_size = 0; } d += blockDim.x; } __syncthreads(); // optimization : it might be useful to have a version of gpuDecodeStream that could go wider than // 1 warp. Currently it only uses 1 warp so that it can overlap work with the value decoding step // when in the actual value decoding kernel. However, during this preprocess step we have no such // limits - we could go as wide as block_size if (t < 32) { constexpr int batch_size = 32; int target_input_count = batch_size; while (!s->error && s->input_value_count < s->num_input_values) { // decode repetition and definition levels. these will attempt to decode at // least up to the target, but may decode a few more. 
if (has_repetition) { gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION); __syncwarp(); // we may have decoded different amounts from each stream, so only process what we've been int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdatePageSizes(s, actual_input_count, t, !is_base_pass); if (compute_string_sizes) { auto const str_len = gpuGetStringSize(s, s->input_leaf_count, t); if (!t) { s->page.str_bytes += str_len; } } target_input_count = actual_input_count + batch_size; __syncwarp(); } } // update output results: // - real number of rows for the whole page // - nesting sizes for the whole page // - skipped value information for trimmed pages // - string bytes if (is_base_pass) { // nesting level 0 is the root column, so the size is also the # of rows if (!t) { pp->num_rows = s->page.nesting[0].batch_size; } // store off this batch size as the "full" size int d = 0; while (d < s->page.num_nesting_levels) { auto const i = d + t; if (i < s->page.num_nesting_levels) { pp->nesting[i].size = pp->nesting[i].batch_size; } d += blockDim.x; } } if (!t) { pp->skipped_values = s->page.skipped_values; pp->skipped_leaf_values = s->page.skipped_leaf_values; pp->str_bytes = s->page.str_bytes; } } /** * @brief Kernel for co the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). 
* * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read */ __global__ void __launch_bounds__(block_size) gpuDecodePageData( PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows) { __shared__ __align__(16) page_state_s state_g; page_state_s* const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, true)) { return; } // if we have no rows to do (eg, in a skip_rows/num_rows case) if (s->num_rows == 0) { return; } if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { out_thread0 = ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; if (t < out_thread0) { target_pos = min(src_pos + 2 * (block_size - out_thread0), s->nz_count + (block_size - out_thread0)); } else { target_pos = min(s->nz_count, src_pos + block_size - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices<false>(s, src_target_pos, t & 0x1f).first; } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; src_pos += t - out_thread0; // the position in the output column/buffer int dst_pos = s->nz_idx[rolling_index(src_pos)]; // for the flat hierarchy case we will be reading from the beginning of the value stream, // regardless of the value of first_row. so adjust our destination offset accordingly. // example: // - user has passed skip_rows = 2, so our first_row to output is 2 // - the row values we get from nz_idx will be // 0, 1, 2, 3, 4 .... // - by shifting these values by first_row, the sequence becomes // -1, -2, 0, 1, 2 ... // - so we will end up ignoring the first two input rows, and input rows 2..n will // get written to the output starting at position 0. // if (!has_repetition) { dst_pos -= s->first_row; } // target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values // before first_row) in the flat hierarchy case. if (src_pos < target_pos && dst_pos >= 0) { // src_pos represents the logical row position we want to read from. But in the case of // nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position // has to take into account the # of values we have to skip in the page to get to the // desired logical row. For flat hierarchies, skipped_leaf_values will always be 0. 
uint32_t val_src_pos = src_pos + skipped_leaf_values; // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void* dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len; if (dtype == BYTE_ARRAY) { if (s->col.converted_type == DECIMAL) { auto const [ptr, len] = gpuGetStringData(s, val_src_pos); auto const decimal_precision = s->col.decimal_precision; if (decimal_precision <= MAX_DECIMAL32_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int32_t*>(dst)); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int64_t*>(dst)); } else { gpuOutputByteArrayAsInt(ptr, len, static_cast<__int128_t*>(dst)); } } else { gpuOutputString(s, val_src_pos, dst); } } else if (dtype == BOOLEAN) { gpuOutputBoolean(s, val_src_pos, static_cast<uint8_t*>(dst)); } else if (s->col.converted_type == DECIMAL) { switch (dtype) { case INT32: gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); break; case INT64: gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); break; default: if (s->dtype_len_in <= sizeof(int32_t)) { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int32_t*>(dst)); } else if (s->dtype_len_in <= sizeof(int64_t)) { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<__int128_t*>(dst)); } break; } } else if (dtype == INT96) { gpuOutputInt96Timestamp(s, val_src_pos, static_cast<int64_t*>(dst)); } else if (dtype_len == 8) { if (s->dtype_len_in == 4) { // Reading INT32 TIME_MILLIS into 64-bit DURATION_MILLISECONDS // TIME_MILLIS is the only duration type stored as int32: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#deprecated-time-convertedtype gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); } else if (s->ts_scale) { gpuOutputInt64Timestamp(s, 
val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); } } else if (dtype_len == 4) { gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); } else { gpuOutputGeneric(s, val_src_pos, static_cast<uint8_t*>(dst), dtype_len); } } if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; } } __syncthreads(); } } } // anonymous namespace /** * @copydoc cudf::io::parquet::gpu::ComputePageSizes */ void ComputePageSizes(hostdevice_vector<PageInfo>& pages, hostdevice_vector<ColumnChunkDesc> const& chunks, size_t min_row, size_t num_rows, bool compute_num_rows, bool compute_string_sizes, rmm::cuda_stream_view stream) { dim3 dim_block(block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // This computes the size for the entire page, not taking row bounds into account. // If uses_custom_row_bounds is set to true, we have to do a second pass later that "trims" // the starting and ending read values to account for these bounds. gpuComputePageSizes<<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, compute_num_rows, compute_string_sizes); } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ void __host__ DecodePageData(hostdevice_vector<PageInfo>& pages, hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page gpuDecodePageData<<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows); } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
e07d1c75c6c0336d32764f56800c45bc6ac01490.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include "stats/mean.h" #include "stats/mean_center.h" #include "random/rng.h" #include "test_utils.h" #include "matrix/math.h" namespace MLCommon { namespace Stats { template <typename T> struct MeanCenterInputs { T tolerance, mean; int rows, cols; bool sample, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MeanCenterInputs<T>& dims) { return os; } template <typename T> class MeanCenterTest: public ::testing::TestWithParam<MeanCenterInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<MeanCenterInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; allocate(data, len); allocate(mean_act, cols); r.normal(data, len, params.mean, (T)1.0); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); meanCenter(data, mean_act, cols, rows, params.rowMajor); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); Matrix::matrixVectorBinarySub(data, mean_act, rows, cols, false); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(mean_act)); } protected: MeanCenterInputs<T> params; T *data, *mean_act; }; const std::vector<MeanCenterInputs<float> > inputsf = { {0.05f, 1.f, 1024, 32, true, false, 1234ULL}, {0.05f, 1.f, 1024, 64, true, false, 1234ULL}, {0.05f, 1.f, 1024, 128, true, false, 1234ULL}, {0.05f, 1.f, 1024, 256, true, false, 1234ULL}, {0.05f, -1.f, 1024, 32, false, false, 1234ULL}, {0.05f, -1.f, 1024, 64, false, false, 1234ULL}, {0.05f, -1.f, 1024, 128, false, false, 1234ULL}, {0.05f, -1.f, 1024, 256, false, false, 1234ULL}, {0.05f, 1.f, 1024, 32, true, true, 1234ULL}, {0.05f, 1.f, 1024, 64, true, true, 1234ULL}, {0.05f, 1.f, 1024, 128, true, true, 1234ULL}, {0.05f, 1.f, 1024, 256, true, true, 1234ULL}, {0.05f, -1.f, 
1024, 32, false, true, 1234ULL}, {0.05f, -1.f, 1024, 64, false, true, 1234ULL}, {0.05f, -1.f, 1024, 128, false, true, 1234ULL}, {0.05f, -1.f, 1024, 256, false, true, 1234ULL} }; const std::vector<MeanCenterInputs<double> > inputsd = { {0.05, 1.0, 1024, 32, true, false, 1234ULL}, {0.05, 1.0, 1024, 64, true, false, 1234ULL}, {0.05, 1.0, 1024, 128, true, false, 1234ULL}, {0.05, 1.0, 1024, 256, true, false, 1234ULL}, {0.05, -1.0, 1024, 32, false, false, 1234ULL}, {0.05, -1.0, 1024, 64, false, false, 1234ULL}, {0.05, -1.0, 1024, 128, false, false, 1234ULL}, {0.05, -1.0, 1024, 256, false, false, 1234ULL}, {0.05, 1.0, 1024, 32, true, true, 1234ULL}, {0.05, 1.0, 1024, 64, true, true, 1234ULL}, {0.05, 1.0, 1024, 128, true, true, 1234ULL}, {0.05, 1.0, 1024, 256, true, true, 1234ULL}, {0.05, -1.0, 1024, 32, false, true, 1234ULL}, {0.05, -1.0, 1024, 64, false, true, 1234ULL}, {0.05, -1.0, 1024, 128, false, true, 1234ULL}, {0.05, -1.0, 1024, 256, false, true, 1234ULL} }; typedef MeanCenterTest<float> MeanCenterTestF; TEST_P(MeanCenterTestF, Result) { ASSERT_TRUE(devArrMatch(0.f, mean_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef MeanCenterTest<double> MeanCenterTestD; TEST_P(MeanCenterTestD, Result){ ASSERT_TRUE(devArrMatch(0.0, mean_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
e07d1c75c6c0336d32764f56800c45bc6ac01490.cu
#include <gtest/gtest.h> #include "stats/mean.h" #include "stats/mean_center.h" #include "random/rng.h" #include "test_utils.h" #include "matrix/math.h" namespace MLCommon { namespace Stats { template <typename T> struct MeanCenterInputs { T tolerance, mean; int rows, cols; bool sample, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MeanCenterInputs<T>& dims) { return os; } template <typename T> class MeanCenterTest: public ::testing::TestWithParam<MeanCenterInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<MeanCenterInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; allocate(data, len); allocate(mean_act, cols); r.normal(data, len, params.mean, (T)1.0); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); meanCenter(data, mean_act, cols, rows, params.rowMajor); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); Matrix::matrixVectorBinarySub(data, mean_act, rows, cols, false); mean(mean_act, data, cols, rows, params.sample, params.rowMajor); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(mean_act)); } protected: MeanCenterInputs<T> params; T *data, *mean_act; }; const std::vector<MeanCenterInputs<float> > inputsf = { {0.05f, 1.f, 1024, 32, true, false, 1234ULL}, {0.05f, 1.f, 1024, 64, true, false, 1234ULL}, {0.05f, 1.f, 1024, 128, true, false, 1234ULL}, {0.05f, 1.f, 1024, 256, true, false, 1234ULL}, {0.05f, -1.f, 1024, 32, false, false, 1234ULL}, {0.05f, -1.f, 1024, 64, false, false, 1234ULL}, {0.05f, -1.f, 1024, 128, false, false, 1234ULL}, {0.05f, -1.f, 1024, 256, false, false, 1234ULL}, {0.05f, 1.f, 1024, 32, true, true, 1234ULL}, {0.05f, 1.f, 1024, 64, true, true, 1234ULL}, {0.05f, 1.f, 1024, 128, true, true, 1234ULL}, {0.05f, 1.f, 1024, 256, true, true, 1234ULL}, {0.05f, -1.f, 1024, 32, false, true, 1234ULL}, {0.05f, -1.f, 1024, 64, 
false, true, 1234ULL}, {0.05f, -1.f, 1024, 128, false, true, 1234ULL}, {0.05f, -1.f, 1024, 256, false, true, 1234ULL} }; const std::vector<MeanCenterInputs<double> > inputsd = { {0.05, 1.0, 1024, 32, true, false, 1234ULL}, {0.05, 1.0, 1024, 64, true, false, 1234ULL}, {0.05, 1.0, 1024, 128, true, false, 1234ULL}, {0.05, 1.0, 1024, 256, true, false, 1234ULL}, {0.05, -1.0, 1024, 32, false, false, 1234ULL}, {0.05, -1.0, 1024, 64, false, false, 1234ULL}, {0.05, -1.0, 1024, 128, false, false, 1234ULL}, {0.05, -1.0, 1024, 256, false, false, 1234ULL}, {0.05, 1.0, 1024, 32, true, true, 1234ULL}, {0.05, 1.0, 1024, 64, true, true, 1234ULL}, {0.05, 1.0, 1024, 128, true, true, 1234ULL}, {0.05, 1.0, 1024, 256, true, true, 1234ULL}, {0.05, -1.0, 1024, 32, false, true, 1234ULL}, {0.05, -1.0, 1024, 64, false, true, 1234ULL}, {0.05, -1.0, 1024, 128, false, true, 1234ULL}, {0.05, -1.0, 1024, 256, false, true, 1234ULL} }; typedef MeanCenterTest<float> MeanCenterTestF; TEST_P(MeanCenterTestF, Result) { ASSERT_TRUE(devArrMatch(0.f, mean_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef MeanCenterTest<double> MeanCenterTestD; TEST_P(MeanCenterTestD, Result){ ASSERT_TRUE(devArrMatch(0.0, mean_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
29f86b77a6047ce521e9629f958e79614ba95493.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "merge.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *img_all = NULL; hipMalloc(&img_all, XSIZE*YSIZE); unsigned char *img = NULL; hipMalloc(&img, XSIZE*YSIZE); float *selection = NULL; hipMalloc(&selection, XSIZE*YSIZE); int n = XSIZE*YSIZE; int stride = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( merge), dim3(gridBlock),dim3(threadBlock), 0, 0, img_all,img,selection,n,stride); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( merge), dim3(gridBlock),dim3(threadBlock), 0, 0, img_all,img,selection,n,stride); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( merge), dim3(gridBlock),dim3(threadBlock), 0, 0, img_all,img,selection,n,stride); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
29f86b77a6047ce521e9629f958e79614ba95493.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "merge.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *img_all = NULL; cudaMalloc(&img_all, XSIZE*YSIZE); unsigned char *img = NULL; cudaMalloc(&img, XSIZE*YSIZE); float *selection = NULL; cudaMalloc(&selection, XSIZE*YSIZE); int n = XSIZE*YSIZE; int stride = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); merge<<<gridBlock,threadBlock>>>(img_all,img,selection,n,stride); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { merge<<<gridBlock,threadBlock>>>(img_all,img,selection,n,stride); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { merge<<<gridBlock,threadBlock>>>(img_all,img,selection,n,stride); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
db6cc1874294b752f29c643a78ab317622962da1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define NUM_THREADS 1000 #define NUM_BLOCKS 100 #define HH 1e7 typedef struct { int width; int height; double* elements; } Matrix; extern "C" void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL); #define CUDA_CALL(x) do { if ((x) != hipSuccess) { \ printf("Error at %s : %d \n",__FILE__, __LINE__);\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if ((x) != HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s : %d\n",__FILE__, __LINE__);\ return EXIT_FAILURE;}} while(0) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } //################################################################### __global__ void setup_kernel(hiprandState_t *state) { int bid=blockIdx.x; int tid=threadIdx.x; int NPATHS = blockDim.x; int thread=bid*NPATHS+tid; // each thread gets same seed, different seq number, no offset hiprand_init(1234,thread,0,&state[thread]); } //################################################################## __global__ void gpu_fkpaths(hiprandState_t *state, double *dev_domain, Matrix dev_OXY, Matrix dev_UV, double *dev_KXY, Matrix dev_SITES, Matrix dev_FKSOL){ int bid = blockIdx.x; int tid = threadIdx.x; int NPATHS = blockDim.x; int thread = bid*NPATHS + tid; // copy state to local memory for efficiency; hiprandState_t localState=state[thread]; int M,N; M = dev_OXY.height; N = dev_OXY.width; double xc,yc; //x,y current double xn,yn; //x,y new double tau=0.0; double delx; delx=(dev_domain[1]-dev_domain[0])/(double)(N-1); double dely; dely=(dev_domain[3]-dev_domain[2])/(double)(M-1); xc = 
dev_SITES.elements[bid*dev_SITES.width + 0]; yc = dev_SITES.elements[bid*dev_SITES.width + 1]; int i,j; double uc,vc; tau = 0.0; while( ((xc - dev_domain[0])*(xc - dev_domain[1])<0) && ((yc - dev_domain[2])*(yc - dev_domain[3])<0) ){ //find uvindx j = ceil( (xc - dev_domain[0])/delx ); i = ceil( (yc - dev_domain[2])/dely ); if (j<0){ j=0; } else{ if (j>(N-1)) j=N-1; } if (i<0){ i=0; } else{ if (i>(M-1)) i=M-1; } uc=-dev_UV.elements[i*dev_UV.width+j]; vc=-dev_UV.elements[i*dev_UV.width+N+j]; xn = xc + HH * uc + sqrt(HH)*sqrt(2*dev_KXY[0])*hiprand_normal(&localState); yn = yc + HH * vc + sqrt(HH)*sqrt(2*dev_KXY[1])*hiprand_normal(&localState); xc=xn; yc=yn; tau = tau + HH; } double lam = 1e-11; int II,JJ; JJ = ceil( (xc - dev_domain[0])/delx ); II = ceil( (yc - dev_domain[2])/dely ); if (JJ<0){ JJ=0; } else{ if (JJ>(N-1)) JJ=N-1; } if (II<0){ II=0; } else{ if (II>(M-1)) II=M-1; } dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = dev_OXY.elements[II * dev_OXY.width + JJ] * exp(-lam*tau); //dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = (double)bid; // copy state back to global memory state[thread]=localState; } void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL){ int NOBS; int NPATHS; NOBS = FKSOL.height; NPATHS = FKSOL.width; double *dev_domain; gpuErrchk( hipMalloc( (void **)&dev_domain, 4*sizeof(double)) ); gpuErrchk( hipMemcpy(dev_domain, domain, 4*sizeof(double), hipMemcpyHostToDevice) ); Matrix dev_OXY; dev_OXY.height=OXY.height; dev_OXY.width=OXY.width; gpuErrchk( hipMalloc( (void **)&dev_OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double)) ); gpuErrchk( hipMemcpy(dev_OXY.elements, OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double), hipMemcpyHostToDevice) ); Matrix dev_UV; dev_UV.height=UV.height; dev_UV.width=UV.width; gpuErrchk( hipMalloc( (void **)&dev_UV.elements, dev_UV.height*dev_UV.width*sizeof(double)) ); gpuErrchk( hipMemcpy(dev_UV.elements, UV.elements, dev_UV.height*dev_UV.width*sizeof(double), 
hipMemcpyHostToDevice) ); double *dev_KXY; gpuErrchk( hipMalloc( (void **)&dev_KXY, 2*sizeof(double)) ); gpuErrchk( hipMemcpy(dev_KXY, KXY, 2*sizeof(double), hipMemcpyHostToDevice) ); Matrix dev_SITES; dev_SITES.height = SITES.height; dev_SITES.width = SITES.width; gpuErrchk( hipMalloc( (void **)&dev_SITES.elements, dev_SITES.height*dev_SITES.width*sizeof(double) ) ); gpuErrchk( hipMemcpy(dev_SITES.elements, SITES.elements, dev_SITES.height * dev_SITES.width * sizeof(double), hipMemcpyHostToDevice) ); Matrix dev_FKSOL; dev_FKSOL.height=NOBS; dev_FKSOL.width=NPATHS; gpuErrchk( hipMalloc( (void **)&dev_FKSOL.elements, dev_FKSOL.height*dev_FKSOL.width*sizeof(double)) ); gpuErrchk( hipPeekAtLastError() ); hiprandState_t *devStates; gpuErrchk ( hipMalloc( (void **)&devStates, NPATHS*sizeof(hiprandState_t)) ); hipLaunchKernelGGL(( setup_kernel), dim3(NOBS), dim3(NPATHS), 0, 0, devStates); gpuErrchk( hipPeekAtLastError() ); hipLaunchKernelGGL(( gpu_fkpaths), dim3(NOBS),dim3(NPATHS), 0, 0, devStates, dev_domain, dev_OXY, dev_UV, dev_KXY, dev_SITES, dev_FKSOL); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipMemcpy(FKSOL.elements, dev_FKSOL.elements, FKSOL.height * FKSOL.width * sizeof(double), hipMemcpyDeviceToHost) ); gpuErrchk( hipPeekAtLastError() ); //gpuErrchk( hipDeviceSynchronize() ); //printf("test = %f\n",FKSOL.elements[3]); //free hipFree(dev_domain); hipFree(dev_OXY.elements); hipFree(dev_UV.elements); hipFree(dev_KXY); hipFree(dev_SITES.elements); hipFree(dev_FKSOL.elements); //printf("%s\n", "done."); }
db6cc1874294b752f29c643a78ab317622962da1.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> #include <curand.h> #include <curand_kernel.h> #define NUM_THREADS 1000 #define NUM_BLOCKS 100 #define HH 1e7 typedef struct { int width; int height; double* elements; } Matrix; extern "C" void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL); #define CUDA_CALL(x) do { if ((x) != cudaSuccess) { \ printf("Error at %s : %d \n",__FILE__, __LINE__);\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if ((x) != CURAND_STATUS_SUCCESS) { \ printf("Error at %s : %d\n",__FILE__, __LINE__);\ return EXIT_FAILURE;}} while(0) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //################################################################### __global__ void setup_kernel(curandState *state) { int bid=blockIdx.x; int tid=threadIdx.x; int NPATHS = blockDim.x; int thread=bid*NPATHS+tid; // each thread gets same seed, different seq number, no offset curand_init(1234,thread,0,&state[thread]); } //################################################################## __global__ void gpu_fkpaths(curandState *state, double *dev_domain, Matrix dev_OXY, Matrix dev_UV, double *dev_KXY, Matrix dev_SITES, Matrix dev_FKSOL){ int bid = blockIdx.x; int tid = threadIdx.x; int NPATHS = blockDim.x; int thread = bid*NPATHS + tid; // copy state to local memory for efficiency; curandState localState=state[thread]; int M,N; M = dev_OXY.height; N = dev_OXY.width; double xc,yc; //x,y current double xn,yn; //x,y new double tau=0.0; double delx; delx=(dev_domain[1]-dev_domain[0])/(double)(N-1); double dely; dely=(dev_domain[3]-dev_domain[2])/(double)(M-1); xc = dev_SITES.elements[bid*dev_SITES.width + 0]; yc = dev_SITES.elements[bid*dev_SITES.width + 1]; 
int i,j; double uc,vc; tau = 0.0; while( ((xc - dev_domain[0])*(xc - dev_domain[1])<0) && ((yc - dev_domain[2])*(yc - dev_domain[3])<0) ){ //find uvindx j = ceil( (xc - dev_domain[0])/delx ); i = ceil( (yc - dev_domain[2])/dely ); if (j<0){ j=0; } else{ if (j>(N-1)) j=N-1; } if (i<0){ i=0; } else{ if (i>(M-1)) i=M-1; } uc=-dev_UV.elements[i*dev_UV.width+j]; vc=-dev_UV.elements[i*dev_UV.width+N+j]; xn = xc + HH * uc + sqrt(HH)*sqrt(2*dev_KXY[0])*curand_normal(&localState); yn = yc + HH * vc + sqrt(HH)*sqrt(2*dev_KXY[1])*curand_normal(&localState); xc=xn; yc=yn; tau = tau + HH; } double lam = 1e-11; int II,JJ; JJ = ceil( (xc - dev_domain[0])/delx ); II = ceil( (yc - dev_domain[2])/dely ); if (JJ<0){ JJ=0; } else{ if (JJ>(N-1)) JJ=N-1; } if (II<0){ II=0; } else{ if (II>(M-1)) II=M-1; } dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = dev_OXY.elements[II * dev_OXY.width + JJ] * exp(-lam*tau); //dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = (double)bid; // copy state back to global memory state[thread]=localState; } void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL){ int NOBS; int NPATHS; NOBS = FKSOL.height; NPATHS = FKSOL.width; double *dev_domain; gpuErrchk( cudaMalloc( (void **)&dev_domain, 4*sizeof(double)) ); gpuErrchk( cudaMemcpy(dev_domain, domain, 4*sizeof(double), cudaMemcpyHostToDevice) ); Matrix dev_OXY; dev_OXY.height=OXY.height; dev_OXY.width=OXY.width; gpuErrchk( cudaMalloc( (void **)&dev_OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double)) ); gpuErrchk( cudaMemcpy(dev_OXY.elements, OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double), cudaMemcpyHostToDevice) ); Matrix dev_UV; dev_UV.height=UV.height; dev_UV.width=UV.width; gpuErrchk( cudaMalloc( (void **)&dev_UV.elements, dev_UV.height*dev_UV.width*sizeof(double)) ); gpuErrchk( cudaMemcpy(dev_UV.elements, UV.elements, dev_UV.height*dev_UV.width*sizeof(double), cudaMemcpyHostToDevice) ); double *dev_KXY; gpuErrchk( cudaMalloc( (void **)&dev_KXY, 
2*sizeof(double)) ); gpuErrchk( cudaMemcpy(dev_KXY, KXY, 2*sizeof(double), cudaMemcpyHostToDevice) ); Matrix dev_SITES; dev_SITES.height = SITES.height; dev_SITES.width = SITES.width; gpuErrchk( cudaMalloc( (void **)&dev_SITES.elements, dev_SITES.height*dev_SITES.width*sizeof(double) ) ); gpuErrchk( cudaMemcpy(dev_SITES.elements, SITES.elements, dev_SITES.height * dev_SITES.width * sizeof(double), cudaMemcpyHostToDevice) ); Matrix dev_FKSOL; dev_FKSOL.height=NOBS; dev_FKSOL.width=NPATHS; gpuErrchk( cudaMalloc( (void **)&dev_FKSOL.elements, dev_FKSOL.height*dev_FKSOL.width*sizeof(double)) ); gpuErrchk( cudaPeekAtLastError() ); curandState *devStates; gpuErrchk ( cudaMalloc( (void **)&devStates, NPATHS*sizeof(curandState)) ); setup_kernel<<<NOBS, NPATHS>>>(devStates); gpuErrchk( cudaPeekAtLastError() ); gpu_fkpaths<<<NOBS,NPATHS>>>(devStates, dev_domain, dev_OXY, dev_UV, dev_KXY, dev_SITES, dev_FKSOL); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaMemcpy(FKSOL.elements, dev_FKSOL.elements, FKSOL.height * FKSOL.width * sizeof(double), cudaMemcpyDeviceToHost) ); gpuErrchk( cudaPeekAtLastError() ); //gpuErrchk( cudaDeviceSynchronize() ); //printf("test = %f\n",FKSOL.elements[3]); //free cudaFree(dev_domain); cudaFree(dev_OXY.elements); cudaFree(dev_UV.elements); cudaFree(dev_KXY); cudaFree(dev_SITES.elements); cudaFree(dev_FKSOL.elements); //printf("%s\n", "done."); }
0f5a3636d1c76f80990d6d5b53f4bb175838e031.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*f * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. 
*/ #define TILE_WIDTH 32 // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <matrixmul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(52); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation 
complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); for(int i=0;i<P.width*P.height;i++) { if (fabs(P.elements[i]-reference.elements[i])>0.0001) //printf("%i %f\n",i,P.elements[i]/reference.elements[i]); //printf("%i %i\n",i/P.width,i % P.width); printf("%i %f %f\n",i,P.elements[i],reference.elements[i]); } if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); CopyToDeviceMatrix(Pd, P); // Clear memory // Setup the execution configuration dim3 gridDim(1+(P.width-1)/TILE_WIDTH,1+(P.height-1)/TILE_WIDTH); // dim3 gridDim(P.width/TILE_WIDTH,P.height/TILE_WIDTH); dim3 blockDim(TILE_WIDTH, TILE_WIDTH); // Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel) , dim3(gridDim), dim3(blockDim), 0, 0, Md, Nd, Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. 
Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
0f5a3636d1c76f80990d6d5b53f4bb175838e031.cu
/*f * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. 
*/ #define TILE_WIDTH 32 // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <matrixmul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(52); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation 
complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); for(int i=0;i<P.width*P.height;i++) { if (fabs(P.elements[i]-reference.elements[i])>0.0001) //printf("%i %f\n",i,P.elements[i]/reference.elements[i]); //printf("%i %i\n",i/P.width,i % P.width); printf("%i %f %f\n",i,P.elements[i],reference.elements[i]); } if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); CopyToDeviceMatrix(Pd, P); // Clear memory // Setup the execution configuration dim3 gridDim(1+(P.width-1)/TILE_WIDTH,1+(P.height-1)/TILE_WIDTH); // dim3 gridDim(P.width/TILE_WIDTH,P.height/TILE_WIDTH); dim3 blockDim(TILE_WIDTH, TILE_WIDTH); // Launch the device computation threads! MatrixMulKernel <<<gridDim, blockDim>>> (Md, Nd, Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. 
Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
fab0636fc38e035ea6ff8128917b68840291fe69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // R-FCN // Written by Yi Li, 2016. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. 
: out_sum/bin_area; mapping_channel[index] = c; } } template <typename Dtype> void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIPoolingBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } template <typename Dtype> void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer); } // namespace caffe
fab0636fc38e035ea6ff8128917b68840291fe69.cu
// -------------------------------------------------------- // R-FCN // Written by Yi Li, 2016. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. 
: out_sum/bin_area; mapping_channel[index] = c; } } template <typename Dtype> void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIPoolingBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } template <typename Dtype> void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer); } // namespace caffe
d66ffe45f7cc6cf9088a719f88fd0392a9f5ec05.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <cmath> #include <iostream> #include <fstream> #include <random> #include <typeinfo> #include <thread> #include <cstdlib> #include <algorithm> #include <cassert> #include <numeric> #include <string> #include "norms_cpu.cpp" static const int num_threads = 512; template<typename T> __global__ void sum_reduction(const T* vec, T* res, const int n, const bool power, T p) { __shared__ T partial_sum[num_threads]; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { if (power) { partial_sum[threadIdx.x] = pow(abs(vec[tid]), p); } else { partial_sum[threadIdx.x] = vec[tid]; } } else { partial_sum[threadIdx.x] = 0.0; } // Sync the threads to have all of the needed data in the shared memory __syncthreads(); // Do the reduction (example with 8 numbers): // a b c d e f g h // a+e b+f c+g d+h e f g h // a+e+c+g b+f+c+g c+g d+h e f g h // a+e+c+g+b+f+c+g b+f+c+g c+g d+h e f g h // And the result is just the 0-th element for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s]; } // We do need to wait for all of the threads to do the sums __syncthreads(); } if (threadIdx.x == 0) { if (gridDim.x == 1) { res[blockIdx.x] = pow(partial_sum[0], (T)(1.0/p)); } else { res[blockIdx.x] = partial_sum[0]; } } } template<typename T> T gpu_lp(T* vec, int vector_length, T p) { // Host-side variables std::vector <T> pows(vector_length); T res; size_t bytes = vector_length * sizeof(T); // ceil(vector_length / num_threads) int num_blocks = (vector_length + num_threads - 1) / num_threads; // Pointers to the device-side variables T *d_vec, *d_res1, *d_res2; // Allocate the memory on the GPU and move the vector (with error handling) 
hipError_t err = hipSuccess; err = hipMalloc(&d_vec, bytes); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMalloc(&d_res1, bytes); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMalloc(&d_res2, bytes); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMemcpy(d_vec, vec, bytes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cout << "Error copying memory to device: " << hipGetErrorString(err) << "\n"; return -1; } // The first sum-reduction. Each block gives back a number, so the first num_blocks elements // of the result d_res will have the needed information for us (the partial sums). hipLaunchKernelGGL(( sum_reduction), dim3(num_blocks), dim3(num_threads), 0, 0, d_vec, d_res1, vector_length, true, p); err = hipGetLastError(); if (err != hipSuccess) { std::cout << "CUDA error in kernel call (during sum reduction): " << hipGetErrorString(err) << "\n"; return -1; } // Since a reduction gives us back num_blocks elements, we need to do it until num_blocks == 1. int left; int num_blocks_red = num_blocks; int source_counter = 1; do { left = num_blocks_red; num_blocks_red = (left + num_threads - 1) / num_threads; if (source_counter == 1) { hipLaunchKernelGGL(( sum_reduction), dim3(num_blocks_red), dim3(num_threads), 0, 0, d_res1, d_res2, left, false, p); source_counter = 2; } else { hipLaunchKernelGGL(( sum_reduction), dim3(num_blocks_red), dim3(num_threads), 0, 0, d_res2, d_res1, left, false, p); source_counter = 1; } err = hipGetLastError(); if (err != hipSuccess) { std::cout << "CUDA error in kernel call (during sum reduction): " << hipGetErrorString(err) << "\n"; return -1; } } while (num_blocks_red > 1); // Copying back to the host (only one number; the 0-th element of the d_res), with error handling. 
if (source_counter == 1) { err = hipMemcpy(&res, d_res1, sizeof(T), hipMemcpyDeviceToHost); } else { err = hipMemcpy(&res, d_res2, sizeof(T), hipMemcpyDeviceToHost); } if (err != hipSuccess) { std::cout << "Error copying memory to host: " << hipGetErrorString(err) << "\n"; return -1; } // Freeing the memory on the device. Not doing so can cause memory-leak. err = hipFree(d_vec); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } err = hipFree(d_res1); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } err = hipFree(d_res2); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } return res; } template<typename T> void one_vector_test(int vector_length, T p, T error) { std::vector <T> vec(vector_length); for (int i = 0; i < vector_length; i++) { vec[i] = (T)1; } auto res_gpu = gpu_lp(vec.data(), vector_length, p); auto res_corr = ::pow(vector_length, (T)(1.0 / p)); if (std::abs(res_corr - res_gpu) < error) { std::cout << "The one-vector test with length " << vector_length << " and p = " << p << " was successful." << std::endl; } else { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " failed." 
<< std::endl; std::cout << "The GPU result: " << res_gpu << std::endl; std::cout << "The analytical reference: " << res_corr << std::endl; std::cout << "The absolute difference: " << std::abs(res_gpu - res_corr) << std::endl; } } template<typename T> void random_vector_test(int vector_length, T p, T error, int cpu_threads) { std::vector <T> vec(vector_length); std::random_device rd; std::mt19937 e2(rd()); std::uniform_real_distribution<> dist(1.0, 1.0); for (int i = 0; i < vector_length; i++) { vec[i] = (T)(dist(e2)); } auto res_gpu = gpu_lp(vec.data(), vector_length, p); auto res_cpu = parallel_lp(vec, vector_length, p, cpu_threads); if (std::abs(res_cpu - res_gpu) < error) { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " was successful." << std::endl; } else { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " failed." << std::endl; std::cout << "The GPU result: " << res_gpu << std::endl; std::cout << "The CPU reference: " << res_cpu << std::endl; std::cout << "The absolute difference: " << std::abs(res_gpu - res_cpu) << std::endl; } } int main() { std::cout << "Validating the GPU calculations..." <<std::endl; one_vector_test(100'000, 1.0f, 1e-4f); one_vector_test(100'000, 2.0, 1e-6); for (int i = 1; i < 10; i++) { if (i % 2 == 0) { random_vector_test(1'000'000, (float)i, 1e-4f, 12); } else { random_vector_test(10'000'000, (double)i, 1e-6, 12); } } return 0; }
d66ffe45f7cc6cf9088a719f88fd0392a9f5ec05.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <cmath> #include <iostream> #include <fstream> #include <random> #include <typeinfo> #include <thread> #include <cstdlib> #include <algorithm> #include <cassert> #include <numeric> #include <string> #include "norms_cpu.cpp" static const int num_threads = 512; template<typename T> __global__ void sum_reduction(const T* vec, T* res, const int n, const bool power, T p) { __shared__ T partial_sum[num_threads]; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { if (power) { partial_sum[threadIdx.x] = pow(abs(vec[tid]), p); } else { partial_sum[threadIdx.x] = vec[tid]; } } else { partial_sum[threadIdx.x] = 0.0; } // Sync the threads to have all of the needed data in the shared memory __syncthreads(); // Do the reduction (example with 8 numbers): // a b c d e f g h // a+e b+f c+g d+h e f g h // a+e+c+g b+f+c+g c+g d+h e f g h // a+e+c+g+b+f+c+g b+f+c+g c+g d+h e f g h // And the result is just the 0-th element for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s]; } // We do need to wait for all of the threads to do the sums __syncthreads(); } if (threadIdx.x == 0) { if (gridDim.x == 1) { res[blockIdx.x] = pow(partial_sum[0], (T)(1.0/p)); } else { res[blockIdx.x] = partial_sum[0]; } } } template<typename T> T gpu_lp(T* vec, int vector_length, T p) { // Host-side variables std::vector <T> pows(vector_length); T res; size_t bytes = vector_length * sizeof(T); // ceil(vector_length / num_threads) int num_blocks = (vector_length + num_threads - 1) / num_threads; // Pointers to the device-side variables T *d_vec, *d_res1, *d_res2; // Allocate the memory on the GPU and move the vector (with error handling) cudaError_t err = cudaSuccess; err = cudaMalloc(&d_vec, bytes); if (err != 
cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMalloc(&d_res1, bytes); if (err != cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMalloc(&d_res2, bytes); if (err != cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMemcpy(d_vec, vec, bytes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cout << "Error copying memory to device: " << cudaGetErrorString(err) << "\n"; return -1; } // The first sum-reduction. Each block gives back a number, so the first num_blocks elements // of the result d_res will have the needed information for us (the partial sums). sum_reduction<<<num_blocks, num_threads>>>(d_vec, d_res1, vector_length, true, p); err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << "CUDA error in kernel call (during sum reduction): " << cudaGetErrorString(err) << "\n"; return -1; } // Since a reduction gives us back num_blocks elements, we need to do it until num_blocks == 1. int left; int num_blocks_red = num_blocks; int source_counter = 1; do { left = num_blocks_red; num_blocks_red = (left + num_threads - 1) / num_threads; if (source_counter == 1) { sum_reduction<<<num_blocks_red, num_threads>>>(d_res1, d_res2, left, false, p); source_counter = 2; } else { sum_reduction<<<num_blocks_red, num_threads>>>(d_res2, d_res1, left, false, p); source_counter = 1; } err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << "CUDA error in kernel call (during sum reduction): " << cudaGetErrorString(err) << "\n"; return -1; } } while (num_blocks_red > 1); // Copying back to the host (only one number; the 0-th element of the d_res), with error handling. 
if (source_counter == 1) { err = cudaMemcpy(&res, d_res1, sizeof(T), cudaMemcpyDeviceToHost); } else { err = cudaMemcpy(&res, d_res2, sizeof(T), cudaMemcpyDeviceToHost); } if (err != cudaSuccess) { std::cout << "Error copying memory to host: " << cudaGetErrorString(err) << "\n"; return -1; } // Freeing the memory on the device. Not doing so can cause memory-leak. err = cudaFree(d_vec); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaFree(d_res1); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaFree(d_res2); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " << cudaGetErrorString(err) << "\n"; return -1; } return res; } template<typename T> void one_vector_test(int vector_length, T p, T error) { std::vector <T> vec(vector_length); for (int i = 0; i < vector_length; i++) { vec[i] = (T)1; } auto res_gpu = gpu_lp(vec.data(), vector_length, p); auto res_corr = std::pow(vector_length, (T)(1.0 / p)); if (std::abs(res_corr - res_gpu) < error) { std::cout << "The one-vector test with length " << vector_length << " and p = " << p << " was successful." << std::endl; } else { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " failed." 
<< std::endl; std::cout << "The GPU result: " << res_gpu << std::endl; std::cout << "The analytical reference: " << res_corr << std::endl; std::cout << "The absolute difference: " << std::abs(res_gpu - res_corr) << std::endl; } } template<typename T> void random_vector_test(int vector_length, T p, T error, int cpu_threads) { std::vector <T> vec(vector_length); std::random_device rd; std::mt19937 e2(rd()); std::uniform_real_distribution<> dist(1.0, 1.0); for (int i = 0; i < vector_length; i++) { vec[i] = (T)(dist(e2)); } auto res_gpu = gpu_lp(vec.data(), vector_length, p); auto res_cpu = parallel_lp(vec, vector_length, p, cpu_threads); if (std::abs(res_cpu - res_gpu) < error) { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " was successful." << std::endl; } else { std::cout << "The random vector test with length " << vector_length << " and p = " << p << " failed." << std::endl; std::cout << "The GPU result: " << res_gpu << std::endl; std::cout << "The CPU reference: " << res_cpu << std::endl; std::cout << "The absolute difference: " << std::abs(res_gpu - res_cpu) << std::endl; } } int main() { std::cout << "Validating the GPU calculations..." <<std::endl; one_vector_test(100'000, 1.0f, 1e-4f); one_vector_test(100'000, 2.0, 1e-6); for (int i = 1; i < 10; i++) { if (i % 2 == 0) { random_vector_test(1'000'000, (float)i, 1e-4f, 12); } else { random_vector_test(10'000'000, (double)i, 1e-6, 12); } } return 0; }
03e31680c22627b00eb63b877a397f5b5e44f910.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <unordered_map> #include "cusparse_util.hpp" #include "spgemm.hpp" #include "cusp_util.hpp" #include <chrono> #include <essl.h> template <typename T> __device__ __forceinline__ T combiner(T a, T b) { return a*b; } template <typename T> __device__ __forceinline__ T reducer(T a, T b) { return a+b; } template <typename T> __device__ __forceinline__ T initializer() { return T(); } template<typename T> __global__ void spmm_test2( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, T* A_csrVal, T* B_dnVal, T* C_dnVal, int b_ld, int c_ld ) { extern __shared__ int sh[]; int *colInd_sh = sh; T *val_sh = (T *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; T val; T acc1 = initializer<T>(); T acc2 = initializer<T>(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; // MODIFY colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { // MODIFY (all) offset = colInd_sh[(shmem_offset+kk)] + cid; val = 
val_sh[(shmem_offset+kk)]; // acc1 += val*B_dnVal[offset]; acc1 = reducer(acc1, combiner(val, B_dnVal[offset])); // acc2 += val*B_dnVal[offset+32]; acc2 = reducer(acc2, combiner(val, B_dnVal[offset+32])); } __syncwarp(); } // MODIFY (C, all) offset = rid*B_ncols+cid; C_dnVal[offset] = acc1; C_dnVal[offset+32] = acc2; } else { int nout = (B_ncols-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; // MODIFY colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { val = val_sh[(shmem_offset+kk)]; offset = colInd_sh[(shmem_offset+kk)] + cid; // MODIFY (all) if (nout>0) { // acc1 += val*B_dnVal[offset]; acc1 = reducer(acc1, combiner(val, B_dnVal[offset])); } if (nout>1) { // acc2 += val*B_dnVal[offset+32]; acc2 = reducer(acc1, combiner(val, B_dnVal[offset+32])); } } __syncwarp(); } // MODIFY (C, all) offset = rid*B_ncols+cid; if (nout>0) { C_dnVal[offset] = acc1; } if (nout>1) { C_dnVal[(offset+32)] = acc2; } } } } // QUESTION: what to put for tile_row? 
void spmmWrapper(int tile_row, int A_nrows, int B_ncols, int *A_rowPtr, int *A_colInd, float *A_val, float *B, float *C, int b_ld, int c_ld) { hipLaunchKernelGGL(( spmm_test2<float>), dim3(dim3((A_nrows+tile_row-1)/tile_row, (B_ncols+63)/64, 1)), dim3(dim3(32, tile_row, 1)), 32*tile_row*(sizeof(int)+sizeof(float)),0, A_nrows, B_ncols, A_rowPtr, A_colInd, A_val, B, C, b_ld, c_ld ); hipError_t error; error = hipGetLastError(); CUDA_CHECK(error); } namespace BCL { namespace cuda { template <typename AMatrixType, typename BMatrixType, typename CMatrixType> void spmm_gspmm(AMatrixType& a, BMatrixType& b, CMatrixType& c) { if (a.nnz() == 0) { return; } spmmWrapper(4, a.shape()[0], b.shape()[1], a.rowptr_data(), a.colind_data(), a.values_data(), b.data(), c.data(), b.ld(), c.ld()); } } } int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using allocator_type = BCL::cuda::bcl_allocator<T>; std::string fname = "/autofs/nccs-svm1_home2/b2v/pkg/cusplibrary/examples/Matmul/chesapeake_general.mtx"; BCL::CSRMatrix<T, graphblas::Index> mat(fname); auto local_a = BCL::cuda::to_gpu<T, graphblas::Index, allocator_type>(mat); size_t m = local_a.shape()[0]; size_t k = local_a.shape()[1]; size_t n = 8; BCL::cuda::CudaMatrix<T, allocator_type> local_b({k, n}); BCL::cuda::CudaMatrix<T, allocator_type> local_c_cusparse({m, n}); BCL::cuda::CudaMatrix<T, allocator_type> local_c_gspmm({m, n}); local_b = 1; local_c_cusparse = 0; local_c_gspmm = 0; BCL::cuda::spmm_cusparse(local_a, local_b, local_c_cusparse); BCL::cuda::spmm_gspmm(local_a, local_b, local_c_gspmm); std::vector<T> cusparse_data(local_c_cusparse.size()); hipMemcpy(cusparse_data.data(), local_c_cusparse.data(), cusparse_data.size()*sizeof(T), hipMemcpyDeviceToHost); std::vector<T> gspmm_data(local_c_gspmm.size()); hipMemcpy(gspmm_data.data(), local_c_gspmm.data(), gspmm_data.size()*sizeof(T), hipMemcpyDeviceToHost); bool print = false; T eps = 1.0e-5; size_t matching = 0; for (size_t i = 0; i < 
local_c_gspmm.shape()[0]; i++) { for (size_t j = 0; j < local_c_gspmm.shape()[1]; j++) { size_t idx = i + j*local_c_gspmm.shape()[0]; size_t cusp_idx = i*local_c_gspmm.shape()[1] + j; if (std::abs(cusparse_data[idx] - gspmm_data[cusp_idx]) > eps) { assert(false); if (print) { printf("O %2.2lf != %2.2lf ", cusparse_data[idx], gspmm_data[cusp_idx]); } } else { if (print) { printf("X %2.2lf == %2.2lf ", cusparse_data[idx], gspmm_data[cusp_idx]); } matching++; } } if (print) { printf("\n"); } } BCL::finalize(); return 0; }
03e31680c22627b00eb63b877a397f5b5e44f910.cu
// SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <unordered_map> #include "cusparse_util.hpp" #include "spgemm.hpp" #include "cusp_util.hpp" #include <chrono> #include <essl.h> template <typename T> __device__ __forceinline__ T combiner(T a, T b) { return a*b; } template <typename T> __device__ __forceinline__ T reducer(T a, T b) { return a+b; } template <typename T> __device__ __forceinline__ T initializer() { return T(); } template<typename T> __global__ void spmm_test2( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, T* A_csrVal, T* B_dnVal, T* C_dnVal, int b_ld, int c_ld ) { extern __shared__ int sh[]; int *colInd_sh = sh; T *val_sh = (T *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; T val; T acc1 = initializer<T>(); T acc2 = initializer<T>(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; // MODIFY colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { // MODIFY (all) offset = colInd_sh[(shmem_offset+kk)] + cid; val = val_sh[(shmem_offset+kk)]; // acc1 += val*B_dnVal[offset]; acc1 = reducer(acc1, combiner(val, 
B_dnVal[offset])); // acc2 += val*B_dnVal[offset+32]; acc2 = reducer(acc2, combiner(val, B_dnVal[offset+32])); } __syncwarp(); } // MODIFY (C, all) offset = rid*B_ncols+cid; C_dnVal[offset] = acc1; C_dnVal[offset+32] = acc2; } else { int nout = (B_ncols-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; // MODIFY colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { val = val_sh[(shmem_offset+kk)]; offset = colInd_sh[(shmem_offset+kk)] + cid; // MODIFY (all) if (nout>0) { // acc1 += val*B_dnVal[offset]; acc1 = reducer(acc1, combiner(val, B_dnVal[offset])); } if (nout>1) { // acc2 += val*B_dnVal[offset+32]; acc2 = reducer(acc1, combiner(val, B_dnVal[offset+32])); } } __syncwarp(); } // MODIFY (C, all) offset = rid*B_ncols+cid; if (nout>0) { C_dnVal[offset] = acc1; } if (nout>1) { C_dnVal[(offset+32)] = acc2; } } } } // QUESTION: what to put for tile_row? void spmmWrapper(int tile_row, int A_nrows, int B_ncols, int *A_rowPtr, int *A_colInd, float *A_val, float *B, float *C, int b_ld, int c_ld) { spmm_test2<float><<<dim3((A_nrows+tile_row-1)/tile_row, (B_ncols+63)/64, 1), dim3(32, tile_row, 1), 32*tile_row*(sizeof(int)+sizeof(float)),0>>> ( A_nrows, B_ncols, A_rowPtr, A_colInd, A_val, B, C, b_ld, c_ld ); cudaError_t error; error = cudaGetLastError(); CUDA_CHECK(error); } namespace BCL { namespace cuda { template <typename AMatrixType, typename BMatrixType, typename CMatrixType> void spmm_gspmm(AMatrixType& a, BMatrixType& b, CMatrixType& c) { if (a.nnz() == 0) { return; } spmmWrapper(4, a.shape()[0], b.shape()[1], a.rowptr_data(), a.colind_data(), a.values_data(), b.data(), c.data(), b.ld(), c.ld()); } } } int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using allocator_type = BCL::cuda::bcl_allocator<T>; std::string fname = "/autofs/nccs-svm1_home2/b2v/pkg/cusplibrary/examples/Matmul/chesapeake_general.mtx"; BCL::CSRMatrix<T, 
graphblas::Index> mat(fname); auto local_a = BCL::cuda::to_gpu<T, graphblas::Index, allocator_type>(mat); size_t m = local_a.shape()[0]; size_t k = local_a.shape()[1]; size_t n = 8; BCL::cuda::CudaMatrix<T, allocator_type> local_b({k, n}); BCL::cuda::CudaMatrix<T, allocator_type> local_c_cusparse({m, n}); BCL::cuda::CudaMatrix<T, allocator_type> local_c_gspmm({m, n}); local_b = 1; local_c_cusparse = 0; local_c_gspmm = 0; BCL::cuda::spmm_cusparse(local_a, local_b, local_c_cusparse); BCL::cuda::spmm_gspmm(local_a, local_b, local_c_gspmm); std::vector<T> cusparse_data(local_c_cusparse.size()); cudaMemcpy(cusparse_data.data(), local_c_cusparse.data(), cusparse_data.size()*sizeof(T), cudaMemcpyDeviceToHost); std::vector<T> gspmm_data(local_c_gspmm.size()); cudaMemcpy(gspmm_data.data(), local_c_gspmm.data(), gspmm_data.size()*sizeof(T), cudaMemcpyDeviceToHost); bool print = false; T eps = 1.0e-5; size_t matching = 0; for (size_t i = 0; i < local_c_gspmm.shape()[0]; i++) { for (size_t j = 0; j < local_c_gspmm.shape()[1]; j++) { size_t idx = i + j*local_c_gspmm.shape()[0]; size_t cusp_idx = i*local_c_gspmm.shape()[1] + j; if (std::abs(cusparse_data[idx] - gspmm_data[cusp_idx]) > eps) { assert(false); if (print) { printf("O %2.2lf != %2.2lf ", cusparse_data[idx], gspmm_data[cusp_idx]); } } else { if (print) { printf("X %2.2lf == %2.2lf ", cusparse_data[idx], gspmm_data[cusp_idx]); } matching++; } } if (print) { printf("\n"); } } BCL::finalize(); return 0; }
713aab66a802971014ca0ff3d52b6a87d8924ef7.hip
// !!! This is a file automatically generated by hipify!!! #undef USE_DISPATCH // nvcc doesn't support libdispatch extern "C" { #include "ccv.h" } #include <ctype.h> #define CASE_TESTS // so that we don't include public available methods #include "../lib/cuda/cwc_convnet.cu" #include "../lib/ccv_convnet.c" extern "C" void cwc_verify_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params) { int batch = params.mini_batch; int i, j; const int device_id = 0; _cwc_convnet_alloc_reserved_both(convnet, batch, 1, params.layer_params); cwc_convnet_context_t* context = GPU(convnet)->contexts; for (i = 0; i < convnet->rows * convnet->cols * convnet->channels; i++) convnet->mean_activity->data.f32[i] = 128; cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, ccv_size(225, 225), 225, 225, convnet->rows, convnet->cols, convnet->channels, 1000, 0, batch, 0, batch, context->host[device_id].input, context->host[device_id].c); hipMemcpy(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * batch, hipMemcpyHostToDevice); ccv_convnet_t* update_params = _ccv_convnet_update_new(convnet); _ccv_convnet_update_zero(update_params); // first convolutional layer forward propagate ccv_convnet_layer_t* first_gpu_layer = GPU(convnet)->device[device_id].layers; // these are the setups for TITAN, thus, skip the benching phase EXTRA(first_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(first_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(first_gpu_layer)->vary.convolutional.forward.z = 32; hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(first_gpu_layer, first_gpu_layer->input.matrix.rows, first_gpu_layer->input.matrix.cols, batch, context->device[device_id].input, 
GPU(convnet)->device[device_id].forwards[0], context->device[device_id].data_stream); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); float elapsed_time = 0; hipEventElapsedTime(&elapsed_time, start, stop); hipStreamSynchronize(context->device[device_id].data_stream); printf("%d %d %d, elapsed time for first convolutional layer fprop: %f milliseconds\n", EXTRA(first_gpu_layer)->vary.convolutional.forward.x, EXTRA(first_gpu_layer)->vary.convolutional.forward.y, EXTRA(first_gpu_layer)->vary.convolutional.forward.z, elapsed_time); int first_out_rows, first_out_cols, first_out_partition, first_out_channels = first_gpu_layer->net.convolutional.count; ccv_convnet_make_output(first_gpu_layer, first_gpu_layer->input.matrix.rows, first_gpu_layer->input.matrix.cols, &first_out_rows, &first_out_cols, &first_out_partition); float* first_out = 0; hipHostMalloc(&first_out, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); hipMemcpy(first_out, GPU(convnet)->device[device_id].forwards[0], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate first convolutional layer on GPU\n"); // second average pool layer forward propagate ccv_convnet_layer_t* second_gpu_layer = GPU(convnet)->device[device_id].layers + 1; cwc_convnet_average_pool_forward_propagate(second_gpu_layer, second_gpu_layer->input.matrix.rows, second_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[0], GPU(convnet)->device[device_id].forwards[1], context->device[device_id].data_stream); hipStreamSynchronize(context->device[device_id].data_stream); int second_out_rows, second_out_cols, second_out_partition, second_out_channels = second_gpu_layer->input.matrix.channels; ccv_convnet_make_output(second_gpu_layer, second_gpu_layer->input.matrix.rows, second_gpu_layer->input.matrix.cols, &second_out_rows, &second_out_cols, &second_out_partition); 
float* second_out = 0; hipHostMalloc(&second_out, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); hipMemcpy(second_out, GPU(convnet)->device[device_id].forwards[1], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate second average pool layer on GPU\n"); // third convolutional layer forward propagate ccv_convnet_layer_t* third_gpu_layer = GPU(convnet)->device[device_id].layers + 2; // these are the setups for TITAN, thus, skip the benching phase EXTRA(third_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(third_gpu_layer)->vary.convolutional.forward.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(third_gpu_layer, third_gpu_layer->input.matrix.rows, third_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[1], GPU(convnet)->device[device_id].forwards[2], context->device[device_id].data_stream); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for third convolutional layer fprop: %f milliseconds\n", EXTRA(third_gpu_layer)->vary.convolutional.forward.x, EXTRA(third_gpu_layer)->vary.convolutional.forward.y, EXTRA(third_gpu_layer)->vary.convolutional.forward.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); int third_out_rows, third_out_cols, third_out_partition, third_out_channels = third_gpu_layer->net.convolutional.count; ccv_convnet_make_output(third_gpu_layer, third_gpu_layer->input.matrix.rows, third_gpu_layer->input.matrix.cols, &third_out_rows, &third_out_cols, &third_out_partition); float* third_out = 0; hipHostMalloc(&third_out, sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch); hipMemcpy(third_out, 
GPU(convnet)->device[device_id].forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate third convolutional layer on GPU\n"); // forth average pool layer forward propagate ccv_convnet_layer_t* forth_gpu_layer = GPU(convnet)->device[device_id].layers + 3; cwc_convnet_average_pool_forward_propagate(forth_gpu_layer, forth_gpu_layer->input.matrix.rows, forth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[2], GPU(convnet)->device[device_id].forwards[3], context->device[device_id].data_stream); hipStreamSynchronize(context->device[device_id].data_stream); int forth_out_rows, forth_out_cols, forth_out_partition, forth_out_channels = forth_gpu_layer->input.matrix.channels; ccv_convnet_make_output(forth_gpu_layer, forth_gpu_layer->input.matrix.rows, forth_gpu_layer->input.matrix.cols, &forth_out_rows, &forth_out_cols, &forth_out_partition); float* forth_out = 0; hipHostMalloc(&forth_out, sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch); hipMemcpy(forth_out, GPU(convnet)->device[device_id].forwards[3], sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate forth average pool layer on GPU\n"); // fifth convolutional layer forward propagate ccv_convnet_layer_t* fifth_gpu_layer = GPU(convnet)->device[device_id].layers + 4; // these are the setups for TITAN, thus, skip the benching phase EXTRA(fifth_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(fifth_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.forward.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(fifth_gpu_layer, fifth_gpu_layer->input.matrix.rows, fifth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[3], GPU(convnet)->device[device_id].forwards[4], 
context->device[device_id].data_stream); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for fifth convolutional layer fprop: %f milliseconds\n", EXTRA(fifth_gpu_layer)->vary.convolutional.forward.x, EXTRA(fifth_gpu_layer)->vary.convolutional.forward.y, EXTRA(fifth_gpu_layer)->vary.convolutional.forward.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); int fifth_out_rows, fifth_out_cols, fifth_out_partition, fifth_out_channels = fifth_gpu_layer->net.convolutional.count; ccv_convnet_make_output(fifth_gpu_layer, fifth_gpu_layer->input.matrix.rows, fifth_gpu_layer->input.matrix.cols, &fifth_out_rows, &fifth_out_cols, &fifth_out_partition); float* fifth_out = 0; hipHostMalloc(&fifth_out, sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch); hipMemcpy(fifth_out, GPU(convnet)->device[device_id].forwards[4], sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate fifth convolutional layer on GPU\n"); // sixth convolutional layer forward propagate ccv_convnet_layer_t* sixth_gpu_layer = GPU(convnet)->device[device_id].layers + 5; // these are the setups for TITAN, thus, skip the benching phase EXTRA(sixth_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(sixth_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.forward.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(sixth_gpu_layer, sixth_gpu_layer->input.matrix.rows, sixth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].forwards[5], context->device[device_id].data_stream); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, 
stop); printf("%d %d %d, elapsed time for sixth convolutional layer fprop: %f milliseconds\n", EXTRA(sixth_gpu_layer)->vary.convolutional.forward.x, EXTRA(sixth_gpu_layer)->vary.convolutional.forward.y, EXTRA(sixth_gpu_layer)->vary.convolutional.forward.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); int sixth_out_rows, sixth_out_cols, sixth_out_partition, sixth_out_channels = sixth_gpu_layer->net.convolutional.count; ccv_convnet_make_output(sixth_gpu_layer, sixth_gpu_layer->input.matrix.rows, sixth_gpu_layer->input.matrix.cols, &sixth_out_rows, &sixth_out_cols, &sixth_out_partition); float* sixth_out = 0; hipHostMalloc(&sixth_out, sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch); hipMemcpy(sixth_out, GPU(convnet)->device[device_id].forwards[5], sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate sixth convolutional layer on GPU\n"); // seventh convolutional layer forward propagate ccv_convnet_layer_t* seventh_gpu_layer = GPU(convnet)->device[device_id].layers + 6; // these are the setups for TITAN, thus, skip the benching phase EXTRA(seventh_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.forward.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(seventh_gpu_layer, seventh_gpu_layer->input.matrix.rows, seventh_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].forwards[6], context->device[device_id].data_stream); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for seventh convolutional layer fprop: %f milliseconds\n", EXTRA(seventh_gpu_layer)->vary.convolutional.forward.x, 
EXTRA(seventh_gpu_layer)->vary.convolutional.forward.y, EXTRA(seventh_gpu_layer)->vary.convolutional.forward.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); int seventh_out_rows, seventh_out_cols, seventh_out_partition, seventh_out_channels = seventh_gpu_layer->net.convolutional.count; ccv_convnet_make_output(seventh_gpu_layer, seventh_gpu_layer->input.matrix.rows, seventh_gpu_layer->input.matrix.cols, &seventh_out_rows, &seventh_out_cols, &seventh_out_partition); float* seventh_out = 0; hipHostMalloc(&seventh_out, sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch); hipMemcpy(seventh_out, GPU(convnet)->device[device_id].forwards[6], sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate seventh convolutional layer on GPU\n"); // the last full connect layer forward propagate ccv_convnet_layer_t* eleventh_gpu_layer = GPU(convnet)->device[device_id].layers + 10; float* eleventh_in = 0; hipHostMalloc(&eleventh_in, sizeof(float) * batch * eleventh_gpu_layer->input.node.count); for (i = 0; i < batch; i++) for (j = 0; j < eleventh_gpu_layer->input.node.count; j++) eleventh_in[j * batch + i] = (j - 100 + i) / 200; hipMemcpy(GPU(convnet)->device[device_id].forwards[9], eleventh_in, sizeof(float) * batch * eleventh_gpu_layer->input.node.count, hipMemcpyHostToDevice); hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_full_connect_forward_propagate(eleventh_gpu_layer, 128, GPU(convnet)->device[device_id].forwards[9], GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("elapsed time for eleventh full connect layer fprop: %f milliseconds\n", 
elapsed_time); float* eleventh_out = 0; hipHostMalloc(&eleventh_out, sizeof(float) * batch * eleventh_gpu_layer->net.full_connect.count); hipMemcpy(eleventh_out, GPU(convnet)->device[device_id].forwards[10], sizeof(float) * batch * eleventh_gpu_layer->net.full_connect.count, hipMemcpyDeviceToHost); printf("finished forward propagate eleventh full connect layer on GPU\n"); // eleventh full connect layer backward propagate ccv_convnet_layer_t* eleventh_gpu_configuration = GPU(convnet)->device[device_id].configurations + 10; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_full_connect_backward_propagate(eleventh_gpu_layer, batch, GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].forwards[9], GPU(convnet)->device[device_id].backwards[10], GPU(convnet)->device[device_id].unit, eleventh_gpu_configuration->w, eleventh_gpu_configuration->bias, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("elapsed time for eleventh full connect layer bprop: %f milliseconds\n", elapsed_time); float* eleventh_back = 0; hipHostMalloc(&eleventh_back, sizeof(float) * eleventh_gpu_layer->input.node.count * batch); hipMemcpy(eleventh_back, GPU(convnet)->device[device_id].backwards[10], sizeof(float) * eleventh_gpu_layer->input.node.count * batch, hipMemcpyDeviceToHost); float* eleventh_grad = 0; hipHostMalloc(&eleventh_grad, sizeof(float) * (eleventh_gpu_layer->wnum + eleventh_gpu_layer->net.full_connect.count)); assert(eleventh_grad); hipMemcpy(eleventh_grad, eleventh_gpu_configuration->w, sizeof(float) * (eleventh_gpu_layer->wnum + eleventh_gpu_layer->net.full_connect.count), hipMemcpyDeviceToHost); printf("finished backward propagate eleventh full connect layer on GPU\n"); // seventh convolutonal layer backward 
propagate hipMemcpy(GPU(convnet)->device[device_id].backwards[7], GPU(convnet)->device[device_id].forwards[6], sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch, hipMemcpyDeviceToDevice); ccv_convnet_layer_t* seventh_gpu_configuration = GPU(convnet)->device[device_id].configurations + 6; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.y = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(seventh_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[7], GPU(convnet)->device[device_id].forwards[6], GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].backwards[6], seventh_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for seventh convolutional layer bprop: %f milliseconds\n", EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); 
assert(hipGetLastError() == hipSuccess); float* seventh_back = 0; hipHostMalloc(&seventh_back, sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch); hipMemcpy(seventh_back, GPU(convnet)->device[device_id].backwards[6], sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch, hipMemcpyDeviceToHost); float* seventh_grad = 0; hipHostMalloc(&seventh_grad, sizeof(float) * (seventh_gpu_layer->wnum + seventh_gpu_layer->net.convolutional.count)); assert(seventh_grad); hipMemcpy(seventh_grad, seventh_gpu_configuration->w, sizeof(float) * (seventh_gpu_layer->wnum + seventh_gpu_layer->net.convolutional.count), hipMemcpyDeviceToHost); printf("finished backward propagate seventh convolutional layer on GPU\n"); // sixth convolutonal layer backward propagate ccv_convnet_layer_t* sixth_gpu_configuration = GPU(convnet)->device[device_id].configurations + 5; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(sixth_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[6], GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].backwards[5], sixth_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for 
sixth convolutional layer bprop: %f milliseconds\n", EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); assert(hipGetLastError() == hipSuccess); float* sixth_back = 0; hipHostMalloc(&sixth_back, sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch); hipMemcpy(sixth_back, GPU(convnet)->device[device_id].backwards[5], sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch, hipMemcpyDeviceToHost); float* sixth_grad = 0; hipHostMalloc(&sixth_grad, sizeof(float) * (sixth_gpu_layer->wnum + sixth_gpu_layer->net.convolutional.count)); assert(sixth_grad); hipMemcpy(sixth_grad, sixth_gpu_configuration->w, sizeof(float) * (sixth_gpu_layer->wnum + sixth_gpu_layer->net.convolutional.count), hipMemcpyDeviceToHost); printf("finished backward propagate sixth convolutional layer on GPU\n"); // fifth convolutonal layer backward propagate ccv_convnet_layer_t* fifth_gpu_configuration = GPU(convnet)->device[device_id].configurations + 4; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.z = 32; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(fifth_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[5], 
GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].forwards[3], GPU(convnet)->device[device_id].backwards[4], fifth_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for fifth convolutional layer bprop: %f milliseconds\n", EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); assert(hipGetLastError() == hipSuccess); float* fifth_back = 0; hipHostMalloc(&fifth_back, sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch); hipMemcpy(fifth_back, GPU(convnet)->device[device_id].backwards[4], sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch, hipMemcpyDeviceToHost); float* fifth_grad = 0; hipHostMalloc(&fifth_grad, sizeof(float) * (fifth_gpu_layer->wnum + fifth_gpu_layer->net.convolutional.count)); assert(fifth_grad); hipMemcpy(fifth_grad, fifth_gpu_configuration->w, sizeof(float) * (fifth_gpu_layer->wnum + fifth_gpu_layer->net.convolutional.count), hipMemcpyDeviceToHost); printf("finished backward propagate fifth convolutional layer on GPU\n"); // third convolutonal layer backward propagate hipMemcpy(GPU(convnet)->device[device_id].backwards[3], GPU(convnet)->device[device_id].forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, 
hipMemcpyDeviceToDevice); ccv_convnet_layer_t* third_gpu_configuration = GPU(convnet)->device[device_id].configurations + 2; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.y = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.z = 16; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.y = 6; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.z = 24; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(third_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[3], GPU(convnet)->device[device_id].forwards[2], GPU(convnet)->device[device_id].forwards[1], GPU(convnet)->device[device_id].backwards[2], third_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for third convolutional layer bprop: %f milliseconds\n", EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); assert(hipGetLastError() == hipSuccess); float* third_back = 0; hipHostMalloc(&third_back, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); hipMemcpy(third_back, GPU(convnet)->device[device_id].backwards[2], 
sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, hipMemcpyDeviceToHost); float* third_grad = 0; hipHostMalloc(&third_grad, sizeof(float) * (third_gpu_layer->wnum + third_gpu_layer->net.convolutional.count)); assert(third_grad); hipMemcpy(third_grad, third_gpu_configuration->w, sizeof(float) * (third_gpu_layer->wnum + third_gpu_layer->net.convolutional.count), hipMemcpyDeviceToHost); printf("finished backward propagate third convolutional layer on GPU\n"); // second average pool layer backward propagate cwc_convnet_average_pool_backward_propagate(second_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[2], GPU(convnet)->device[device_id].backwards[1], context->device[device_id].data_stream); hipStreamSynchronize(context->device[device_id].data_stream); assert(hipGetLastError() == hipSuccess); float* second_back = 0; hipHostMalloc(&second_back, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); hipMemcpy(second_back, GPU(convnet)->device[device_id].backwards[1], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, hipMemcpyDeviceToHost); printf("finished backward propagate second average pool layer on GPU\n"); // first convolutional layer backward propagate ccv_convnet_layer_t* first_gpu_configuration = GPU(convnet)->device[device_id].configurations; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.x = 1; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.z = 1; hipEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(first_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[1], GPU(convnet)->device[device_id].forwards[0], context->device[device_id].input, GPU(convnet)->device[device_id].backwards[0], first_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, 
context->device[device_id].data_stream, context->device[device_id].data_cublas); hipEventRecord(stop, context->device[device_id].data_stream); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for first convolutional layer bprop: %f milliseconds\n", EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.z, elapsed_time); hipStreamSynchronize(context->device[device_id].data_stream); assert(hipGetLastError() == hipSuccess); float* first_grad = 0; hipHostMalloc(&first_grad, sizeof(float) * (first_gpu_layer->wnum + first_gpu_layer->net.convolutional.count)); assert(first_grad); hipMemcpy(first_grad, first_gpu_configuration->w, sizeof(float) * (first_gpu_layer->wnum + first_gpu_layer->net.convolutional.count), hipMemcpyDeviceToHost); printf("finished backward propagate first convolutional layer on GPU\n"); hipEventDestroy(start); hipEventDestroy(stop); int x, y, k, c; for (i = 0; i < batch; i++) { printf("doing batch %d of %d\n", i + 1, batch); ccv_categorized_t* categorized = (ccv_categorized_t*)ccv_array_get(categorizeds, i); for (x = 0; x < categorized->matrix->rows * categorized->matrix->cols * CCV_GET_CHANNEL(categorized->matrix->type); x++) categorized->matrix->data.f32[x] = categorized->matrix->data.f32[x] - 128; // first convolutional layer forward propagate ccv_convnet_layer_t* first_cpu_layer = convnet->layers; _ccv_convnet_convolutional_forward_propagate(first_cpu_layer, categorized->matrix, convnet->acts); ccv_dense_matrix_t* a = convnet->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = first_out[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = a->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float 
delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 1: %d %d %d %d: |%f - %f| = %f\n", i, x, y, k, p, q, delta); } // second average pool layer forward propagate ccv_convnet_layer_t* second_cpu_layer = convnet->layers + 1; _ccv_convnet_average_pool_forward_propagate(second_cpu_layer, convnet->acts[0], convnet->acts + 1); ccv_dense_matrix_t* b = convnet->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = second_out[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = b->data.f32[y * second_out_cols * second_out_channels + x * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer forward propagate ccv_convnet_layer_t* third_cpu_layer = convnet->layers + 2; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, convnet->acts[1], convnet->acts + 2); ccv_dense_matrix_t* c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels; k++) { float p = third_out[k * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // forth average pool layer forward propagate ccv_convnet_layer_t* forth_cpu_layer = convnet->layers + 3; _ccv_convnet_average_pool_forward_propagate(forth_cpu_layer, convnet->acts[2], convnet->acts + 3); ccv_dense_matrix_t* d = convnet->acts[3]; for (y = 0; y < forth_out_rows; y++) for (x = 0; x < forth_out_cols; x++) for (k = 0; k < forth_out_channels; k++) { float p = 
forth_out[k * forth_out_rows * forth_out_cols * batch + (y * forth_out_cols + x) * batch + i]; float q = d->data.f32[y * forth_out_cols * forth_out_channels + x * forth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 4: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // fifth convolutional layer forward propagate ccv_convnet_layer_t* fifth_cpu_layer = convnet->layers + 4; _ccv_convnet_convolutional_forward_propagate(fifth_cpu_layer, convnet->acts[3], convnet->acts + 4); ccv_dense_matrix_t* e = convnet->acts[4]; for (y = 0; y < fifth_out_rows; y++) for (x = 0; x < fifth_out_cols; x++) for (k = 0; k < fifth_out_channels; k++) { float p = fifth_out[k * fifth_out_rows * fifth_out_cols * batch + (y * fifth_out_cols + x) * batch + i]; float q = e->data.f32[(y * fifth_out_cols + x) * fifth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 5: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // sixth convolutional layer forward propagate ccv_convnet_layer_t* sixth_cpu_layer = convnet->layers + 5; _ccv_convnet_convolutional_forward_propagate(sixth_cpu_layer, convnet->acts[4], convnet->acts + 5); ccv_dense_matrix_t* f = convnet->acts[5]; for (y = 0; y < sixth_out_rows; y++) for (x = 0; x < sixth_out_cols; x++) for (k = 0; k < sixth_out_channels; k++) { float p = sixth_out[k * sixth_out_rows * sixth_out_cols * batch + (y * sixth_out_cols + x) * batch + i]; float q = f->data.f32[(y * sixth_out_cols + x) * sixth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 6: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // seventh convolutional layer forward propagate ccv_convnet_layer_t* seventh_cpu_layer = convnet->layers + 6; _ccv_convnet_convolutional_forward_propagate(seventh_cpu_layer, convnet->acts[5], convnet->acts + 6); 
ccv_dense_matrix_t* g = convnet->acts[6]; for (y = 0; y < seventh_out_rows; y++) for (x = 0; x < seventh_out_cols; x++) for (k = 0; k < seventh_out_channels; k++) { float p = seventh_out[k * seventh_out_rows * seventh_out_cols * batch + (y * seventh_out_cols + x) * batch + i]; float q = g->data.f32[(y * seventh_out_cols + x) * seventh_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 7: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // eleventh full connect layer forward propagate ccv_convnet_layer_t* eleventh_cpu_layer = convnet->layers + 10; convnet->acts[9] = ccv_dense_matrix_new(eleventh_cpu_layer->input.node.count, 1, CCV_32F | CCV_C1, 0, 0); for (k = 0; k < eleventh_cpu_layer->input.node.count; k++) convnet->acts[9]->data.f32[k] = eleventh_in[k * batch + i]; _ccv_convnet_full_connect_forward_propagate(eleventh_cpu_layer, convnet->acts[9], convnet->acts + 10); ccv_dense_matrix_t* z = convnet->acts[10]; for (k = 0; k < eleventh_cpu_layer->net.full_connect.count; k++) { float p = eleventh_out[k * batch + i]; float q = z->data.f32[k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("fc fprop 11: %d %d: |%g - %g| = %g\n", i, k, p, q, delta); } _ccv_convnet_full_connect_backward_propagate(eleventh_cpu_layer, convnet->acts[10], convnet->acts[10], convnet->acts[9], update_params->acts + 9, update_params->layers + 10); ccv_matrix_free(convnet->acts[9]); ccv_dense_matrix_t* bz = update_params->acts[9]; for (k = 0; k < eleventh_cpu_layer->input.node.count; k++) { float p = eleventh_back[k * batch + i]; float q = bz->data.f32[k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("fc bprop 11: %d %d: |%g - %g| = %g\n", i, k, p, q, delta); } // seventh convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(seventh_cpu_layer, convnet->acts[6], convnet->acts[6], 
convnet->acts[5], update_params->acts + 5, update_params->layers + 6); ccv_dense_matrix_t* bg = update_params->acts[5]; for (y = 0; y < sixth_out_rows; y++) for (x = 0; x < sixth_out_cols; x++) for (k = 0; k < sixth_out_channels; k++) { float p = seventh_back[k * sixth_out_rows * sixth_out_cols * batch + (y * sixth_out_cols + x) * batch + i]; float q = bg->data.f32[(y * sixth_out_cols + x) * sixth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // sixth convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(sixth_cpu_layer, update_params->acts[5], convnet->acts[5], convnet->acts[4], update_params->acts + 4, update_params->layers + 5); ccv_dense_matrix_t* bf = update_params->acts[4]; for (y = 0; y < fifth_out_rows; y++) for (x = 0; x < fifth_out_cols; x++) for (k = 0; k < fifth_out_channels; k++) { float p = sixth_back[k * fifth_out_rows * fifth_out_cols * batch + (y * fifth_out_cols + x) * batch + i]; float q = bf->data.f32[(y * fifth_out_cols + x) * fifth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 6: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // fifth convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(fifth_cpu_layer, update_params->acts[4], convnet->acts[4], convnet->acts[3], update_params->acts + 3, update_params->layers + 4); ccv_dense_matrix_t* be = update_params->acts[3]; for (y = 0; y < forth_out_rows; y++) for (x = 0; x < forth_out_cols; x++) for (k = 0; k < forth_out_channels; k++) { float p = fifth_back[k * forth_out_rows * forth_out_cols * batch + (y * forth_out_cols + x) * batch + i]; float q = be->data.f32[(y * forth_out_cols + x) * forth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-2) printf("conv bprop 
5: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], convnet->acts[1], update_params->acts + 1, update_params->layers + 2); ccv_dense_matrix_t* bc = update_params->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = third_back[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // second average pool layer backward propagate _ccv_convnet_average_pool_backward_propagate(second_cpu_layer, update_params->acts[1], convnet->acts[0], update_params->acts); ccv_dense_matrix_t* bb = update_params->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = second_back[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = bb->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool bprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // first convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(first_cpu_layer, update_params->acts[0], convnet->acts[0], categorized->matrix, 0, update_params->layers); } ccv_convnet_layer_t* eleventh_cpu_configuration = update_params->layers + 10; for (x = 0; x < eleventh_cpu_configuration->net.full_connect.count; x++) for (y = 0; y < eleventh_cpu_configuration->input.node.count; y++) { float p = eleventh_cpu_configuration->w[x * 
eleventh_cpu_configuration->input.node.count + y]; float q = eleventh_grad[x * eleventh_cpu_configuration->input.node.count + y]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("fc bprop 11: %d %d: |%g - %g| = %g\n", x, y, p, q, delta); } for (x = 0; x < eleventh_cpu_configuration->net.full_connect.count; x++) { float p = eleventh_cpu_configuration->bias[x]; float q = eleventh_grad[eleventh_cpu_configuration->net.full_connect.count * eleventh_cpu_configuration->input.node.count + x]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("fc bprop 11 bias: %d: |%g - %g| = %g\n", x, p, q, delta); } ccv_convnet_layer_t* seventh_cpu_configuration = update_params->layers + 6; int seventh_filter_rows = seventh_gpu_layer->net.convolutional.rows; int seventh_filter_cols = seventh_gpu_layer->net.convolutional.cols; int seventh_filter_count = seventh_gpu_layer->net.convolutional.count; int seventh_filter_channels = seventh_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < seventh_filter_rows; y++) for (x = 0; x < seventh_filter_cols; x++) for (k = 0; k < seventh_filter_count; k++) for (c = 0; c < seventh_filter_channels; c++) { float p = seventh_cpu_configuration->w[(y * seventh_filter_cols + x) * seventh_filter_channels + k * seventh_filter_cols * seventh_filter_rows * seventh_filter_channels + c]; float q = seventh_grad[(y * seventh_filter_cols + x) * seventh_filter_count + k + c * seventh_filter_cols * seventh_filter_rows * seventh_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < seventh_filter_count; k++) { float p = seventh_cpu_configuration->bias[k]; float q = seventh_grad[seventh_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7 bias: %d: |%g - %g| = 
%g\n", k, p, q, delta); } ccv_convnet_layer_t* sixth_cpu_configuration = update_params->layers + 5; int sixth_filter_rows = sixth_gpu_layer->net.convolutional.rows; int sixth_filter_cols = sixth_gpu_layer->net.convolutional.cols; int sixth_filter_count = sixth_gpu_layer->net.convolutional.count; int sixth_filter_channels = sixth_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < sixth_filter_rows; y++) for (x = 0; x < sixth_filter_cols; x++) for (k = 0; k < sixth_filter_count; k++) for (c = 0; c < sixth_filter_channels; c++) { float p = sixth_cpu_configuration->w[(y * sixth_filter_cols + x) * sixth_filter_channels + k * sixth_filter_cols * sixth_filter_rows * sixth_filter_channels + c]; float q = sixth_grad[(y * sixth_filter_cols + x) * sixth_filter_count + k + c * sixth_filter_cols * sixth_filter_rows * sixth_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 6: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < sixth_filter_count; k++) { float p = sixth_cpu_configuration->bias[k]; float q = sixth_grad[sixth_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 6 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* fifth_cpu_configuration = update_params->layers + 4; int fifth_filter_rows = fifth_gpu_layer->net.convolutional.rows; int fifth_filter_cols = fifth_gpu_layer->net.convolutional.cols; int fifth_filter_count = fifth_gpu_layer->net.convolutional.count; int fifth_filter_channels = fifth_gpu_layer->net.convolutional.channels; for (y = 0; y < fifth_filter_rows; y++) for (x = 0; x < fifth_filter_cols; x++) for (k = 0; k < fifth_filter_count; k++) for (c = 0; c < fifth_filter_channels; c++) { float p = fifth_cpu_configuration->w[(y * fifth_filter_cols + x) * fifth_filter_channels + k * fifth_filter_cols * fifth_filter_rows * fifth_filter_channels + c]; float q = fifth_grad[(y 
* fifth_filter_cols + x) * fifth_filter_count + k + c * fifth_filter_cols * fifth_filter_rows * fifth_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-2) printf("conv bprop 5: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < fifth_filter_count; k++) { float p = fifth_cpu_configuration->bias[k]; float q = fifth_grad[fifth_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 5 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* third_cpu_configuration = update_params->layers + 2; int third_filter_rows = third_gpu_layer->net.convolutional.rows; int third_filter_cols = third_gpu_layer->net.convolutional.cols; int third_filter_count = third_gpu_layer->net.convolutional.count; int third_filter_channels = third_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < third_filter_rows; y++) for (x = 0; x < third_filter_cols; x++) for (k = 0; k < third_filter_count; k++) for (c = 0; c < third_filter_channels; c++) { float p = third_cpu_configuration->w[(y * third_filter_cols + x) * third_filter_channels + k * third_filter_cols * third_filter_rows * third_filter_channels + c]; float q = third_grad[(y * third_filter_cols + x) * third_filter_count + k + c * third_filter_cols * third_filter_rows * third_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < third_filter_count; k++) { float p = third_cpu_configuration->bias[k]; float q = third_grad[third_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* first_cpu_configuration = update_params->layers; int first_filter_rows = first_gpu_layer->net.convolutional.rows; int 
first_filter_cols = first_gpu_layer->net.convolutional.cols; int first_filter_count = first_gpu_layer->net.convolutional.count; int first_filter_channels = first_gpu_layer->net.convolutional.channels; for (y = 0; y < first_filter_rows; y++) for (x = 0; x < first_filter_cols; x++) for (k = 0; k < first_filter_count; k++) for (c = 0; c < first_filter_channels; c++) { float p = first_cpu_configuration->w[(y * first_filter_cols + x) * first_filter_channels + k * first_filter_cols * first_filter_rows * first_filter_channels + c]; float q = first_grad[(y * first_filter_cols + x) * first_filter_count + k + c * first_filter_cols * first_filter_rows * first_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 1: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < first_filter_count; k++) { float p = first_cpu_configuration->bias[k]; float q = first_grad[first_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 1 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } hipHostFree(eleventh_in); }
713aab66a802971014ca0ff3d52b6a87d8924ef7.cu
#undef USE_DISPATCH // nvcc doesn't support libdispatch extern "C" { #include "ccv.h" } #include <ctype.h> #define CASE_TESTS // so that we don't include public available methods #include "../lib/cuda/cwc_convnet.cu" #include "../lib/ccv_convnet.c" extern "C" void cwc_verify_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params) { int batch = params.mini_batch; int i, j; const int device_id = 0; _cwc_convnet_alloc_reserved_both(convnet, batch, 1, params.layer_params); cwc_convnet_context_t* context = GPU(convnet)->contexts; for (i = 0; i < convnet->rows * convnet->cols * convnet->channels; i++) convnet->mean_activity->data.f32[i] = 128; cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, ccv_size(225, 225), 225, 225, convnet->rows, convnet->cols, convnet->channels, 1000, 0, batch, 0, batch, context->host[device_id].input, context->host[device_id].c); cudaMemcpy(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * batch, cudaMemcpyHostToDevice); ccv_convnet_t* update_params = _ccv_convnet_update_new(convnet); _ccv_convnet_update_zero(update_params); // first convolutional layer forward propagate ccv_convnet_layer_t* first_gpu_layer = GPU(convnet)->device[device_id].layers; // these are the setups for TITAN, thus, skip the benching phase EXTRA(first_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(first_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(first_gpu_layer)->vary.convolutional.forward.z = 32; cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(first_gpu_layer, first_gpu_layer->input.matrix.rows, first_gpu_layer->input.matrix.cols, batch, context->device[device_id].input, GPU(convnet)->device[device_id].forwards[0], context->device[device_id].data_stream); 
cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); float elapsed_time = 0; cudaEventElapsedTime(&elapsed_time, start, stop); cudaStreamSynchronize(context->device[device_id].data_stream); printf("%d %d %d, elapsed time for first convolutional layer fprop: %f milliseconds\n", EXTRA(first_gpu_layer)->vary.convolutional.forward.x, EXTRA(first_gpu_layer)->vary.convolutional.forward.y, EXTRA(first_gpu_layer)->vary.convolutional.forward.z, elapsed_time); int first_out_rows, first_out_cols, first_out_partition, first_out_channels = first_gpu_layer->net.convolutional.count; ccv_convnet_make_output(first_gpu_layer, first_gpu_layer->input.matrix.rows, first_gpu_layer->input.matrix.cols, &first_out_rows, &first_out_cols, &first_out_partition); float* first_out = 0; cudaMallocHost(&first_out, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); cudaMemcpy(first_out, GPU(convnet)->device[device_id].forwards[0], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate first convolutional layer on GPU\n"); // second average pool layer forward propagate ccv_convnet_layer_t* second_gpu_layer = GPU(convnet)->device[device_id].layers + 1; cwc_convnet_average_pool_forward_propagate(second_gpu_layer, second_gpu_layer->input.matrix.rows, second_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[0], GPU(convnet)->device[device_id].forwards[1], context->device[device_id].data_stream); cudaStreamSynchronize(context->device[device_id].data_stream); int second_out_rows, second_out_cols, second_out_partition, second_out_channels = second_gpu_layer->input.matrix.channels; ccv_convnet_make_output(second_gpu_layer, second_gpu_layer->input.matrix.rows, second_gpu_layer->input.matrix.cols, &second_out_rows, &second_out_cols, &second_out_partition); float* second_out = 0; cudaMallocHost(&second_out, sizeof(float) * 
second_out_rows * second_out_cols * second_out_channels * batch); cudaMemcpy(second_out, GPU(convnet)->device[device_id].forwards[1], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate second average pool layer on GPU\n"); // third convolutional layer forward propagate ccv_convnet_layer_t* third_gpu_layer = GPU(convnet)->device[device_id].layers + 2; // these are the setups for TITAN, thus, skip the benching phase EXTRA(third_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(third_gpu_layer)->vary.convolutional.forward.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(third_gpu_layer, third_gpu_layer->input.matrix.rows, third_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[1], GPU(convnet)->device[device_id].forwards[2], context->device[device_id].data_stream); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for third convolutional layer fprop: %f milliseconds\n", EXTRA(third_gpu_layer)->vary.convolutional.forward.x, EXTRA(third_gpu_layer)->vary.convolutional.forward.y, EXTRA(third_gpu_layer)->vary.convolutional.forward.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); int third_out_rows, third_out_cols, third_out_partition, third_out_channels = third_gpu_layer->net.convolutional.count; ccv_convnet_make_output(third_gpu_layer, third_gpu_layer->input.matrix.rows, third_gpu_layer->input.matrix.cols, &third_out_rows, &third_out_cols, &third_out_partition); float* third_out = 0; cudaMallocHost(&third_out, sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch); cudaMemcpy(third_out, GPU(convnet)->device[device_id].forwards[2], sizeof(float) * 
third_out_rows * third_out_cols * third_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate third convolutional layer on GPU\n"); // forth average pool layer forward propagate ccv_convnet_layer_t* forth_gpu_layer = GPU(convnet)->device[device_id].layers + 3; cwc_convnet_average_pool_forward_propagate(forth_gpu_layer, forth_gpu_layer->input.matrix.rows, forth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[2], GPU(convnet)->device[device_id].forwards[3], context->device[device_id].data_stream); cudaStreamSynchronize(context->device[device_id].data_stream); int forth_out_rows, forth_out_cols, forth_out_partition, forth_out_channels = forth_gpu_layer->input.matrix.channels; ccv_convnet_make_output(forth_gpu_layer, forth_gpu_layer->input.matrix.rows, forth_gpu_layer->input.matrix.cols, &forth_out_rows, &forth_out_cols, &forth_out_partition); float* forth_out = 0; cudaMallocHost(&forth_out, sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch); cudaMemcpy(forth_out, GPU(convnet)->device[device_id].forwards[3], sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate forth average pool layer on GPU\n"); // fifth convolutional layer forward propagate ccv_convnet_layer_t* fifth_gpu_layer = GPU(convnet)->device[device_id].layers + 4; // these are the setups for TITAN, thus, skip the benching phase EXTRA(fifth_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(fifth_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.forward.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(fifth_gpu_layer, fifth_gpu_layer->input.matrix.rows, fifth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[3], GPU(convnet)->device[device_id].forwards[4], context->device[device_id].data_stream); 
cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for fifth convolutional layer fprop: %f milliseconds\n", EXTRA(fifth_gpu_layer)->vary.convolutional.forward.x, EXTRA(fifth_gpu_layer)->vary.convolutional.forward.y, EXTRA(fifth_gpu_layer)->vary.convolutional.forward.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); int fifth_out_rows, fifth_out_cols, fifth_out_partition, fifth_out_channels = fifth_gpu_layer->net.convolutional.count; ccv_convnet_make_output(fifth_gpu_layer, fifth_gpu_layer->input.matrix.rows, fifth_gpu_layer->input.matrix.cols, &fifth_out_rows, &fifth_out_cols, &fifth_out_partition); float* fifth_out = 0; cudaMallocHost(&fifth_out, sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch); cudaMemcpy(fifth_out, GPU(convnet)->device[device_id].forwards[4], sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate fifth convolutional layer on GPU\n"); // sixth convolutional layer forward propagate ccv_convnet_layer_t* sixth_gpu_layer = GPU(convnet)->device[device_id].layers + 5; // these are the setups for TITAN, thus, skip the benching phase EXTRA(sixth_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(sixth_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.forward.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(sixth_gpu_layer, sixth_gpu_layer->input.matrix.rows, sixth_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].forwards[5], context->device[device_id].data_stream); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, 
elapsed time for sixth convolutional layer fprop: %f milliseconds\n", EXTRA(sixth_gpu_layer)->vary.convolutional.forward.x, EXTRA(sixth_gpu_layer)->vary.convolutional.forward.y, EXTRA(sixth_gpu_layer)->vary.convolutional.forward.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); int sixth_out_rows, sixth_out_cols, sixth_out_partition, sixth_out_channels = sixth_gpu_layer->net.convolutional.count; ccv_convnet_make_output(sixth_gpu_layer, sixth_gpu_layer->input.matrix.rows, sixth_gpu_layer->input.matrix.cols, &sixth_out_rows, &sixth_out_cols, &sixth_out_partition); float* sixth_out = 0; cudaMallocHost(&sixth_out, sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch); cudaMemcpy(sixth_out, GPU(convnet)->device[device_id].forwards[5], sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate sixth convolutional layer on GPU\n"); // seventh convolutional layer forward propagate ccv_convnet_layer_t* seventh_gpu_layer = GPU(convnet)->device[device_id].layers + 6; // these are the setups for TITAN, thus, skip the benching phase EXTRA(seventh_gpu_layer)->vary.convolutional.forward.x = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.forward.y = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.forward.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_forward_propagate(seventh_gpu_layer, seventh_gpu_layer->input.matrix.rows, seventh_gpu_layer->input.matrix.cols, batch, GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].forwards[6], context->device[device_id].data_stream); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for seventh convolutional layer fprop: %f milliseconds\n", EXTRA(seventh_gpu_layer)->vary.convolutional.forward.x, 
EXTRA(seventh_gpu_layer)->vary.convolutional.forward.y, EXTRA(seventh_gpu_layer)->vary.convolutional.forward.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); int seventh_out_rows, seventh_out_cols, seventh_out_partition, seventh_out_channels = seventh_gpu_layer->net.convolutional.count; ccv_convnet_make_output(seventh_gpu_layer, seventh_gpu_layer->input.matrix.rows, seventh_gpu_layer->input.matrix.cols, &seventh_out_rows, &seventh_out_cols, &seventh_out_partition); float* seventh_out = 0; cudaMallocHost(&seventh_out, sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch); cudaMemcpy(seventh_out, GPU(convnet)->device[device_id].forwards[6], sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate seventh convolutional layer on GPU\n"); // the last full connect layer forward propagate ccv_convnet_layer_t* eleventh_gpu_layer = GPU(convnet)->device[device_id].layers + 10; float* eleventh_in = 0; cudaMallocHost(&eleventh_in, sizeof(float) * batch * eleventh_gpu_layer->input.node.count); for (i = 0; i < batch; i++) for (j = 0; j < eleventh_gpu_layer->input.node.count; j++) eleventh_in[j * batch + i] = (j - 100 + i) / 200; cudaMemcpy(GPU(convnet)->device[device_id].forwards[9], eleventh_in, sizeof(float) * batch * eleventh_gpu_layer->input.node.count, cudaMemcpyHostToDevice); cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_full_connect_forward_propagate(eleventh_gpu_layer, 128, GPU(convnet)->device[device_id].forwards[9], GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("elapsed time for eleventh full connect layer fprop: %f 
milliseconds\n", elapsed_time); float* eleventh_out = 0; cudaMallocHost(&eleventh_out, sizeof(float) * batch * eleventh_gpu_layer->net.full_connect.count); cudaMemcpy(eleventh_out, GPU(convnet)->device[device_id].forwards[10], sizeof(float) * batch * eleventh_gpu_layer->net.full_connect.count, cudaMemcpyDeviceToHost); printf("finished forward propagate eleventh full connect layer on GPU\n"); // eleventh full connect layer backward propagate ccv_convnet_layer_t* eleventh_gpu_configuration = GPU(convnet)->device[device_id].configurations + 10; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_full_connect_backward_propagate(eleventh_gpu_layer, batch, GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].forwards[10], GPU(convnet)->device[device_id].forwards[9], GPU(convnet)->device[device_id].backwards[10], GPU(convnet)->device[device_id].unit, eleventh_gpu_configuration->w, eleventh_gpu_configuration->bias, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("elapsed time for eleventh full connect layer bprop: %f milliseconds\n", elapsed_time); float* eleventh_back = 0; cudaMallocHost(&eleventh_back, sizeof(float) * eleventh_gpu_layer->input.node.count * batch); cudaMemcpy(eleventh_back, GPU(convnet)->device[device_id].backwards[10], sizeof(float) * eleventh_gpu_layer->input.node.count * batch, cudaMemcpyDeviceToHost); float* eleventh_grad = 0; cudaMallocHost(&eleventh_grad, sizeof(float) * (eleventh_gpu_layer->wnum + eleventh_gpu_layer->net.full_connect.count)); assert(eleventh_grad); cudaMemcpy(eleventh_grad, eleventh_gpu_configuration->w, sizeof(float) * (eleventh_gpu_layer->wnum + eleventh_gpu_layer->net.full_connect.count), cudaMemcpyDeviceToHost); printf("finished backward propagate eleventh full connect layer on GPU\n"); // seventh 
convolutonal layer backward propagate cudaMemcpy(GPU(convnet)->device[device_id].backwards[7], GPU(convnet)->device[device_id].forwards[6], sizeof(float) * seventh_out_rows * seventh_out_cols * seventh_out_channels * batch, cudaMemcpyDeviceToDevice); ccv_convnet_layer_t* seventh_gpu_configuration = GPU(convnet)->device[device_id].configurations + 6; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.y = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(seventh_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[7], GPU(convnet)->device[device_id].forwards[6], GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].backwards[6], seventh_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for seventh convolutional layer bprop: %f milliseconds\n", EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(seventh_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); 
cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* seventh_back = 0; cudaMallocHost(&seventh_back, sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch); cudaMemcpy(seventh_back, GPU(convnet)->device[device_id].backwards[6], sizeof(float) * sixth_out_rows * sixth_out_cols * sixth_out_channels * batch, cudaMemcpyDeviceToHost); float* seventh_grad = 0; cudaMallocHost(&seventh_grad, sizeof(float) * (seventh_gpu_layer->wnum + seventh_gpu_layer->net.convolutional.count)); assert(seventh_grad); cudaMemcpy(seventh_grad, seventh_gpu_configuration->w, sizeof(float) * (seventh_gpu_layer->wnum + seventh_gpu_layer->net.convolutional.count), cudaMemcpyDeviceToHost); printf("finished backward propagate seventh convolutional layer on GPU\n"); // sixth convolutonal layer backward propagate ccv_convnet_layer_t* sixth_gpu_configuration = GPU(convnet)->device[device_id].configurations + 5; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(sixth_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[6], GPU(convnet)->device[device_id].forwards[5], GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].backwards[5], sixth_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for sixth convolutional layer bprop: %f milliseconds\n", EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(sixth_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* sixth_back = 0; cudaMallocHost(&sixth_back, sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch); cudaMemcpy(sixth_back, GPU(convnet)->device[device_id].backwards[5], sizeof(float) * fifth_out_rows * fifth_out_cols * fifth_out_channels * batch, cudaMemcpyDeviceToHost); float* sixth_grad = 0; cudaMallocHost(&sixth_grad, sizeof(float) * (sixth_gpu_layer->wnum + sixth_gpu_layer->net.convolutional.count)); assert(sixth_grad); cudaMemcpy(sixth_grad, sixth_gpu_configuration->w, sizeof(float) * (sixth_gpu_layer->wnum + sixth_gpu_layer->net.convolutional.count), cudaMemcpyDeviceToHost); printf("finished backward propagate sixth convolutional layer on GPU\n"); // fifth convolutonal layer backward propagate ccv_convnet_layer_t* fifth_gpu_configuration = GPU(convnet)->device[device_id].configurations + 4; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.x = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.z = 32; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.y = 8; EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.z = 32; cudaEventRecord(start, context->device[device_id].data_stream); 
cwc_convnet_convolutional_backward_propagate(fifth_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[5], GPU(convnet)->device[device_id].forwards[4], GPU(convnet)->device[device_id].forwards[3], GPU(convnet)->device[device_id].backwards[4], fifth_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for fifth convolutional layer bprop: %f milliseconds\n", EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(fifth_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* fifth_back = 0; cudaMallocHost(&fifth_back, sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch); cudaMemcpy(fifth_back, GPU(convnet)->device[device_id].backwards[4], sizeof(float) * forth_out_rows * forth_out_cols * forth_out_channels * batch, cudaMemcpyDeviceToHost); float* fifth_grad = 0; cudaMallocHost(&fifth_grad, sizeof(float) * (fifth_gpu_layer->wnum + fifth_gpu_layer->net.convolutional.count)); assert(fifth_grad); cudaMemcpy(fifth_grad, fifth_gpu_configuration->w, sizeof(float) * (fifth_gpu_layer->wnum + fifth_gpu_layer->net.convolutional.count), cudaMemcpyDeviceToHost); printf("finished backward propagate fifth convolutional layer on GPU\n"); // third convolutonal layer backward propagate cudaMemcpy(GPU(convnet)->device[device_id].backwards[3], 
GPU(convnet)->device[device_id].forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, cudaMemcpyDeviceToDevice); ccv_convnet_layer_t* third_gpu_configuration = GPU(convnet)->device[device_id].configurations + 2; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.y = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.z = 16; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.x = 4; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.y = 6; EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.z = 24; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(third_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[3], GPU(convnet)->device[device_id].forwards[2], GPU(convnet)->device[device_id].forwards[1], GPU(convnet)->device[device_id].backwards[2], third_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, %d %d %d, elapsed time for third convolutional layer bprop: %f milliseconds\n", EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(third_gpu_layer)->vary.convolutional.backward.coefficient.z, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.x, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.y, EXTRA(third_gpu_layer)->vary.convolutional.backward.gradient.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* third_back = 0; cudaMallocHost(&third_back, sizeof(float) * 
second_out_rows * second_out_cols * second_out_channels * batch); cudaMemcpy(third_back, GPU(convnet)->device[device_id].backwards[2], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, cudaMemcpyDeviceToHost); float* third_grad = 0; cudaMallocHost(&third_grad, sizeof(float) * (third_gpu_layer->wnum + third_gpu_layer->net.convolutional.count)); assert(third_grad); cudaMemcpy(third_grad, third_gpu_configuration->w, sizeof(float) * (third_gpu_layer->wnum + third_gpu_layer->net.convolutional.count), cudaMemcpyDeviceToHost); printf("finished backward propagate third convolutional layer on GPU\n"); // second average pool layer backward propagate cwc_convnet_average_pool_backward_propagate(second_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[2], GPU(convnet)->device[device_id].backwards[1], context->device[device_id].data_stream); cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* second_back = 0; cudaMallocHost(&second_back, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); cudaMemcpy(second_back, GPU(convnet)->device[device_id].backwards[1], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished backward propagate second average pool layer on GPU\n"); // first convolutional layer backward propagate ccv_convnet_layer_t* first_gpu_configuration = GPU(convnet)->device[device_id].configurations; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.x = 1; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.y = 3; EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.z = 1; cudaEventRecord(start, context->device[device_id].data_stream); cwc_convnet_convolutional_backward_propagate(first_gpu_layer, batch, GPU(convnet)->device[device_id].backwards[1], GPU(convnet)->device[device_id].forwards[0], context->device[device_id].input, 
GPU(convnet)->device[device_id].backwards[0], first_gpu_configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%d %d %d, elapsed time for first convolutional layer bprop: %f milliseconds\n", EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.x, EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.y, EXTRA(first_gpu_layer)->vary.convolutional.backward.coefficient.z, elapsed_time); cudaStreamSynchronize(context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); float* first_grad = 0; cudaMallocHost(&first_grad, sizeof(float) * (first_gpu_layer->wnum + first_gpu_layer->net.convolutional.count)); assert(first_grad); cudaMemcpy(first_grad, first_gpu_configuration->w, sizeof(float) * (first_gpu_layer->wnum + first_gpu_layer->net.convolutional.count), cudaMemcpyDeviceToHost); printf("finished backward propagate first convolutional layer on GPU\n"); cudaEventDestroy(start); cudaEventDestroy(stop); int x, y, k, c; for (i = 0; i < batch; i++) { printf("doing batch %d of %d\n", i + 1, batch); ccv_categorized_t* categorized = (ccv_categorized_t*)ccv_array_get(categorizeds, i); for (x = 0; x < categorized->matrix->rows * categorized->matrix->cols * CCV_GET_CHANNEL(categorized->matrix->type); x++) categorized->matrix->data.f32[x] = categorized->matrix->data.f32[x] - 128; // first convolutional layer forward propagate ccv_convnet_layer_t* first_cpu_layer = convnet->layers; _ccv_convnet_convolutional_forward_propagate(first_cpu_layer, categorized->matrix, convnet->acts); ccv_dense_matrix_t* a = convnet->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = first_out[k * first_out_rows * 
first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = a->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 1: %d %d %d %d: |%f - %f| = %f\n", i, x, y, k, p, q, delta); } // second average pool layer forward propagate ccv_convnet_layer_t* second_cpu_layer = convnet->layers + 1; _ccv_convnet_average_pool_forward_propagate(second_cpu_layer, convnet->acts[0], convnet->acts + 1); ccv_dense_matrix_t* b = convnet->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = second_out[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = b->data.f32[y * second_out_cols * second_out_channels + x * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer forward propagate ccv_convnet_layer_t* third_cpu_layer = convnet->layers + 2; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, convnet->acts[1], convnet->acts + 2); ccv_dense_matrix_t* c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels; k++) { float p = third_out[k * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // forth average pool layer forward propagate ccv_convnet_layer_t* forth_cpu_layer = convnet->layers + 3; _ccv_convnet_average_pool_forward_propagate(forth_cpu_layer, convnet->acts[2], convnet->acts + 3); 
ccv_dense_matrix_t* d = convnet->acts[3]; for (y = 0; y < forth_out_rows; y++) for (x = 0; x < forth_out_cols; x++) for (k = 0; k < forth_out_channels; k++) { float p = forth_out[k * forth_out_rows * forth_out_cols * batch + (y * forth_out_cols + x) * batch + i]; float q = d->data.f32[y * forth_out_cols * forth_out_channels + x * forth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 4: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // fifth convolutional layer forward propagate ccv_convnet_layer_t* fifth_cpu_layer = convnet->layers + 4; _ccv_convnet_convolutional_forward_propagate(fifth_cpu_layer, convnet->acts[3], convnet->acts + 4); ccv_dense_matrix_t* e = convnet->acts[4]; for (y = 0; y < fifth_out_rows; y++) for (x = 0; x < fifth_out_cols; x++) for (k = 0; k < fifth_out_channels; k++) { float p = fifth_out[k * fifth_out_rows * fifth_out_cols * batch + (y * fifth_out_cols + x) * batch + i]; float q = e->data.f32[(y * fifth_out_cols + x) * fifth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 5: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // sixth convolutional layer forward propagate ccv_convnet_layer_t* sixth_cpu_layer = convnet->layers + 5; _ccv_convnet_convolutional_forward_propagate(sixth_cpu_layer, convnet->acts[4], convnet->acts + 5); ccv_dense_matrix_t* f = convnet->acts[5]; for (y = 0; y < sixth_out_rows; y++) for (x = 0; x < sixth_out_cols; x++) for (k = 0; k < sixth_out_channels; k++) { float p = sixth_out[k * sixth_out_rows * sixth_out_cols * batch + (y * sixth_out_cols + x) * batch + i]; float q = f->data.f32[(y * sixth_out_cols + x) * sixth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 6: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // seventh convolutional layer forward propagate 
ccv_convnet_layer_t* seventh_cpu_layer = convnet->layers + 6; _ccv_convnet_convolutional_forward_propagate(seventh_cpu_layer, convnet->acts[5], convnet->acts + 6); ccv_dense_matrix_t* g = convnet->acts[6]; for (y = 0; y < seventh_out_rows; y++) for (x = 0; x < seventh_out_cols; x++) for (k = 0; k < seventh_out_channels; k++) { float p = seventh_out[k * seventh_out_rows * seventh_out_cols * batch + (y * seventh_out_cols + x) * batch + i]; float q = g->data.f32[(y * seventh_out_cols + x) * seventh_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 7: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // eleventh full connect layer forward propagate ccv_convnet_layer_t* eleventh_cpu_layer = convnet->layers + 10; convnet->acts[9] = ccv_dense_matrix_new(eleventh_cpu_layer->input.node.count, 1, CCV_32F | CCV_C1, 0, 0); for (k = 0; k < eleventh_cpu_layer->input.node.count; k++) convnet->acts[9]->data.f32[k] = eleventh_in[k * batch + i]; _ccv_convnet_full_connect_forward_propagate(eleventh_cpu_layer, convnet->acts[9], convnet->acts + 10); ccv_dense_matrix_t* z = convnet->acts[10]; for (k = 0; k < eleventh_cpu_layer->net.full_connect.count; k++) { float p = eleventh_out[k * batch + i]; float q = z->data.f32[k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("fc fprop 11: %d %d: |%g - %g| = %g\n", i, k, p, q, delta); } _ccv_convnet_full_connect_backward_propagate(eleventh_cpu_layer, convnet->acts[10], convnet->acts[10], convnet->acts[9], update_params->acts + 9, update_params->layers + 10); ccv_matrix_free(convnet->acts[9]); ccv_dense_matrix_t* bz = update_params->acts[9]; for (k = 0; k < eleventh_cpu_layer->input.node.count; k++) { float p = eleventh_back[k * batch + i]; float q = bz->data.f32[k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("fc bprop 11: %d %d: |%g - %g| = %g\n", i, k, p, q, delta); } 
// seventh convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(seventh_cpu_layer, convnet->acts[6], convnet->acts[6], convnet->acts[5], update_params->acts + 5, update_params->layers + 6); ccv_dense_matrix_t* bg = update_params->acts[5]; for (y = 0; y < sixth_out_rows; y++) for (x = 0; x < sixth_out_cols; x++) for (k = 0; k < sixth_out_channels; k++) { float p = seventh_back[k * sixth_out_rows * sixth_out_cols * batch + (y * sixth_out_cols + x) * batch + i]; float q = bg->data.f32[(y * sixth_out_cols + x) * sixth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // sixth convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(sixth_cpu_layer, update_params->acts[5], convnet->acts[5], convnet->acts[4], update_params->acts + 4, update_params->layers + 5); ccv_dense_matrix_t* bf = update_params->acts[4]; for (y = 0; y < fifth_out_rows; y++) for (x = 0; x < fifth_out_cols; x++) for (k = 0; k < fifth_out_channels; k++) { float p = sixth_back[k * fifth_out_rows * fifth_out_cols * batch + (y * fifth_out_cols + x) * batch + i]; float q = bf->data.f32[(y * fifth_out_cols + x) * fifth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 6: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // fifth convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(fifth_cpu_layer, update_params->acts[4], convnet->acts[4], convnet->acts[3], update_params->acts + 3, update_params->layers + 4); ccv_dense_matrix_t* be = update_params->acts[3]; for (y = 0; y < forth_out_rows; y++) for (x = 0; x < forth_out_cols; x++) for (k = 0; k < forth_out_channels; k++) { float p = fifth_back[k * forth_out_rows * forth_out_cols * batch + (y * forth_out_cols + x) * batch + i]; float q = be->data.f32[(y * 
forth_out_cols + x) * forth_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-2) printf("conv bprop 5: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], convnet->acts[1], update_params->acts + 1, update_params->layers + 2); ccv_dense_matrix_t* bc = update_params->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = third_back[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // second average pool layer backward propagate _ccv_convnet_average_pool_backward_propagate(second_cpu_layer, update_params->acts[1], convnet->acts[0], update_params->acts); ccv_dense_matrix_t* bb = update_params->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = second_back[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = bb->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool bprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // first convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(first_cpu_layer, update_params->acts[0], convnet->acts[0], categorized->matrix, 0, update_params->layers); } ccv_convnet_layer_t* eleventh_cpu_configuration = update_params->layers + 10; for (x = 0; x < 
eleventh_cpu_configuration->net.full_connect.count; x++) for (y = 0; y < eleventh_cpu_configuration->input.node.count; y++) { float p = eleventh_cpu_configuration->w[x * eleventh_cpu_configuration->input.node.count + y]; float q = eleventh_grad[x * eleventh_cpu_configuration->input.node.count + y]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("fc bprop 11: %d %d: |%g - %g| = %g\n", x, y, p, q, delta); } for (x = 0; x < eleventh_cpu_configuration->net.full_connect.count; x++) { float p = eleventh_cpu_configuration->bias[x]; float q = eleventh_grad[eleventh_cpu_configuration->net.full_connect.count * eleventh_cpu_configuration->input.node.count + x]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("fc bprop 11 bias: %d: |%g - %g| = %g\n", x, p, q, delta); } ccv_convnet_layer_t* seventh_cpu_configuration = update_params->layers + 6; int seventh_filter_rows = seventh_gpu_layer->net.convolutional.rows; int seventh_filter_cols = seventh_gpu_layer->net.convolutional.cols; int seventh_filter_count = seventh_gpu_layer->net.convolutional.count; int seventh_filter_channels = seventh_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < seventh_filter_rows; y++) for (x = 0; x < seventh_filter_cols; x++) for (k = 0; k < seventh_filter_count; k++) for (c = 0; c < seventh_filter_channels; c++) { float p = seventh_cpu_configuration->w[(y * seventh_filter_cols + x) * seventh_filter_channels + k * seventh_filter_cols * seventh_filter_rows * seventh_filter_channels + c]; float q = seventh_grad[(y * seventh_filter_cols + x) * seventh_filter_count + k + c * seventh_filter_cols * seventh_filter_rows * seventh_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < seventh_filter_count; k++) { float p = seventh_cpu_configuration->bias[k]; float q = 
seventh_grad[seventh_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 7 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* sixth_cpu_configuration = update_params->layers + 5; int sixth_filter_rows = sixth_gpu_layer->net.convolutional.rows; int sixth_filter_cols = sixth_gpu_layer->net.convolutional.cols; int sixth_filter_count = sixth_gpu_layer->net.convolutional.count; int sixth_filter_channels = sixth_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < sixth_filter_rows; y++) for (x = 0; x < sixth_filter_cols; x++) for (k = 0; k < sixth_filter_count; k++) for (c = 0; c < sixth_filter_channels; c++) { float p = sixth_cpu_configuration->w[(y * sixth_filter_cols + x) * sixth_filter_channels + k * sixth_filter_cols * sixth_filter_rows * sixth_filter_channels + c]; float q = sixth_grad[(y * sixth_filter_cols + x) * sixth_filter_count + k + c * sixth_filter_cols * sixth_filter_rows * sixth_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 6: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < sixth_filter_count; k++) { float p = sixth_cpu_configuration->bias[k]; float q = sixth_grad[sixth_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 6 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* fifth_cpu_configuration = update_params->layers + 4; int fifth_filter_rows = fifth_gpu_layer->net.convolutional.rows; int fifth_filter_cols = fifth_gpu_layer->net.convolutional.cols; int fifth_filter_count = fifth_gpu_layer->net.convolutional.count; int fifth_filter_channels = fifth_gpu_layer->net.convolutional.channels; for (y = 0; y < fifth_filter_rows; y++) for (x = 0; x < fifth_filter_cols; x++) for (k = 0; k < fifth_filter_count; k++) for (c = 0; c < fifth_filter_channels; c++) { float p = 
fifth_cpu_configuration->w[(y * fifth_filter_cols + x) * fifth_filter_channels + k * fifth_filter_cols * fifth_filter_rows * fifth_filter_channels + c]; float q = fifth_grad[(y * fifth_filter_cols + x) * fifth_filter_count + k + c * fifth_filter_cols * fifth_filter_rows * fifth_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-2) printf("conv bprop 5: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < fifth_filter_count; k++) { float p = fifth_cpu_configuration->bias[k]; float q = fifth_grad[fifth_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 5 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } ccv_convnet_layer_t* third_cpu_configuration = update_params->layers + 2; int third_filter_rows = third_gpu_layer->net.convolutional.rows; int third_filter_cols = third_gpu_layer->net.convolutional.cols; int third_filter_count = third_gpu_layer->net.convolutional.count; int third_filter_channels = third_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < third_filter_rows; y++) for (x = 0; x < third_filter_cols; x++) for (k = 0; k < third_filter_count; k++) for (c = 0; c < third_filter_channels; c++) { float p = third_cpu_configuration->w[(y * third_filter_cols + x) * third_filter_channels + k * third_filter_cols * third_filter_rows * third_filter_channels + c]; float q = third_grad[(y * third_filter_cols + x) * third_filter_count + k + c * third_filter_cols * third_filter_rows * third_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < third_filter_count; k++) { float p = third_cpu_configuration->bias[k]; float q = third_grad[third_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3 bias: %d: |%g - %g| 
= %g\n", k, p, q, delta); } ccv_convnet_layer_t* first_cpu_configuration = update_params->layers; int first_filter_rows = first_gpu_layer->net.convolutional.rows; int first_filter_cols = first_gpu_layer->net.convolutional.cols; int first_filter_count = first_gpu_layer->net.convolutional.count; int first_filter_channels = first_gpu_layer->net.convolutional.channels; for (y = 0; y < first_filter_rows; y++) for (x = 0; x < first_filter_cols; x++) for (k = 0; k < first_filter_count; k++) for (c = 0; c < first_filter_channels; c++) { float p = first_cpu_configuration->w[(y * first_filter_cols + x) * first_filter_channels + k * first_filter_cols * first_filter_rows * first_filter_channels + c]; float q = first_grad[(y * first_filter_cols + x) * first_filter_count + k + c * first_filter_cols * first_filter_rows * first_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 1: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } for (k = 0; k < first_filter_count; k++) { float p = first_cpu_configuration->bias[k]; float q = first_grad[first_gpu_layer->wnum + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 1 bias: %d: |%g - %g| = %g\n", k, p, q, delta); } cudaFreeHost(eleventh_in); }
faf3cf336b92f4e0ee649539de27b77a8c5b7f21.hip
// !!! This is a file automatically generated by hipify!!! //============================================================ // STUDENT NAME: <your name> // MATRIC NO. : <matric no.> // NUS EMAIL : <your NUS email address> // COMMENTS TO GRADER: // <comments to grader, if any> // // ============================================================ // // FILE: convolution.cu // Include files from C standard library. #include <stdlib.h> #include <stdio.h> #include <math.h> // Includes CUDA. #include <hip/hip_runtime.h> // Includes helper functions from CUDA Samples SDK. #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples ///////////////////////////////////////////////////////////////////////////// // CONSTANTS & GLOBAL VARIABLES ///////////////////////////////////////////////////////////////////////////// // FILTER_WIDTH must be odd, and BLOCK_SIZE >= FILTER_WIDTH. #define FILTER_WIDTH 249 // Number of CUDA threads per thread block. BLOCK_SIZE >= FILTER_WIDTH. #define BLOCK_SIZE 256 // Number of elements in the data. // Note that DATA_SIZE is always a multiple of BLOCK_SIZE. #define DATA_SIZE (2048 * BLOCK_SIZE) // Number of CUDA thread blocks. #define NUM_BLOCKS ( ( (DATA_SIZE) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) ) //=========================================================================== // CUDA Kernel 1. // Does not use shared memory. // Does not care about memory coalesces. // // Compute the convolution of the data and the filter. // Filter width (filterWidth) must be odd, and the filter's // origin is the center element, that is the element // filter[ filterWidth/2 ]. // // For each output element output[i] that does not have enough input // data elements in its neighborhood (that is when i < (filterWidth/2) or // when i >= (dataSize - filterWidth/2)), the output element output[i] // will have value 0.0. 
//
//===========================================================================
__global__ void GPU_Convolve1( const float *data, int dataSize,
                               const float *filter, int filterWidth,
                               float *output )
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int filterRadius = filterWidth / 2;

    // Guard against a grid that over-covers the data. (Here DATA_SIZE is a
    // multiple of BLOCK_SIZE, but the guard is cheap and makes the kernel
    // safe for any launch configuration.)
    if ( tid >= dataSize ) return;

    // Accumulate in a register instead of doing a global-memory
    // read-modify-write of output[tid] once per filter tap, as the
    // original did. The additions happen in the same order (k ascending),
    // so the floating-point result is bit-identical.
    float sum = 0.0f;

    // Only elements with a full neighborhood produce a non-zero result;
    // the first/last filterRadius outputs stay 0.
    if ( tid >= filterRadius && tid < dataSize - filterRadius )
    {
        int start = tid - filterRadius;   // leftmost input element used

        for ( int k = 0; k < filterWidth; k++ )
            sum += filter[k] * data[start + k];
    }

    output[tid] = sum;   // single coalesced store per thread
}

//===========================================================================
// CUDA Kernel 2.
// Use shared memory.
// Care about memory coalesces.
// Care about shared memory conflicts.
//
// Compute the convolution of the data and the filter.
// Filter width (filterWidth) must be odd, and the filter's
// origin is the center element, that is the element
// filter[ filterWidth/2 ].
//
// For each output element output[i] that does not have enough input
// data elements in its neighborhood (that is when i < (filterWidth/2) or
// when i >= (dataSize - filterWidth/2)), the output element output[i]
// will have value 0.0.
//
// Assume that filterWidth <= BLOCK_SIZE.
// Assume that dataSize is a multiple of BLOCK_SIZE.
//
//===========================================================================
__global__ void GPU_Convolve2( const float *data, int dataSize,
                               const float *filter, int filterWidth,
                               float *output )
{
    __shared__ float filterS[ BLOCK_SIZE ];
    __shared__ float dataS[ 3 * BLOCK_SIZE ];

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int tx = threadIdx.x;
    int filterRadius = filterWidth / 2;

    // Stage the filter in shared memory.
    // BUG FIX: the original executed filterS[tx] = filter[tx] for every
    // thread, but filter[] holds only filterWidth (< BLOCK_SIZE) elements,
    // so threads with tx >= filterWidth read past the end of the array.
    // Those extra shared slots are never read (the loop below stops at
    // filterWidth), so guarding the load fixes the OOB read without
    // changing any result.
    if ( tx < filterWidth )
        filterS[tx] = filter[tx];

    // Stage the current block plus one halo block on each side, so the
    // convolution window of every thread lies entirely in shared memory.
    if ( blockIdx.x > 0 )                  // left halo (from previous block)
        dataS[tx] = data[tid - BLOCK_SIZE];

    if ( blockIdx.x < NUM_BLOCKS - 1 )     // right halo (from next block)
        dataS[tx + 2 * BLOCK_SIZE] = data[tid + BLOCK_SIZE];

    dataS[tx + BLOCK_SIZE] = data[tid];    // center section

    // All staged data must be visible block-wide before any thread computes.
    __syncthreads();

    // Accumulate in a register; store once (same FP add order as before,
    // so results are bit-identical to the original).
    float sum = 0.0f;

    // Only elements with a full neighborhood produce a non-zero result.
    if ( tid >= filterRadius && tid < dataSize - filterRadius )
    {
        int start = tx + BLOCK_SIZE - filterRadius;  // window start in dataS

        for ( int k = 0; k < filterWidth; k++ )
            sum += filterS[k] * dataS[start + k];
    }

    output[tid] = sum;
}

//===========================================================================
// CPU version.
//
// Compute the convolution of the data and the filter.
// Filter width (filterWidth) must be odd, and the filter's
// origin is the center element, that is the element
// filter[ filterWidth/2 ].
//
// For each output element output[i] that does not have enough input
// data elements in its neighborhood (that is when i < (filterWidth/2) or
// when i >= (dataSize - filterWidth/2)), the output element output[i]
// will have value 0.0.
//
//===========================================================================
static void CPU_Convolve( const float *data, int dataSize,
                          const float *filter, int filterWidth,
                          float *output )
{
    // Reference implementation: zero every output, then accumulate the
    // dot product of the filter with each full input neighborhood.
    const int radius = filterWidth / 2;

    for ( int i = 0; i < dataSize; i++ )
        output[i] = 0.0;

    for ( int i = radius; i < dataSize - radius; i++ )
    {
        // Window of filterWidth inputs centered on element i.
        const float *window = data + ( i - radius );

        for ( int k = 0; k < filterWidth; k++ )
            output[i] += filter[k] * window[k];
    }
}

//===========================================================================
// Returns a random value in the range [min, max] from a uniform distribution.
//===========================================================================
inline static double UniformRandom( double min, double max )
{
    // Map rand()'s [0, RAND_MAX] linearly onto [min, max].
    double unit = (double) rand() / RAND_MAX;
    return min + unit * ( max - min );
}

//===========================================================================
// Generates a set of random floating-point numbers in the range [min,max]
// and put them in the the array A.
//===========================================================================
static void GenerateRandomArray( float *A, int numElems, float min, float max )
{
    // Fill A with numElems independent uniform samples in [min, max].
    float *end = A + numElems;

    while ( A < end )
        *A++ = (float) UniformRandom( min, max );
}

//===========================================================================
// Return true iff all corresponding elements in the float arrays A and B
// are approximately equal (i.e. the absolute difference is within the
// given epsilon).
//===========================================================================
// Return true iff every pair A[i], B[i] differs by at most epsilon.
static bool FloatArrayEqual( const float *A, const float *B, int numElems,
                             float epsilon )
{
    for ( int i = 0; i < numElems; i++ )
        if ( fabs( A[i] - B[i] ) > epsilon ) return false;
    return true;
}

// Registered with atexit() so the console window stays open until the
// user presses Enter before the program terminates.
void WaitForEnterKeyBeforeExit( void )
{
    fflush( stdin );
    getchar();
}

//===========================================================================
// The main function
//===========================================================================
int main( int argc, char** argv )
{
    atexit( WaitForEnterKeyBeforeExit );

    // Set seed for rand() so every run uses the same test data.
    srand( 123 );

    // Use command-line specified CUDA device, otherwise use device with highest Gflops/s.
    int devID = findCudaDevice( argc, (const char **)argv );

    // Create a timer.
    StopWatchInterface *timer = 0;
    sdkCreateTimer( &timer );

    //---------------------------------------------------------------------------
    // Allocate memory and generate test data.
    //---------------------------------------------------------------------------

    // Allocate host memory for filter, input data and result arrays.
    float *h_filter = (float *) malloc( FILTER_WIDTH * sizeof(float) );
    float *h_data = (float *) malloc( DATA_SIZE * sizeof(float) );
    float *h_output = (float *) malloc( DATA_SIZE * sizeof(float) );

    // Allocate host memory for receiving results from the GPU.
    float *d2h_output1 = (float *) malloc( DATA_SIZE * sizeof(float) );
    float *d2h_output2 = (float *) malloc( DATA_SIZE * sizeof(float) );

    // Allocate device memory.
    float *d_filter, *d_data, *d_output;
    checkCudaErrors( hipMalloc( (void**) &d_filter, FILTER_WIDTH * sizeof(float) ) );
    checkCudaErrors( hipMalloc( (void**) &d_data, DATA_SIZE * sizeof(float) ) );
    checkCudaErrors( hipMalloc( (void**) &d_output, DATA_SIZE * sizeof(float) ) );

    // Fill the host filter and data arrays with random floating-point numbers.
    GenerateRandomArray( h_filter, FILTER_WIDTH, 0.0, 1.0 );
    GenerateRandomArray( h_data, DATA_SIZE, 1.0, 5.0 );

    //---------------------------------------------------------------------------
    // Print some program parameter values.
    //---------------------------------------------------------------------------
    printf( "Filter width = %d\n", FILTER_WIDTH );
    printf( "Data size = %d\n", DATA_SIZE );
    printf( "Thread block size = %d\n", BLOCK_SIZE );
    printf( "Number of thread blocks = %d\n", NUM_BLOCKS );
    printf( "\n\n" );

    //---------------------------------------------------------------------------
    // Perform computation on CPU (reference result for verification below).
    //---------------------------------------------------------------------------
    printf( "CPU COMPUTATION:\n" );

    // Reset and start timer.
    sdkResetTimer( &timer );
    sdkStartTimer( &timer );

    // Compute on CPU.
    CPU_Convolve( h_data, DATA_SIZE, h_filter, FILTER_WIDTH, h_output );

    // Stop timer.
    sdkStopTimer( &timer );
    printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );

    // Print some results.
    printf( "First element = %.8f\n", h_output[0] );
    printf( "Middle element = %.8f\n", h_output[ DATA_SIZE / 2 ] );
    printf( "Last element = %.8f\n", h_output[ DATA_SIZE - 1 ] );
    printf( "\n\n" );

    //---------------------------------------------------------------------------
    // Perform computation on GPU using Kernel 1 (not using shared memory).
    // NOTE: the timed section includes the host<->device copies, not just
    // the kernel itself.
    //---------------------------------------------------------------------------
    printf( "GPU COMPUTATION 1 (not using Shared Memory):\n" );

    // Reset and start timer.
    sdkResetTimer( &timer );
    sdkStartTimer( &timer );

    // Copy host memory to device.
    checkCudaErrors( hipMemcpy( d_filter, h_filter, FILTER_WIDTH * sizeof(float), hipMemcpyHostToDevice ) );
    checkCudaErrors( hipMemcpy( d_data, h_data, DATA_SIZE * sizeof(float), hipMemcpyHostToDevice ) );

    // Clear the output array in device memory.
    checkCudaErrors( hipMemset( d_output, 0, DATA_SIZE * sizeof(float) ) );

    // Execute the kernel.
    hipLaunchKernelGGL(( GPU_Convolve1) , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, DATA_SIZE, d_filter, FILTER_WIDTH, d_output );

    // Check if kernel execution generated any error.
    getLastCudaError( "Kernel execution failed" );

    // Copy result from device memory to host (blocking copy, so it also
    // waits for the kernel to finish before the timer stops).
    checkCudaErrors( hipMemcpy( d2h_output1, d_output, DATA_SIZE * sizeof(float), hipMemcpyDeviceToHost ) );

    // Stop timer.
    sdkStopTimer( &timer );
    printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );

    // Print some results.
    printf( "First element = %.8f\n", d2h_output1[0] );
    printf( "Middle element = %.8f\n", d2h_output1[ DATA_SIZE / 2 ] );
    printf( "Last element = %.8f\n", d2h_output1[ DATA_SIZE - 1 ] );

    // Check result with reference result computed by CPU.
    bool equal1 = FloatArrayEqual( h_output, d2h_output1, DATA_SIZE, 0.001f );
    printf( "Verify GPU result... %s\n", (equal1)? "PASS" : "FAIL" );
    printf( "\n\n" );

    //---------------------------------------------------------------------------
    // Perform computation on GPU using Kernel 2 (using shared memory).
    //---------------------------------------------------------------------------
    printf( "GPU COMPUTATION 2 (using Shared Memory):\n" );

    // Reset and start timer.
    sdkResetTimer( &timer );
    sdkStartTimer( &timer );

    // Copy host memory to device.
    checkCudaErrors( hipMemcpy( d_filter, h_filter, FILTER_WIDTH * sizeof(float), hipMemcpyHostToDevice ) );
    checkCudaErrors( hipMemcpy( d_data, h_data, DATA_SIZE * sizeof(float), hipMemcpyHostToDevice ) );

    // Clear the output array in device memory.
    checkCudaErrors( hipMemset( d_output, 0, DATA_SIZE * sizeof(float) ) );

    // Execute the kernel.
    hipLaunchKernelGGL(( GPU_Convolve2) , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, DATA_SIZE, d_filter, FILTER_WIDTH, d_output );

    // Check if kernel execution generated any error.
    getLastCudaError( "Kernel execution failed" );

    // Copy result from device memory to host.
    checkCudaErrors( hipMemcpy( d2h_output2, d_output, DATA_SIZE * sizeof(float), hipMemcpyDeviceToHost ) );

    // Stop timer.
    sdkStopTimer( &timer );
    printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );

    // Print some results.
    printf( "First element = %.8f\n", d2h_output2[0] );
    printf( "Middle element = %.8f\n", d2h_output2[ DATA_SIZE / 2 ] );
    printf( "Last element = %.8f\n", d2h_output2[ DATA_SIZE - 1 ] );

    // Check result with reference result computed by CPU.
    bool equal2 = FloatArrayEqual( h_output, d2h_output2, DATA_SIZE, 0.001f );
    printf( "Verify GPU result... %s\n", (equal2)? "PASS" : "FAIL" );
    printf( "\n\n" );

    //---------------------------------------------------------------------------
    // Clean up.
    //---------------------------------------------------------------------------

    // Destroy the timer.
    sdkDeleteTimer( &timer );

    // Free up memory.
    free( h_filter );
    free( h_data );
    free( h_output );
    free( d2h_output1 );
    free( d2h_output2 );

    checkCudaErrors( hipFree( d_filter ) );
    checkCudaErrors( hipFree( d_data ) );
    checkCudaErrors( hipFree( d_output ) );

    hipDeviceReset();
}
faf3cf336b92f4e0ee649539de27b77a8c5b7f21.cu
//============================================================ // STUDENT NAME: <your name> // MATRIC NO. : <matric no.> // NUS EMAIL : <your NUS email address> // COMMENTS TO GRADER: // <comments to grader, if any> // // ============================================================ // // FILE: convolution.cu // Include files from C standard library. #include <stdlib.h> #include <stdio.h> #include <math.h> // Includes CUDA. #include <cuda_runtime.h> // Includes helper functions from CUDA Samples SDK. #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples ///////////////////////////////////////////////////////////////////////////// // CONSTANTS & GLOBAL VARIABLES ///////////////////////////////////////////////////////////////////////////// // FILTER_WIDTH must be odd, and BLOCK_SIZE >= FILTER_WIDTH. #define FILTER_WIDTH 249 // Number of CUDA threads per thread block. BLOCK_SIZE >= FILTER_WIDTH. #define BLOCK_SIZE 256 // Number of elements in the data. // Note that DATA_SIZE is always a multiple of BLOCK_SIZE. #define DATA_SIZE (2048 * BLOCK_SIZE) // Number of CUDA thread blocks. #define NUM_BLOCKS ( ( (DATA_SIZE) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) ) //=========================================================================== // CUDA Kernel 1. // Does not use shared memory. // Does not care about memory coalesces. // // Compute the convolution of the data and the filter. // Filter width (filterWidth) must be odd, and the filter's // origin is the center element, that is the element // filter[ filterWidth/2 ]. // // For each output element output[i] that does not have enough input // data elements in its neighborhood (that is when i < (filterWidth/2) or // when i >= (dataSize - filterWidth/2)), the output element output[i] // will have value 0.0. 
//
//===========================================================================
__global__ void GPU_Convolve1( const float *data, int dataSize,
                               const float *filter, int filterWidth,
                               float *output )
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int filterRadius = filterWidth / 2;

    // Guard against a grid that over-covers the data. (Here DATA_SIZE is a
    // multiple of BLOCK_SIZE, but the guard is cheap and makes the kernel
    // safe for any launch configuration.)
    if ( tid >= dataSize ) return;

    // Accumulate in a register instead of doing a global-memory
    // read-modify-write of output[tid] once per filter tap, as the
    // original did. The additions happen in the same order (k ascending),
    // so the floating-point result is bit-identical.
    float sum = 0.0f;

    // Only elements with a full neighborhood produce a non-zero result;
    // the first/last filterRadius outputs stay 0.
    if ( tid >= filterRadius && tid < dataSize - filterRadius )
    {
        int start = tid - filterRadius;   // leftmost input element used

        for ( int k = 0; k < filterWidth; k++ )
            sum += filter[k] * data[start + k];
    }

    output[tid] = sum;   // single coalesced store per thread
}

//===========================================================================
// CUDA Kernel 2.
// Use shared memory.
// Care about memory coalesces.
// Care about shared memory conflicts.
//
// Compute the convolution of the data and the filter.
// Filter width (filterWidth) must be odd, and the filter's
// origin is the center element, that is the element
// filter[ filterWidth/2 ].
//
// For each output element output[i] that does not have enough input
// data elements in its neighborhood (that is when i < (filterWidth/2) or
// when i >= (dataSize - filterWidth/2)), the output element output[i]
// will have value 0.0.
//
// Assume that filterWidth <= BLOCK_SIZE.
// Assume that dataSize is a multiple of BLOCK_SIZE.
//
//===========================================================================
__global__ void GPU_Convolve2( const float *data, int dataSize,
                               const float *filter, int filterWidth,
                               float *output )
{
    __shared__ float filterS[ BLOCK_SIZE ];
    __shared__ float dataS[ 3 * BLOCK_SIZE ];

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int tx = threadIdx.x;
    int filterRadius = filterWidth / 2;

    // Stage the filter in shared memory.
    // BUG FIX: the original executed filterS[tx] = filter[tx] for every
    // thread, but filter[] holds only filterWidth (< BLOCK_SIZE) elements,
    // so threads with tx >= filterWidth read past the end of the array.
    // Those extra shared slots are never read (the loop below stops at
    // filterWidth), so guarding the load fixes the OOB read without
    // changing any result.
    if ( tx < filterWidth )
        filterS[tx] = filter[tx];

    // Stage the current block plus one halo block on each side, so the
    // convolution window of every thread lies entirely in shared memory.
    if ( blockIdx.x > 0 )                  // left halo (from previous block)
        dataS[tx] = data[tid - BLOCK_SIZE];

    if ( blockIdx.x < NUM_BLOCKS - 1 )     // right halo (from next block)
        dataS[tx + 2 * BLOCK_SIZE] = data[tid + BLOCK_SIZE];

    dataS[tx + BLOCK_SIZE] = data[tid];    // center section

    // All staged data must be visible block-wide before any thread computes.
    __syncthreads();

    // Accumulate in a register; store once (same FP add order as before,
    // so results are bit-identical to the original).
    float sum = 0.0f;

    // Only elements with a full neighborhood produce a non-zero result.
    if ( tid >= filterRadius && tid < dataSize - filterRadius )
    {
        int start = tx + BLOCK_SIZE - filterRadius;  // window start in dataS

        for ( int k = 0; k < filterWidth; k++ )
            sum += filterS[k] * dataS[start + k];
    }

    output[tid] = sum;
}

//===========================================================================
// CPU version.
//
// Compute the convolution of the data and the filter.
// Filter width (filterWidth) must be odd, and the filter's
// origin is the center element, that is the element
// filter[ filterWidth/2 ].
//
// For each output element output[i] that does not have enough input
// data elements in its neighborhood (that is when i < (filterWidth/2) or
// when i >= (dataSize - filterWidth/2)), the output element output[i]
// will have value 0.0.
//
//===========================================================================
static void CPU_Convolve( const float *data, int dataSize,
                          const float *filter, int filterWidth,
                          float *output )
{
    // Reference implementation: zero every output, then accumulate the
    // dot product of the filter with each full input neighborhood.
    const int radius = filterWidth / 2;

    for ( int i = 0; i < dataSize; i++ )
        output[i] = 0.0;

    for ( int i = radius; i < dataSize - radius; i++ )
    {
        // Window of filterWidth inputs centered on element i.
        const float *window = data + ( i - radius );

        for ( int k = 0; k < filterWidth; k++ )
            output[i] += filter[k] * window[k];
    }
}

//===========================================================================
// Returns a random value in the range [min, max] from a uniform distribution.
//===========================================================================
inline static double UniformRandom( double min, double max )
{
    // Map rand()'s [0, RAND_MAX] linearly onto [min, max].
    double unit = (double) rand() / RAND_MAX;
    return min + unit * ( max - min );
}

//===========================================================================
// Generates a set of random floating-point numbers in the range [min,max]
// and put them in the the array A.
//===========================================================================
static void GenerateRandomArray( float *A, int numElems, float min, float max )
{
    // Fill A with numElems independent uniform samples in [min, max].
    float *end = A + numElems;

    while ( A < end )
        *A++ = (float) UniformRandom( min, max );
}

//===========================================================================
// Return true iff all corresponding elements in the float arrays A and B
// are approximately equal (i.e. the absolute difference is within the
// given epsilon).
//=========================================================================== static bool FloatArrayEqual( const float *A, const float *B, int numElems, float epsilon ) { for ( int i = 0; i < numElems; i++ ) if ( fabs( A[i] - B[i] ) > epsilon ) return false; return true; } void WaitForEnterKeyBeforeExit( void ) { fflush( stdin ); getchar(); } //=========================================================================== // The main function //=========================================================================== int main( int argc, char** argv ) { atexit( WaitForEnterKeyBeforeExit ); // Set seed for rand(). srand( 123 ); // Use command-line specified CUDA device, otherwise use device with highest Gflops/s. int devID = findCudaDevice( argc, (const char **)argv ); // Create a timer. StopWatchInterface *timer = 0; sdkCreateTimer( &timer ); //--------------------------------------------------------------------------- // Allocate memory and generate test data. //--------------------------------------------------------------------------- // Allocate host memory for filter, input data and result arrays. float *h_filter = (float *) malloc( FILTER_WIDTH * sizeof(float) ); float *h_data = (float *) malloc( DATA_SIZE * sizeof(float) ); float *h_output = (float *) malloc( DATA_SIZE * sizeof(float) ); // Allocate host memory for receiving results from the GPU. float *d2h_output1 = (float *) malloc( DATA_SIZE * sizeof(float) ); float *d2h_output2 = (float *) malloc( DATA_SIZE * sizeof(float) ); // Allocate device memory. float *d_filter, *d_data, *d_output; checkCudaErrors( cudaMalloc( (void**) &d_filter, FILTER_WIDTH * sizeof(float) ) ); checkCudaErrors( cudaMalloc( (void**) &d_data, DATA_SIZE * sizeof(float) ) ); checkCudaErrors( cudaMalloc( (void**) &d_output, DATA_SIZE * sizeof(float) ) ); // Fill the host filter and data arrays with random floating-point numbers. 
GenerateRandomArray( h_filter, FILTER_WIDTH, 0.0, 1.0 ); GenerateRandomArray( h_data, DATA_SIZE, 1.0, 5.0 ); //--------------------------------------------------------------------------- // Print some program parameter values. //--------------------------------------------------------------------------- printf( "Filter width = %d\n", FILTER_WIDTH ); printf( "Data size = %d\n", DATA_SIZE ); printf( "Thread block size = %d\n", BLOCK_SIZE ); printf( "Number of thread blocks = %d\n", NUM_BLOCKS ); printf( "\n\n" ); //--------------------------------------------------------------------------- // Perform computation on CPU. //--------------------------------------------------------------------------- printf( "CPU COMPUTATION:\n" ); // Reset and start timer. sdkResetTimer( &timer ); sdkStartTimer( &timer ); // Compute on CPU. CPU_Convolve( h_data, DATA_SIZE, h_filter, FILTER_WIDTH, h_output ); // Stop timer. sdkStopTimer( &timer ); printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) ); // Print some results. printf( "First element = %.8f\n", h_output[0] ); printf( "Middle element = %.8f\n", h_output[ DATA_SIZE / 2 ] ); printf( "Last element = %.8f\n", h_output[ DATA_SIZE - 1 ] ); printf( "\n\n" ); //--------------------------------------------------------------------------- // Perform computation on GPU using Kernel 1 (not using shared memory). //--------------------------------------------------------------------------- printf( "GPU COMPUTATION 1 (not using Shared Memory):\n" ); // Reset and start timer. sdkResetTimer( &timer ); sdkStartTimer( &timer ); // Copy host memory to device. checkCudaErrors( cudaMemcpy( d_filter, h_filter, FILTER_WIDTH * sizeof(float), cudaMemcpyHostToDevice ) ); checkCudaErrors( cudaMemcpy( d_data, h_data, DATA_SIZE * sizeof(float), cudaMemcpyHostToDevice ) ); // Clear the output array in device memory. checkCudaErrors( cudaMemset( d_output, 0, DATA_SIZE * sizeof(float) ) ); // Execute the kernel. 
GPU_Convolve1 <<<NUM_BLOCKS, BLOCK_SIZE>>> ( d_data, DATA_SIZE, d_filter, FILTER_WIDTH, d_output ); // Check if kernel execution generated any error. getLastCudaError( "Kernel execution failed" ); // Copy result from device memory to host. checkCudaErrors( cudaMemcpy( d2h_output1, d_output, DATA_SIZE * sizeof(float), cudaMemcpyDeviceToHost ) ); // Stop timer. sdkStopTimer( &timer ); printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) ); // Print some results. printf( "First element = %.8f\n", d2h_output1[0] ); printf( "Middle element = %.8f\n", d2h_output1[ DATA_SIZE / 2 ] ); printf( "Last element = %.8f\n", d2h_output1[ DATA_SIZE - 1 ] ); // Check result with reference result computed by CPU. bool equal1 = FloatArrayEqual( h_output, d2h_output1, DATA_SIZE, 0.001f ); printf( "Verify GPU result... %s\n", (equal1)? "PASS" : "FAIL" ); printf( "\n\n" ); //--------------------------------------------------------------------------- // Perform computation on GPU using Kernel 2 (using shared memory). //--------------------------------------------------------------------------- printf( "GPU COMPUTATION 2 (using Shared Memory):\n" ); // Reset and start timer. sdkResetTimer( &timer ); sdkStartTimer( &timer ); // Copy host memory to device. checkCudaErrors( cudaMemcpy( d_filter, h_filter, FILTER_WIDTH * sizeof(float), cudaMemcpyHostToDevice ) ); checkCudaErrors( cudaMemcpy( d_data, h_data, DATA_SIZE * sizeof(float), cudaMemcpyHostToDevice ) ); // Clear the output array in device memory. checkCudaErrors( cudaMemset( d_output, 0, DATA_SIZE * sizeof(float) ) ); // Execute the kernel. GPU_Convolve2 <<<NUM_BLOCKS, BLOCK_SIZE>>> ( d_data, DATA_SIZE, d_filter, FILTER_WIDTH, d_output ); // Check if kernel execution generated any error. getLastCudaError( "Kernel execution failed" ); // Copy result from device memory to host. checkCudaErrors( cudaMemcpy( d2h_output2, d_output, DATA_SIZE * sizeof(float), cudaMemcpyDeviceToHost ) ); // Stop timer. 
sdkStopTimer( &timer ); printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) ); // Print some results. printf( "First element = %.8f\n", d2h_output2[0] ); printf( "Middle element = %.8f\n", d2h_output2[ DATA_SIZE / 2 ] ); printf( "Last element = %.8f\n", d2h_output2[ DATA_SIZE - 1 ] ); // Check result with reference result computed by CPU. bool equal2 = FloatArrayEqual( h_output, d2h_output2, DATA_SIZE, 0.001f ); printf( "Verify GPU result... %s\n", (equal2)? "PASS" : "FAIL" ); printf( "\n\n" ); //--------------------------------------------------------------------------- // Clean up. //--------------------------------------------------------------------------- // Destroy the timer. sdkDeleteTimer( &timer ); // Free up memory. free( h_filter ); free( h_data ); free( h_output ); free( d2h_output1 ); free( d2h_output2 ); checkCudaErrors( cudaFree( d_filter ) ); checkCudaErrors( cudaFree( d_data ) ); checkCudaErrors( cudaFree( d_output ) ); cudaDeviceReset(); }
a9bfe7a2b6fcacac66b38638810fb7f5797bffda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> void gpu_blur (std::string filename) { BMP Background; Background.ReadFromFile(filename.c_str()); int height = Background.TellHeight(); int width = Background.TellWidth(); int depth = Background.TellBitDepth(); BMP Output(Background); hipError_t cuda_ret; int *weights_d; int weights[18]; //SET WEIGHTS: for(int i = 0; i < 9; i++){ weights[i] = 1; } weights[9] = 16; weights[10] = 8; weights[11] = 16; weights[12] = 8; weights[13] = 4; weights[14] = 8; weights[15] = 16; weights[16] = 8; weights[17] = 16; //WEIGHTS SET //Timing start hipEvent_t begin, end; float time; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord(begin, 0); ebmpBYTE *A_h, *B_h; ebmpBYTE *A_d, *B_d; A_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); B_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ A_h[(i*width+j)*3] = Background.Pixels[i][j].Blue; A_h[(i*width+j)*3+1] = Background.Pixels[i][j].Green; A_h[(i*width+j)*3+2] = Background.Pixels[i][j].Red; } } dim3 dim_grid, dim_block; hipMalloc((void**)&weights_d, sizeof(int)*18 ); hipMalloc((void**)&A_d, sizeof(ebmpBYTE)*width*height*3); hipMalloc((void**)&B_d, sizeof(ebmpBYTE)*width*height*3); hipDeviceSynchronize(); hipMemcpy(weights_d, &weights[0], sizeof(int)*18, hipMemcpyHostToDevice); hipMemcpy(A_d, A_h, sizeof(ebmpBYTE)*width*height*3, hipMemcpyHostToDevice); hipDeviceSynchronize(); dim3 DimGrid(1, 1, 1); dim3 DimBlock(1024, 1, 1); hipLaunchKernelGGL(( gpu_filter), dim3(DimGrid), dim3(DimBlock), 0, 0, A_d, B_d, weights_d, width, height); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) printf("error"); hipMemcpy(B_h, B_d, sizeof(ebmpBYTE)*width*height*3, hipMemcpyDeviceToHost); hipDeviceSynchronize(); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ Output.Pixels[i][j].Blue = B_h[(i*width+j)*3]; 
Output.Pixels[i][j].Green = B_h[(i*width+j)*3+1]; Output.Pixels[i][j].Red = B_h[(i*width+j)*3+2]; } } //Timing end hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&time, begin, end); printf("GPU Blur time: %f ms \n\n", time ); std::string fileout = filename; fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); string extra = "_gpu_blur.bmp"; fileout = fileout + extra; Output.WriteToFile(fileout.c_str()); free(A_h); free(B_h); hipFree(weights_d); hipFree(B_d); hipFree(A_d); return; }
a9bfe7a2b6fcacac66b38638810fb7f5797bffda.cu
#include <string> void gpu_blur (std::string filename) { BMP Background; Background.ReadFromFile(filename.c_str()); int height = Background.TellHeight(); int width = Background.TellWidth(); int depth = Background.TellBitDepth(); BMP Output(Background); cudaError_t cuda_ret; int *weights_d; int weights[18]; //SET WEIGHTS: for(int i = 0; i < 9; i++){ weights[i] = 1; } weights[9] = 16; weights[10] = 8; weights[11] = 16; weights[12] = 8; weights[13] = 4; weights[14] = 8; weights[15] = 16; weights[16] = 8; weights[17] = 16; //WEIGHTS SET //Timing start cudaEvent_t begin, end; float time; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord(begin, 0); ebmpBYTE *A_h, *B_h; ebmpBYTE *A_d, *B_d; A_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); B_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ A_h[(i*width+j)*3] = Background.Pixels[i][j].Blue; A_h[(i*width+j)*3+1] = Background.Pixels[i][j].Green; A_h[(i*width+j)*3+2] = Background.Pixels[i][j].Red; } } dim3 dim_grid, dim_block; cudaMalloc((void**)&weights_d, sizeof(int)*18 ); cudaMalloc((void**)&A_d, sizeof(ebmpBYTE)*width*height*3); cudaMalloc((void**)&B_d, sizeof(ebmpBYTE)*width*height*3); cudaDeviceSynchronize(); cudaMemcpy(weights_d, &weights[0], sizeof(int)*18, cudaMemcpyHostToDevice); cudaMemcpy(A_d, A_h, sizeof(ebmpBYTE)*width*height*3, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); dim3 DimGrid(1, 1, 1); dim3 DimBlock(1024, 1, 1); gpu_filter<<<DimGrid, DimBlock>>>(A_d, B_d, weights_d, width, height); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) printf("error"); cudaMemcpy(B_h, B_d, sizeof(ebmpBYTE)*width*height*3, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ Output.Pixels[i][j].Blue = B_h[(i*width+j)*3]; Output.Pixels[i][j].Green = B_h[(i*width+j)*3+1]; Output.Pixels[i][j].Red = B_h[(i*width+j)*3+2]; } } //Timing end 
cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&time, begin, end); printf("GPU Blur time: %f ms \n\n", time ); std::string fileout = filename; fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); string extra = "_gpu_blur.bmp"; fileout = fileout + extra; Output.WriteToFile(fileout.c_str()); free(A_h); free(B_h); cudaFree(weights_d); cudaFree(B_d); cudaFree(A_d); return; }
836a5e4628631f28ca2fcf8fc63b00953a3ade68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdio.h> #include <math.h> #include "KernelUtils.h" #include "HostUtils.h" #include "GeneNetworkKernelAnalisys.h" #include <list> #include <vector> #include <queue> using namespace std; texture<int, 1, hipReadModeElementType> regulationMatrixTextRef; /** * Verify if the state v2 arrives on v1 (bitsPathernV1) */ __device__ void verifyBeforeStateMult( int regMatrixSizeX, int regMatrixSizeY, int v1, int v2, bool *outCreateEdge ) { bool stopComputation = false; *(outCreateEdge) = true; for (int row = 0; (row < regMatrixSizeY) && !stopComputation; row++) { int number = v2; int bitPathernV2 = 0; int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order int bitQ = number & 1; // number % 2 if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += tex1Dfetch( regulationMatrixTextRef, idxMatrix ) * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // int bitPathernV1 = (v1 / (int) powf( 2, regMatrixSizeY - row - 1 )) % 2; long one = 1; int bitPathernV1 = (v1 / (one << (regMatrixSizeX - row - 1))) & 1; // Fast binary operations: // (i & (n-1)) = i % n // v1 / (1 << (regMatrixSizeX - row - 1)) // Verify result: // All bits in v1 must be equals bitPathernV2 if( bitPathernV1 != bitPathernV2 ) { *(outCreateEdge) = false; stopComputation = true; } } } /** * Compute all states that arrive on v1 (bitsPathernV1), * that is, all states immediately previous to v1 */ __global__ void kernelCalculateBeforeStates( int regMatrixSizeX, int regMatrixSizeY, long v1, bool *visitedStates, int *outBeforeStates, int *outSizeBeforeStates, int maxNumberOfStates, int offset ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int v2 = tid + offset; // Process state v2 only if isn't visited yet if( v2 < maxNumberOfStates && visitedStates[v2] == false ) { bool createEdge = true; // Verify if v2 state arrives in v1 verifyBeforeStateMult( regMatrixSizeX, regMatrixSizeY, v1, v2, &createEdge ); if( createEdge ) { // Exists a vertex from v2 to v1 int nextIndex = atomicAdd( &outSizeBeforeStates[0], 1 ); outBeforeStates[nextIndex] = v2; visitedStates[v2] = true; } } } __global__ void kernelCalculateNextState( int v1, int regMatrixSizeX, int regMatrixSizeY, int *outV2 ) { // One row per thread int row = blockIdx.x + threadIdx.x; int number = v1; int bitPathernV2 = 0; int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order int bitQ = number & 1; // number % 2 if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += tex1Dfetch( regulationMatrixTextRef, idxMatrix ) * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // Exists an arc v1 -> outV2 // int value = bitPathernV2 * powf( 2, (regMatrixSizeY - row - 1) ); long one = 1; int value = bitPathernV2 * (one << (regMatrixSizeX - row - 1)); atomicAdd( &outV2[0], value ); } int executeKernelCalculateNextState( long long int v1, int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY ) { printf( "....Start: executeKernelCalculateNextStage \n\n" ); // Part 1 of 6: define kernel configuration int numBlocksOnGridX = 1; int numBlocksOnGridY = 1; int numThreadsPerBlockX = regMatrixSizeX; int numThreadsPerBlockY = 1; // Number of blocks on grid dim3 dimGrid( numBlocksOnGridX, numBlocksOnGridY ); // Number of threads per block dim3 dimBlock( numThreadsPerBlockX, numThreadsPerBlockY ); int sizeOutV2 = 1; int sizeRegulationMatrix = (regMatrixSizeX * regMatrixSizeY); size_t memSizeOutV2 = sizeOutV2 * sizeof(int); size_t memSizeRegulationMatrix = sizeRegulationMatrix * sizeof(int); printf( "Number of blocks used: %d x %d = %d\n", numBlocksOnGridX, numBlocksOnGridY, (numBlocksOnGridX * numBlocksOnGridY) ); printf( "Number of threads used: %d x %d = %d\n", numThreadsPerBlockX, numThreadsPerBlockY, (numThreadsPerBlockX * numThreadsPerBlockY) ); // Part 2 of 6: allocate host memory int *outV2 = getPointerToMatrix( memSizeOutV2 ); outV2[0] = 0; int *outV2Dev = NULL; int *regulationMatrixDev = NULL; // Regulation matrix allocation memory hipMalloc( (void **) &regulationMatrixDev, memSizeRegulationMatrix ); hipBindTexture( 0, regulationMatrixTextRef, regulationMatrixDev, memSizeRegulationMatrix ); hipMemcpy( regulationMatrixDev, regulationMatrix, memSizeRegulationMatrix, hipMemcpyHostToDevice ); checkCUDAError( "regulationMatrixDev Memory Allocation" ); hipMalloc( (void **) &outV2Dev, memSizeOutV2 ); hipMemcpy( outV2Dev, outV2, memSizeOutV2, hipMemcpyHostToDevice ); checkCUDAError( "outV2Dev Memory Allocation" ); // Part 5 of 6: launch kernel hipEvent_t start, stop; float time; hipEventCreate( &start ); hipEventCreate( &stop ); 
hipEventRecord( start, 0 ); hipLaunchKernelGGL(( kernelCalculateNextState), dim3(dimGrid) , dim3(dimBlock), 0, 0, v1,regMatrixSizeX, regMatrixSizeY, outV2Dev ); // block until the device has completed hipDeviceSynchronize(); // Compute time of kernel execution in milliseconds hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); printf( "time %f s \n", (time / 1000) ); checkCUDAError( "Kernel execution" ); hipMemcpy( outV2, outV2Dev, memSizeOutV2, hipMemcpyDeviceToHost ); // Check for any CUDA errors checkCUDAError( "Memory copy" ); // free device memory hipFree( outV2Dev ); hipFree( regulationMatrixDev ); hipUnbindTexture( regulationMatrixTextRef ); return outV2[0]; } int MAX_POSSIBLE_BEFORE_NODES = 10000; bool * allocVisitedStatesOnCuda( size_t memVisitedStates ) { bool *visitedStates = NULL; hipHostMalloc( (void **) &visitedStates, memVisitedStates, hipHostMallocMapped ); return visitedStates; } long executeKernelAccountBasinOfAtraction( list<long> atractorsList, bool *visitedStates, int sizeStatesVisited, int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY ) { printf( "....Start: executeKernelAccountBasinOfAtraction \n\n" ); // Part 1: define kernel configuration long long int maxNumberOfStates = (int) pow( 2.0, regMatrixSizeX ); long long int numBlocksOnGrid = 0; long long int numThreadsPerBlock = 0; long long int numIterations = 0; long long int restThreadsToExecute = 0; calculateKernelLaunchConfiguration( maxNumberOfStates, &numThreadsPerBlock, &numBlocksOnGrid, &numIterations, &restThreadsToExecute ); long long int totalNumIterationsKernel = numIterations + (restThreadsToExecute <= 0 ? 
0 : 1); int sizeMatrix = (regMatrixSizeX * regMatrixSizeY); int sizeBeforeStatesNum = 1; size_t memMatrixSize = sizeMatrix * sizeof(int); size_t memSizeNumBeforeStates = sizeBeforeStatesNum * sizeof(int); size_t memBeforeStates = MAX_POSSIBLE_BEFORE_NODES * sizeof(int); size_t memVisitedStates = maxNumberOfStates * sizeof(bool); dim3 dimBlock( numThreadsPerBlock ); dim3 dimGrid( numBlocksOnGrid ); printf( "Number of genes: %d \n", regMatrixSizeX ); printf( "Size solution (2^%d) = %lld \n", regMatrixSizeX, maxNumberOfStates ); // 1MB = 1024^2 printf( "Number of blocks used: %lld \n", numBlocksOnGrid ); printf( "Number of threads used: %lld \n", numThreadsPerBlock ); printf( "Iterations: %lld + (rest: %lld) = %lld \n", numIterations, restThreadsToExecute, totalNumIterationsKernel ); // Host memory allocation int *beforeStates = getPointerToMatrix( memBeforeStates ); int *numBeforeStates = getPointerToMatrix( memSizeNumBeforeStates ); if( beforeStates == NULL || beforeStates <= 0 ) { printf( "Host error: beforeStages Memory Allocation \n" ); exit( 0 ); } numBeforeStates[0] = 0; // TODO: Verificar criao de um kernel para inicializar beforeStates for (int i = 0; i < MAX_POSSIBLE_BEFORE_NODES; i++) { beforeStates[i] = -1; } // Device out allocation memory int *regulationMatrixDev = NULL; bool *visitedStatesDev = NULL; int *outBeforeStatesDev = NULL; int *outNumBeforeStatesDev = NULL; hipMalloc( (void **) &regulationMatrixDev, memMatrixSize ); hipBindTexture( 0, regulationMatrixTextRef, regulationMatrixDev, memMatrixSize ); hipMemcpy( regulationMatrixDev, regulationMatrix, memMatrixSize, hipMemcpyHostToDevice ); checkCUDAError( "Memory Allocation" ); //------- Allocate Zero Copy memory ------- // hipHostMalloc( (void **) &visitedStates, memVisitedStates, // hipHostMallocMapped ); hipHostGetDevicePointer( (void **) &visitedStatesDev, (void *) visitedStates, 0 ); checkCUDAError( "visitedStatesDev Memory Allocation" ); // ------- hipMalloc( (void **) &outBeforeStatesDev, 
memBeforeStates ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); hipMalloc( (void **) &outNumBeforeStatesDev, memSizeNumBeforeStates ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); list<long>::iterator it; for (it = atractorsList.begin(); it != atractorsList.end(); ++it) { visitedStates[*it] = true; } long countConectedComponents = 0; hipEvent_t start, stop; float time; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); while( atractorsList.size() > 0 ) { int v1 = atractorsList.front(); atractorsList.pop_front(); int offset = 0; for (int i = 0; i < totalNumIterationsKernel; i++) { // Part 5 of 6: launch kernel // hipEvent_t start, stop; // float time; // hipEventCreate( &start ); // hipEventCreate( &stop ); // hipEventRecord( start, 0 ); numBeforeStates[0] = 0; hipMemcpy( outBeforeStatesDev, beforeStates, memBeforeStates, hipMemcpyHostToDevice ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); hipMemcpy( outNumBeforeStatesDev, numBeforeStates, memSizeNumBeforeStates, hipMemcpyHostToDevice ); checkCUDAError( "outNumBeforeStatesDev Memory Allocation" ); hipLaunchKernelGGL(( kernelCalculateBeforeStates) , dim3(dimGrid) , dim3(dimBlock), 0, 0, regMatrixSizeX, regMatrixSizeY, v1, visitedStatesDev, outBeforeStatesDev, outNumBeforeStatesDev, maxNumberOfStates, offset ); hipDeviceSynchronize(); offset += dimBlock.x * dimGrid.x; checkCUDAError( "Kernel execution" ); hipMemcpy( beforeStates, outBeforeStatesDev, memBeforeStates, hipMemcpyDeviceToHost ); hipMemcpy( numBeforeStates, outNumBeforeStatesDev, memSizeNumBeforeStates, hipMemcpyDeviceToHost ); hipMemcpy( visitedStates, visitedStatesDev, memVisitedStates, hipMemcpyDeviceToHost ); // // Compute time of kernel execution in milliseconds // hipEventRecord( stop, 0 ); // hipEventSynchronize( stop ); // hipEventElapsedTime( &time, start, stop ); // hipEventDestroy( start ); // hipEventDestroy( stop ); // // printf( "time %f s \n", (time / 1000) ); // printf( 
"Number of before states of %d: %d\n", v1, // numBeforeStates[0] ); countConectedComponents += numBeforeStates[0]; for (int i = 0; i < numBeforeStates[0]; i++) { atractorsList.push_back( beforeStates[i] ); beforeStates[i] = -1; // printf( "before[%d]: %d \t ", i, beforeStates[i] ); // printf( "visitedState[%d]: %d\n", beforeStates[i], visitedStates[beforeStates[i]] ); } } } // free device memory hipFree( outBeforeStatesDev ); hipFree( outNumBeforeStatesDev ); hipFree( regulationMatrixDev ); hipUnbindTexture( regulationMatrixTextRef ); // Compute time of kernel execution in milliseconds hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); printf( "Total Before Nodes reached: %ld \n", countConectedComponents ); printf( "time %f s \n\n", (time / 1000) ); checkCUDAError( "executeKernelAccountBasinOfAtraction Cuda Free" ); return countConectedComponents; }
836a5e4628631f28ca2fcf8fc63b00953a3ade68.cu
// includes, system #include <stdio.h> #include <math.h> #include "KernelUtils.h" #include "HostUtils.h" #include "GeneNetworkKernelAnalisys.h" #include <list> #include <vector> #include <queue> using namespace std; texture<int, 1, cudaReadModeElementType> regulationMatrixTextRef; /** * Verify if the state v2 arrives on v1 (bitsPathernV1) */ __device__ void verifyBeforeStateMult( int regMatrixSizeX, int regMatrixSizeY, int v1, int v2, bool *outCreateEdge ) { bool stopComputation = false; *(outCreateEdge) = true; for (int row = 0; (row < regMatrixSizeY) && !stopComputation; row++) { int number = v2; int bitPathernV2 = 0; int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order int bitQ = number & 1; // number % 2 if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += tex1Dfetch( regulationMatrixTextRef, idxMatrix ) * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // int bitPathernV1 = (v1 / (int) powf( 2, regMatrixSizeY - row - 1 )) % 2; long one = 1; int bitPathernV1 = (v1 / (one << (regMatrixSizeX - row - 1))) & 1; // Fast binary operations: // (i & (n-1)) = i % n // v1 / (1 << (regMatrixSizeX - row - 1)) // Verify result: // All bits in v1 must be equals bitPathernV2 if( bitPathernV1 != bitPathernV2 ) { *(outCreateEdge) = false; stopComputation = true; } } } /** * Compute all states that arrive on v1 (bitsPathernV1), * that is, all states immediately previous to v1 */ __global__ void kernelCalculateBeforeStates( int regMatrixSizeX, int regMatrixSizeY, long v1, bool *visitedStates, int *outBeforeStates, int *outSizeBeforeStates, int maxNumberOfStates, int offset ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int v2 = tid + offset; // Process state v2 only if isn't visited yet if( v2 < maxNumberOfStates && visitedStates[v2] == false ) { bool createEdge = true; // Verify if v2 state arrives in v1 verifyBeforeStateMult( regMatrixSizeX, regMatrixSizeY, v1, v2, &createEdge ); if( createEdge ) { // Exists a vertex from v2 to v1 int nextIndex = atomicAdd( &outSizeBeforeStates[0], 1 ); outBeforeStates[nextIndex] = v2; visitedStates[v2] = true; } } } __global__ void kernelCalculateNextState( int v1, int regMatrixSizeX, int regMatrixSizeY, int *outV2 ) { // One row per thread int row = blockIdx.x + threadIdx.x; int number = v1; int bitPathernV2 = 0; int bitQPermanent = 0; for (int col = regMatrixSizeX - 1; (col >= 0) && (number > 0); col--) { int idxMatrix = row * regMatrixSizeY + col; // Generate digits in reverse order int bitQ = number & 1; // number % 2 if( row == col ) { bitQPermanent = bitQ; } bitPathernV2 += tex1Dfetch( regulationMatrixTextRef, idxMatrix ) * bitQ; number /= 2; } // Normalization bitPathernV2 = bitPathernV2 == 0 ? bitQPermanent : bitPathernV2 < 0 ? 
0 : 1; // Exists an arc v1 -> outV2 // int value = bitPathernV2 * powf( 2, (regMatrixSizeY - row - 1) ); long one = 1; int value = bitPathernV2 * (one << (regMatrixSizeX - row - 1)); atomicAdd( &outV2[0], value ); } int executeKernelCalculateNextState( long long int v1, int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY ) { printf( "....Start: executeKernelCalculateNextStage \n\n" ); // Part 1 of 6: define kernel configuration int numBlocksOnGridX = 1; int numBlocksOnGridY = 1; int numThreadsPerBlockX = regMatrixSizeX; int numThreadsPerBlockY = 1; // Number of blocks on grid dim3 dimGrid( numBlocksOnGridX, numBlocksOnGridY ); // Number of threads per block dim3 dimBlock( numThreadsPerBlockX, numThreadsPerBlockY ); int sizeOutV2 = 1; int sizeRegulationMatrix = (regMatrixSizeX * regMatrixSizeY); size_t memSizeOutV2 = sizeOutV2 * sizeof(int); size_t memSizeRegulationMatrix = sizeRegulationMatrix * sizeof(int); printf( "Number of blocks used: %d x %d = %d\n", numBlocksOnGridX, numBlocksOnGridY, (numBlocksOnGridX * numBlocksOnGridY) ); printf( "Number of threads used: %d x %d = %d\n", numThreadsPerBlockX, numThreadsPerBlockY, (numThreadsPerBlockX * numThreadsPerBlockY) ); // Part 2 of 6: allocate host memory int *outV2 = getPointerToMatrix( memSizeOutV2 ); outV2[0] = 0; int *outV2Dev = NULL; int *regulationMatrixDev = NULL; // Regulation matrix allocation memory cudaMalloc( (void **) &regulationMatrixDev, memSizeRegulationMatrix ); cudaBindTexture( 0, regulationMatrixTextRef, regulationMatrixDev, memSizeRegulationMatrix ); cudaMemcpy( regulationMatrixDev, regulationMatrix, memSizeRegulationMatrix, cudaMemcpyHostToDevice ); checkCUDAError( "regulationMatrixDev Memory Allocation" ); cudaMalloc( (void **) &outV2Dev, memSizeOutV2 ); cudaMemcpy( outV2Dev, outV2, memSizeOutV2, cudaMemcpyHostToDevice ); checkCUDAError( "outV2Dev Memory Allocation" ); // Part 5 of 6: launch kernel cudaEvent_t start, stop; float time; cudaEventCreate( &start ); cudaEventCreate( &stop 
); cudaEventRecord( start, 0 ); kernelCalculateNextState<<< dimGrid , dimBlock>>>(v1,regMatrixSizeX, regMatrixSizeY, outV2Dev ); // block until the device has completed cudaThreadSynchronize(); // Compute time of kernel execution in milliseconds cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); printf( "time %f s \n", (time / 1000) ); checkCUDAError( "Kernel execution" ); cudaMemcpy( outV2, outV2Dev, memSizeOutV2, cudaMemcpyDeviceToHost ); // Check for any CUDA errors checkCUDAError( "Memory copy" ); // free device memory cudaFree( outV2Dev ); cudaFree( regulationMatrixDev ); cudaUnbindTexture( regulationMatrixTextRef ); return outV2[0]; } int MAX_POSSIBLE_BEFORE_NODES = 10000; bool * allocVisitedStatesOnCuda( size_t memVisitedStates ) { bool *visitedStates = NULL; cudaHostAlloc( (void **) &visitedStates, memVisitedStates, cudaHostAllocMapped ); return visitedStates; } long executeKernelAccountBasinOfAtraction( list<long> atractorsList, bool *visitedStates, int sizeStatesVisited, int *regulationMatrix, int regMatrixSizeX, int regMatrixSizeY ) { printf( "....Start: executeKernelAccountBasinOfAtraction \n\n" ); // Part 1: define kernel configuration long long int maxNumberOfStates = (int) pow( 2.0, regMatrixSizeX ); long long int numBlocksOnGrid = 0; long long int numThreadsPerBlock = 0; long long int numIterations = 0; long long int restThreadsToExecute = 0; calculateKernelLaunchConfiguration( maxNumberOfStates, &numThreadsPerBlock, &numBlocksOnGrid, &numIterations, &restThreadsToExecute ); long long int totalNumIterationsKernel = numIterations + (restThreadsToExecute <= 0 ? 
0 : 1); int sizeMatrix = (regMatrixSizeX * regMatrixSizeY); int sizeBeforeStatesNum = 1; size_t memMatrixSize = sizeMatrix * sizeof(int); size_t memSizeNumBeforeStates = sizeBeforeStatesNum * sizeof(int); size_t memBeforeStates = MAX_POSSIBLE_BEFORE_NODES * sizeof(int); size_t memVisitedStates = maxNumberOfStates * sizeof(bool); dim3 dimBlock( numThreadsPerBlock ); dim3 dimGrid( numBlocksOnGrid ); printf( "Number of genes: %d \n", regMatrixSizeX ); printf( "Size solution (2^%d) = %lld \n", regMatrixSizeX, maxNumberOfStates ); // 1MB = 1024^2 printf( "Number of blocks used: %lld \n", numBlocksOnGrid ); printf( "Number of threads used: %lld \n", numThreadsPerBlock ); printf( "Iterations: %lld + (rest: %lld) = %lld \n", numIterations, restThreadsToExecute, totalNumIterationsKernel ); // Host memory allocation int *beforeStates = getPointerToMatrix( memBeforeStates ); int *numBeforeStates = getPointerToMatrix( memSizeNumBeforeStates ); if( beforeStates == NULL || beforeStates <= 0 ) { printf( "Host error: beforeStages Memory Allocation \n" ); exit( 0 ); } numBeforeStates[0] = 0; // TODO: Verificar criação de um kernel para inicializar beforeStates for (int i = 0; i < MAX_POSSIBLE_BEFORE_NODES; i++) { beforeStates[i] = -1; } // Device out allocation memory int *regulationMatrixDev = NULL; bool *visitedStatesDev = NULL; int *outBeforeStatesDev = NULL; int *outNumBeforeStatesDev = NULL; cudaMalloc( (void **) &regulationMatrixDev, memMatrixSize ); cudaBindTexture( 0, regulationMatrixTextRef, regulationMatrixDev, memMatrixSize ); cudaMemcpy( regulationMatrixDev, regulationMatrix, memMatrixSize, cudaMemcpyHostToDevice ); checkCUDAError( "Memory Allocation" ); //------- Allocate Zero Copy memory ------- // cudaHostAlloc( (void **) &visitedStates, memVisitedStates, // cudaHostAllocMapped ); cudaHostGetDevicePointer( (void **) &visitedStatesDev, (void *) visitedStates, 0 ); checkCUDAError( "visitedStatesDev Memory Allocation" ); // ------- cudaMalloc( (void **) 
&outBeforeStatesDev, memBeforeStates ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); cudaMalloc( (void **) &outNumBeforeStatesDev, memSizeNumBeforeStates ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); list<long>::iterator it; for (it = atractorsList.begin(); it != atractorsList.end(); ++it) { visitedStates[*it] = true; } long countConectedComponents = 0; cudaEvent_t start, stop; float time; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); while( atractorsList.size() > 0 ) { int v1 = atractorsList.front(); atractorsList.pop_front(); int offset = 0; for (int i = 0; i < totalNumIterationsKernel; i++) { // Part 5 of 6: launch kernel // cudaEvent_t start, stop; // float time; // cudaEventCreate( &start ); // cudaEventCreate( &stop ); // cudaEventRecord( start, 0 ); numBeforeStates[0] = 0; cudaMemcpy( outBeforeStatesDev, beforeStates, memBeforeStates, cudaMemcpyHostToDevice ); checkCUDAError( "outBeforeStatesDev Memory Allocation" ); cudaMemcpy( outNumBeforeStatesDev, numBeforeStates, memSizeNumBeforeStates, cudaMemcpyHostToDevice ); checkCUDAError( "outNumBeforeStatesDev Memory Allocation" ); kernelCalculateBeforeStates <<< dimGrid , dimBlock>>>( regMatrixSizeX, regMatrixSizeY, v1, visitedStatesDev, outBeforeStatesDev, outNumBeforeStatesDev, maxNumberOfStates, offset ); cudaThreadSynchronize(); offset += dimBlock.x * dimGrid.x; checkCUDAError( "Kernel execution" ); cudaMemcpy( beforeStates, outBeforeStatesDev, memBeforeStates, cudaMemcpyDeviceToHost ); cudaMemcpy( numBeforeStates, outNumBeforeStatesDev, memSizeNumBeforeStates, cudaMemcpyDeviceToHost ); cudaMemcpy( visitedStates, visitedStatesDev, memVisitedStates, cudaMemcpyDeviceToHost ); // // Compute time of kernel execution in milliseconds // cudaEventRecord( stop, 0 ); // cudaEventSynchronize( stop ); // cudaEventElapsedTime( &time, start, stop ); // cudaEventDestroy( start ); // cudaEventDestroy( stop ); // // printf( "time %f s \n", (time / 1000) ); // 
printf( "Number of before states of %d: %d\n", v1, // numBeforeStates[0] ); countConectedComponents += numBeforeStates[0]; for (int i = 0; i < numBeforeStates[0]; i++) { atractorsList.push_back( beforeStates[i] ); beforeStates[i] = -1; // printf( "before[%d]: %d \t ", i, beforeStates[i] ); // printf( "visitedState[%d]: %d\n", beforeStates[i], visitedStates[beforeStates[i]] ); } } } // free device memory cudaFree( outBeforeStatesDev ); cudaFree( outNumBeforeStatesDev ); cudaFree( regulationMatrixDev ); cudaUnbindTexture( regulationMatrixTextRef ); // Compute time of kernel execution in milliseconds cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); printf( "Total Before Nodes reached: %ld \n", countConectedComponents ); printf( "time %f s \n\n", (time / 1000) ); checkCUDAError( "executeKernelAccountBasinOfAtraction Cuda Free" ); return countConectedComponents; }
3ab71b5303cc7ac50c2bca1c67fcb6919e86da82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output purpose pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void 
gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: dot product of two matrix (not only square) in CPU, for validating GPU results parameters: &a CPU host 
pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU host output purpose pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0.0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m, n, k; /* Fixed seed for illustration */ srand(3333); printf("please type in m n and k\n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; hipHostMalloc((void **) &h_a, sizeof(int)*m*n); hipHostMalloc((void **) &h_b, sizeof(int)*n*k); hipHostMalloc((void **) &h_c, sizeof(int)*m*k); hipHostMalloc((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = rand() % 1024; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // start to count execution time of GPU version hipEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; hipMalloc((void **) &d_a, sizeof(int)*m*n); hipMalloc((void **) &d_b, sizeof(int)*n*k); hipMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, 
sizeof(int)*n*k, hipMemcpyHostToDevice); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel if(m == n && n == k) { hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n); } else { hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, n, k); } // Transefr results from device to host hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapse on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version hipEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_cc); return 0; }
3ab71b5303cc7ac50c2bca1c67fcb6919e86da82.cu
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output purpose pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int 
tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: dot product of two matrix (not only square) in CPU, for validating GPU results parameters: &a CPU host pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU 
host output purpose pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0.0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int m, n, k; /* Fixed seed for illustration */ srand(3333); printf("please type in m n and k\n"); scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; cudaMallocHost((void **) &h_a, sizeof(int)*m*n); cudaMallocHost((void **) &h_b, sizeof(int)*n*k); cudaMallocHost((void **) &h_c, sizeof(int)*m*k); cudaMallocHost((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = rand() % 1024; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = rand() % 1024; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start to count execution time of GPU version cudaEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; cudaMalloc((void **) &d_a, sizeof(int)*m*n); cudaMalloc((void **) &d_b, sizeof(int)*n*k); cudaMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice); unsigned int grid_rows = (m + 
BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel if(m == n && n == k) { gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n); } else { gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k); } // Transefr results from device to host cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapse on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version cudaEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_cc); return 0; }
9fb011fb1feef666d21007f915db9c6d02e0410a.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <stdlib.h> #include <string> #include <cstdio> #include <iostream> #include <stdio.h> #include <stdlib.h> #include "WordCount.h" #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "device_launch_parameters.h" #include <sstream> #include <string> #include <fstream> #include <tchar.h> #include "string" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void cudaMap(char *input, KeyValuePair *pairs) { kernMap << < GRID_SIZE, BLOCK_SIZE >> >(input, pairs); checkCUDAError("Map kernel failed!"); hipDeviceSynchronize(); } __global__ void kernMap(char *idata, KeyValuePair *pairs) { int ind = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = ind; i < NUM_INPUT; i += offset) { mapper(&idata[i], &pairs[i * NUM_KEYS]); } } __device__ void mapper(char *input, KeyValuePair *pairs) { pairs->key = 0; char ch = *input; if (ch == ' ' || ch == '\n' || ch == ',' || ch == '.') { pairs->value = 1; } else { pairs->value = 0; } } void cudaReduce(KeyValuePair *pairs, int *odata) { kernReduce << <GRID_SIZE, BLOCK_SIZE >> >(pairs, odata); checkCUDAError("Reduce kernel failed!"); hipDeviceSynchronize(); } __global__ void kernReduce(KeyValuePair *pairs, int *odata) { int ind = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = ind; i < NUM_OUTPUT; i += offset) { int startIndex = 0; int count = 0; int valueSize = 0; int j; for (j = 1; j < NUM_INPUT * NUM_KEYS; j++) { if (KVComparator()(pairs[j - 1], pairs[j])) { if (count == i) { break; } else { count++; startIndex = j; } } } valueSize = j - startIndex; reducer(pairs + startIndex, valueSize, &odata[i]); } } __device__ void reducer(KeyValuePair *pairs, int len, int* odata) { int sum = 0; for (KeyValuePair *pair = pairs; pair != pairs + len; 
pair++) { sum += pair->value; } *odata = sum; } void cudaMapReduce(char* input, int *output) { char* dev_idata; int* dev_odata; KeyValuePair *dev_intermediate; hipMalloc(&dev_idata, NUM_INPUT * sizeof(char)); hipMalloc(&dev_intermediate, NUM_INPUT * NUM_KEYS * sizeof(KeyValuePair)); hipMalloc(&dev_odata, NUM_OUTPUT * sizeof(int)); hipMemcpy(dev_idata, input, NUM_INPUT * sizeof(char), hipMemcpyHostToDevice); cudaMap(dev_idata, dev_intermediate); thrust::device_ptr<KeyValuePair> dev_ptr(dev_intermediate); thrust::sort(dev_ptr, dev_ptr + NUM_INPUT * NUM_KEYS, KVComparator()); cudaReduce(dev_intermediate, dev_odata); hipMemcpy(output, dev_odata, NUM_OUTPUT * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_idata); hipFree(dev_intermediate); hipFree(dev_odata); } int chars = 0, words = 0, lines = 1; char c; void CPUCounting(FILE *file) { while ((c = fgetc(file)) != EOF) { chars++; if (c != ' '&& c != ',' && c != '\n') { words++; while ((c = fgetc(file)) != EOF) { chars++; if (c == ' ' || c == ',' || c == '\n') { break; } } } else if (c == '\n') { lines++; } } } int main(int argc, char* argv[]) { //// read by line //std::ifstream infile("hamlet.txt"); //std::string line; //if (infile) { // while (std::getline(infile, line)) { // std::cout << line << std::endl; // } //} //else { // std::cout << "no such file" << std::endl; //} char* idata = new char[NUM_INPUT]; int* odata = new int[NUM_OUTPUT]; char* filename = "hamlet.txt"; FILE* fp; fp = fopen(filename, "r"); int i = 0; int ch; while (1) { if (fp == NULL) { printf("File didn't open"); break; } ch = fgetc(fp); if (ch == EOF) { break; } i++; idata[i] = ch; printf("%c", ch); } FILE* fp2; fp2 = fopen(filename, "r"); CPUCounting(fp2); if (fp != NULL) { fclose(fp); } if (fp2 != NULL) { fclose(fp); } cudaMapReduce(idata, odata); for (int i = 0; i < argc; i++) { std::cout << "CPU computing: " << std::endl; std::cout << "Total word count: " << words << std::endl; } for (int i = 0; i < NUM_OUTPUT; i++) { std::cout << "GPU 
computing: " << std::endl; std::cout << "Total word count: " << odata[i] << std::endl; } delete idata; delete odata; return 0; }
9fb011fb1feef666d21007f915db9c6d02e0410a.cu
#include <cstdio> #include <stdlib.h> #include <string> #include <cstdio> #include <iostream> #include <stdio.h> #include <stdlib.h> #include "WordCount.h" #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "device_launch_parameters.h" #include <sstream> #include <string> #include <fstream> #include <tchar.h> #include "string" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void cudaMap(char *input, KeyValuePair *pairs) { kernMap << < GRID_SIZE, BLOCK_SIZE >> >(input, pairs); checkCUDAError("Map kernel failed!"); cudaDeviceSynchronize(); } __global__ void kernMap(char *idata, KeyValuePair *pairs) { int ind = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = ind; i < NUM_INPUT; i += offset) { mapper(&idata[i], &pairs[i * NUM_KEYS]); } } __device__ void mapper(char *input, KeyValuePair *pairs) { pairs->key = 0; char ch = *input; if (ch == ' ' || ch == '\n' || ch == ',' || ch == '.') { pairs->value = 1; } else { pairs->value = 0; } } void cudaReduce(KeyValuePair *pairs, int *odata) { kernReduce << <GRID_SIZE, BLOCK_SIZE >> >(pairs, odata); checkCUDAError("Reduce kernel failed!"); cudaDeviceSynchronize(); } __global__ void kernReduce(KeyValuePair *pairs, int *odata) { int ind = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = ind; i < NUM_OUTPUT; i += offset) { int startIndex = 0; int count = 0; int valueSize = 0; int j; for (j = 1; j < NUM_INPUT * NUM_KEYS; j++) { if (KVComparator()(pairs[j - 1], pairs[j])) { if (count == i) { break; } else { count++; startIndex = j; } } } valueSize = j - startIndex; reducer(pairs + startIndex, valueSize, &odata[i]); } } __device__ void reducer(KeyValuePair *pairs, int len, int* odata) { int sum = 0; for (KeyValuePair *pair = pairs; pair != pairs + len; pair++) { sum += pair->value; } *odata = sum; } void 
cudaMapReduce(char* input, int *output) { char* dev_idata; int* dev_odata; KeyValuePair *dev_intermediate; cudaMalloc(&dev_idata, NUM_INPUT * sizeof(char)); cudaMalloc(&dev_intermediate, NUM_INPUT * NUM_KEYS * sizeof(KeyValuePair)); cudaMalloc(&dev_odata, NUM_OUTPUT * sizeof(int)); cudaMemcpy(dev_idata, input, NUM_INPUT * sizeof(char), cudaMemcpyHostToDevice); cudaMap(dev_idata, dev_intermediate); thrust::device_ptr<KeyValuePair> dev_ptr(dev_intermediate); thrust::sort(dev_ptr, dev_ptr + NUM_INPUT * NUM_KEYS, KVComparator()); cudaReduce(dev_intermediate, dev_odata); cudaMemcpy(output, dev_odata, NUM_OUTPUT * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_idata); cudaFree(dev_intermediate); cudaFree(dev_odata); } int chars = 0, words = 0, lines = 1; char c; void CPUCounting(FILE *file) { while ((c = fgetc(file)) != EOF) { chars++; if (c != ' '&& c != ',' && c != '\n') { words++; while ((c = fgetc(file)) != EOF) { chars++; if (c == ' ' || c == ',' || c == '\n') { break; } } } else if (c == '\n') { lines++; } } } int main(int argc, char* argv[]) { //// read by line //std::ifstream infile("hamlet.txt"); //std::string line; //if (infile) { // while (std::getline(infile, line)) { // std::cout << line << std::endl; // } //} //else { // std::cout << "no such file" << std::endl; //} char* idata = new char[NUM_INPUT]; int* odata = new int[NUM_OUTPUT]; char* filename = "hamlet.txt"; FILE* fp; fp = fopen(filename, "r"); int i = 0; int ch; while (1) { if (fp == NULL) { printf("File didn't open"); break; } ch = fgetc(fp); if (ch == EOF) { break; } i++; idata[i] = ch; printf("%c", ch); } FILE* fp2; fp2 = fopen(filename, "r"); CPUCounting(fp2); if (fp != NULL) { fclose(fp); } if (fp2 != NULL) { fclose(fp); } cudaMapReduce(idata, odata); for (int i = 0; i < argc; i++) { std::cout << "CPU computing: " << std::endl; std::cout << "Total word count: " << words << std::endl; } for (int i = 0; i < NUM_OUTPUT; i++) { std::cout << "GPU computing: " << std::endl; std::cout << "Total 
word count: " << odata[i] << std::endl; } delete idata; delete odata; return 0; }
3866780522af661025411dd246a7526a21a6c7c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. 
*/ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_clat2z_q( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( clat2z_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( clat2z_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } }
3866780522af661025411dd246a7526a21a6c7c8.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. 
*/ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_clat2z_q( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { clat2z_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { clat2z_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } }
d53e2c328e4f50ac3ca0e0bc38784e4844d68c03.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "LblLayer.cuh" #include "DataLayer.h" __global__ void getDiffData(float* targets, float* diffData, int label_count, int _batch_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= _batch_size) return; const int label_value = static_cast<int>(targets[idx]); diffData[idx * label_count + label_value] -= 1; } void LblLayer::copytoDevData(float* imagesfloat, int imageID) { int inputsize = inchannel*inheight*inwidth; error->checkError(hipMemcpyAsync(ptrToOutData, &imagesfloat[imageID * inputsize], sizeof(float) * inputsize, hipMemcpyHostToDevice)); error->checkError(hipDeviceSynchronize()); } void LblLayer::copytoHostData(float* &imagesfloat) { int inputsize = inchannel*inheight*inwidth; imagesfloat = new float[inputsize]; error->checkError(hipMemcpyAsync(&imagesfloat[0], ptrToOutData, sizeof(float) *inputsize, hipMemcpyDeviceToHost)); } void LblLayer::printGrad(int dimension) { Layer::printGrad(dimension, "Lbl "); } int LblLayer::getTypeId() { return LayerID::LblLayer; } void LblLayer::copytoDevDiff(float*labelsfloat) { const float scalVal = 1.0f / static_cast<float>(baSize); float*lbl; hipMalloc(&lbl, sizeof(float) * baSize); error->checkError(hipMemcpyAsync(lbl, labelsfloat, sizeof(float)* baSize, hipMemcpyHostToDevice)); error->checkError(hipDeviceSynchronize()); error->checkError(hipMemcpyAsync(&ptrToGradData[0], prevLayer->ptrToOutData, sizeof(float)* baSize* prevLayer->outchannel, hipMemcpyDeviceToDevice)); error->checkError(hipDeviceSynchronize()); getDiffData << <baSize, 1 >> > (lbl, ptrToGradData, prevLayer->outchannel, baSize); error->checkError(hipDeviceSynchronize()); hipblasSscal(handle->cublasHandle,baSize *prevLayer->outchannel, &scalVal, ptrToGradData, 1); ptrToOutData = ptrToGradData; } void LblLayer::copytoHostLabelwComp(float* &labelsfloat) { int outputsize = outchannel*outheight*outwidth; labelsfloat = new float[outputsize*baSize]; 
error->checkError(hipMemcpyAsync(labelsfloat, ptrToGradData, sizeof(float)* baSize* prevLayer->outchannel, hipMemcpyDeviceToHost)); }
d53e2c328e4f50ac3ca0e0bc38784e4844d68c03.cu
#include "LblLayer.cuh" #include "DataLayer.h" __global__ void getDiffData(float* targets, float* diffData, int label_count, int _batch_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= _batch_size) return; const int label_value = static_cast<int>(targets[idx]); diffData[idx * label_count + label_value] -= 1; } void LblLayer::copytoDevData(float* imagesfloat, int imageID) { int inputsize = inchannel*inheight*inwidth; error->checkError(cudaMemcpyAsync(ptrToOutData, &imagesfloat[imageID * inputsize], sizeof(float) * inputsize, cudaMemcpyHostToDevice)); error->checkError(cudaDeviceSynchronize()); } void LblLayer::copytoHostData(float* &imagesfloat) { int inputsize = inchannel*inheight*inwidth; imagesfloat = new float[inputsize]; error->checkError(cudaMemcpyAsync(&imagesfloat[0], ptrToOutData, sizeof(float) *inputsize, cudaMemcpyDeviceToHost)); } void LblLayer::printGrad(int dimension) { Layer::printGrad(dimension, "Lbl "); } int LblLayer::getTypeId() { return LayerID::LblLayer; } void LblLayer::copytoDevDiff(float*labelsfloat) { const float scalVal = 1.0f / static_cast<float>(baSize); float*lbl; cudaMalloc(&lbl, sizeof(float) * baSize); error->checkError(cudaMemcpyAsync(lbl, labelsfloat, sizeof(float)* baSize, cudaMemcpyHostToDevice)); error->checkError(cudaDeviceSynchronize()); error->checkError(cudaMemcpyAsync(&ptrToGradData[0], prevLayer->ptrToOutData, sizeof(float)* baSize* prevLayer->outchannel, cudaMemcpyDeviceToDevice)); error->checkError(cudaDeviceSynchronize()); getDiffData << <baSize, 1 >> > (lbl, ptrToGradData, prevLayer->outchannel, baSize); error->checkError(cudaDeviceSynchronize()); cublasSscal_v2(handle->cublasHandle,baSize *prevLayer->outchannel, &scalVal, ptrToGradData, 1); ptrToOutData = ptrToGradData; } void LblLayer::copytoHostLabelwComp(float* &labelsfloat) { int outputsize = outchannel*outheight*outwidth; labelsfloat = new float[outputsize*baSize]; error->checkError(cudaMemcpyAsync(labelsfloat, ptrToGradData, sizeof(float)* 
baSize* prevLayer->outchannel, cudaMemcpyDeviceToHost)); }
dde12580b45fd4203034d617e0587ca75847b4e4.hip
// !!! This is a file automatically generated by hipify!!! // Program corresponding to CythonBM.cu that can be run directly from the command lin. For testing purposes. #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <ctime> #include "book.h" // Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/hiprand/host-api-overview.html#generator-options #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) int *failCross = nullptr; //Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier __global__ void randomWalk(double *results, int *crossTimes, double T, int N, int numSims, double lowerThreshold, double upperThreshold, int *dev_failCross, double seconds) { int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x; if (crossTimeIndex < numSims) { hiprandState_t state; hiprand_init (blockIdx.x * 1000 + threadIdx.x + seconds, 0, 0, &state); double random; int start = (threadIdx.x + blockIdx.x * blockDim.x) * N; bool crossed = false; crossTimes[crossTimeIndex] = 0; results[start] = 0.0; for (int j = start + 1; j < start + N; j++) { random = hiprand_normal_double(&state); results[j] = results[j-1] + random * sqrt((double) T / N); if (results[j] >= upperThreshold && !crossed) { crossTimes[crossTimeIndex] = j - start; crossed = true; } else if (results[j] <= lowerThreshold && !crossed) { crossTimes[crossTimeIndex] = -1 * (j - start); crossed = true; } } if (!crossed) { atomicAdd(dev_failCross, 1); } } /* Generate 2 doubles at once. 
Test later to see if this is more efficient: double hiprand_normal2_double (state); */ } int *getFailCross() { return failCross; } double getAverage(int* array, int numSims) { double sum = 0; int size = numSims; int nonZero = 0; for(int i = 0; i < size; i++) { if (array[i] != 0) { sum += abs(array[i]); nonZero++; } } return sum/nonZero; } int main() { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); time_t timer; struct tm y2k = {0}; double seconds; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time(&timer); /* get current time; same as: timer = time(NULL) */ seconds = difftime(timer,mktime(&y2k)); //Arrays to store the brownian path, one for the host and one for the device int N = 500; double T = 1; int numSims = 1; int numBlocks = (511 + numSims) / numSims; int numThreads = 512; double lowerThreshold = -1; double upperThreshold = 1; double *results = new double[N * numSims]; double *dev_results; failCross = new int; int *dev_failCross; int *crossTimes = new int[numSims]; int *dev_crossTimes; // Allocate space for results array on device hipMalloc(&dev_results, N * numSims * sizeof(double)); hipMalloc(&dev_crossTimes, numSims * sizeof(int)); hipMalloc(&dev_failCross, sizeof(dev_failCross)); //Call GPU function hipLaunchKernelGGL(( randomWalk), dim3(numBlocks), dim3(numThreads), 0, 0, dev_results, dev_crossTimes, T, N, numSims, lowerThreshold, upperThreshold, dev_failCross, seconds); //copy results array from device to host hipMemcpy(results, dev_results , N * numSims * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(crossTimes, dev_crossTimes, numSims * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(failCross, dev_failCross, sizeof(dev_failCross), hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.1f ms/n", elapsedTime); hipEventDestroy(start); 
hipEventDestroy(stop); printf("\n"); // print out path for (int i=0; i < (N * numSims); i++) { printf("%f ", results[i]); if (i == N - 1) { printf("\n"); printf("\n"); } } printf("\n"); printf("\n"); printf("\n"); /* // print out cross times for (int i=0; i < numSims; i++) { printf("%d ", crossTimes[i]); } printf("\n"); printf("\n"); printf("\n"); int x = *getFailCross(); printf("%d ", x); printf("\n"); printf("\n"); printf("\n"); printf("Average crossing time: %f ", getAverage(crossTimes, numSims)); printf("\n"); int x = *getFailCross(); printf("Number that failed to cross: %d ", x); printf("\n"); printf("\n"); */ //clean up hipFree(dev_results); hipFree(dev_crossTimes); hipFree(dev_failCross); return 0; }
dde12580b45fd4203034d617e0587ca75847b4e4.cu
// Program corresponding to CythonBM.cu that can be run directly from the command lin. For testing purposes. #include <curand_kernel.h> #include <stdio.h> #include <cuda.h> #include <cmath> #include <ctime> #include "book.h" // Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/curand/host-api-overview.html#generator-options #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) int *failCross = nullptr; //Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier __global__ void randomWalk(double *results, int *crossTimes, double T, int N, int numSims, double lowerThreshold, double upperThreshold, int *dev_failCross, double seconds) { int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x; if (crossTimeIndex < numSims) { curandState_t state; curand_init (blockIdx.x * 1000 + threadIdx.x + seconds, 0, 0, &state); double random; int start = (threadIdx.x + blockIdx.x * blockDim.x) * N; bool crossed = false; crossTimes[crossTimeIndex] = 0; results[start] = 0.0; for (int j = start + 1; j < start + N; j++) { random = curand_normal_double(&state); results[j] = results[j-1] + random * sqrt((double) T / N); if (results[j] >= upperThreshold && !crossed) { crossTimes[crossTimeIndex] = j - start; crossed = true; } else if (results[j] <= lowerThreshold && !crossed) { crossTimes[crossTimeIndex] = -1 * (j - start); crossed = true; } } if (!crossed) { atomicAdd(dev_failCross, 1); } } /* Generate 2 doubles at once. 
Test later to see if this is more efficient: double curand_normal2_double (state); */ } int *getFailCross() { return failCross; } double getAverage(int* array, int numSims) { double sum = 0; int size = numSims; int nonZero = 0; for(int i = 0; i < size; i++) { if (array[i] != 0) { sum += abs(array[i]); nonZero++; } } return sum/nonZero; } int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); time_t timer; struct tm y2k = {0}; double seconds; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time(&timer); /* get current time; same as: timer = time(NULL) */ seconds = difftime(timer,mktime(&y2k)); //Arrays to store the brownian path, one for the host and one for the device int N = 500; double T = 1; int numSims = 1; int numBlocks = (511 + numSims) / numSims; int numThreads = 512; double lowerThreshold = -1; double upperThreshold = 1; double *results = new double[N * numSims]; double *dev_results; failCross = new int; int *dev_failCross; int *crossTimes = new int[numSims]; int *dev_crossTimes; // Allocate space for results array on device cudaMalloc(&dev_results, N * numSims * sizeof(double)); cudaMalloc(&dev_crossTimes, numSims * sizeof(int)); cudaMalloc(&dev_failCross, sizeof(dev_failCross)); //Call GPU function randomWalk<<<numBlocks, numThreads>>>(dev_results, dev_crossTimes, T, N, numSims, lowerThreshold, upperThreshold, dev_failCross, seconds); //copy results array from device to host cudaMemcpy(results, dev_results , N * numSims * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(crossTimes, dev_crossTimes, numSims * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(failCross, dev_failCross, sizeof(dev_failCross), cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.1f ms/n", elapsedTime); cudaEventDestroy(start); 
cudaEventDestroy(stop); printf("\n"); // print out path for (int i=0; i < (N * numSims); i++) { printf("%f ", results[i]); if (i == N - 1) { printf("\n"); printf("\n"); } } printf("\n"); printf("\n"); printf("\n"); /* // print out cross times for (int i=0; i < numSims; i++) { printf("%d ", crossTimes[i]); } printf("\n"); printf("\n"); printf("\n"); int x = *getFailCross(); printf("%d ", x); printf("\n"); printf("\n"); printf("\n"); printf("Average crossing time: %f ", getAverage(crossTimes, numSims)); printf("\n"); int x = *getFailCross(); printf("Number that failed to cross: %d ", x); printf("\n"); printf("\n"); */ //clean up cudaFree(dev_results); cudaFree(dev_crossTimes); cudaFree(dev_failCross); return 0; }
0fe7dda9878da9a4cef636f4537e02c07788927f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CudaFlow.h" __global__ void SolveDataL1InpaintKernel(const float *duhat0, const float *dvhat0, const float *mask0, const float *mask1, const float *pu1, const float *pu2, const float *pv1, const float *pv2, const float *Ix, const float *Iy, const float *It, int width, int height, int stride, float lambda, float theta, float *duhat1, float *dvhat1) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row float dix, diy, dit, duhat, dvhat, du, dv; if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; // current pixel index dix = Ix[pos]; diy = Iy[pos]; dit = It[pos]; float duhat = duhat0[pos]; float dvhat = dvhat0[pos]; float dmask0 = mask0[pos]; //problem 1a float rho = (dix*duhat + diy*dvhat + dit); float upper = lambda*theta*(dix*dix + diy*diy); float lower = -lambda*theta*(dix*dix + diy*diy);; if ((rho <= upper) && (rho >= lower)) { float magi = dix*dix + diy*diy; if (magi != 0) { du = duhat - rho*dix / magi; dv = dvhat - rho*diy / magi; } else { du = duhat; dv = dvhat; } } else if (rho < lower) { du = duhat + lambda*theta*dix; dv = dvhat + lambda*theta*diy; } else if (rho > upper) { du = duhat - lambda*theta*dix; dv = dvhat - lambda*theta*diy; } if (dmask0 == 0.0f) { du = duhat; dv = dvhat; } //problem 1b float divpu, divpv; int left = (ix - 1) + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy - 1) * stride; int up = ix + (iy + 1) * stride; if ((ix - 1) < 0) { if ((iy - 1) < 0) { //divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos]; //divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos]; divpu = pu1[pos] + pu2[pos]; divpv = pv1[pos] + pv2[pos]; } else { //divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down]; //divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down]; divpu = pu1[pos] + pu2[pos] - pu2[down]; divpv = pv1[pos] + pv2[pos] - pv2[down]; } } else { if ((iy - 1) < 0) { //divpu 
= pu1[pos] - pu1[left] + pu2[up] - pu2[pos]; //divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos]; divpu = pu1[pos] - pu1[left] + pu2[pos]; divpv = pv1[pos] - pv1[left] + pv2[pos]; } else { divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down]; divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down]; } } duhat1[pos] = du + theta*divpu; dvhat1[pos] = dv + theta*divpv; } } void CudaFlow::SolveDataL1Inpaint(const float *duhat0, const float *dvhat0, const float *mask0, const float *mask1, const float *pu1, const float *pu2, const float *pv1, const float *pv2, const float *Ix, const float *Iy, const float *Iz, int w, int h, int s, float lambda, float theta, float *duhat1, float *dvhat1) { // CTA size dim3 threads(BlockWidth, BlockHeight); // grid size dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); SolveDataL1InpaintKernel << < blocks, threads >> > (duhat0, dvhat0, mask0, mask1, pu1, pu2, pv1, pv2, Ix, Iy, Iz, w, h, s, lambda, theta, duhat1, dvhat1); }
0fe7dda9878da9a4cef636f4537e02c07788927f.cu
#include "CudaFlow.h" __global__ void SolveDataL1InpaintKernel(const float *duhat0, const float *dvhat0, const float *mask0, const float *mask1, const float *pu1, const float *pu2, const float *pv1, const float *pv2, const float *Ix, const float *Iy, const float *It, int width, int height, int stride, float lambda, float theta, float *duhat1, float *dvhat1) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row float dix, diy, dit, duhat, dvhat, du, dv; if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; // current pixel index dix = Ix[pos]; diy = Iy[pos]; dit = It[pos]; float duhat = duhat0[pos]; float dvhat = dvhat0[pos]; float dmask0 = mask0[pos]; //problem 1a float rho = (dix*duhat + diy*dvhat + dit); float upper = lambda*theta*(dix*dix + diy*diy); float lower = -lambda*theta*(dix*dix + diy*diy);; if ((rho <= upper) && (rho >= lower)) { float magi = dix*dix + diy*diy; if (magi != 0) { du = duhat - rho*dix / magi; dv = dvhat - rho*diy / magi; } else { du = duhat; dv = dvhat; } } else if (rho < lower) { du = duhat + lambda*theta*dix; dv = dvhat + lambda*theta*diy; } else if (rho > upper) { du = duhat - lambda*theta*dix; dv = dvhat - lambda*theta*diy; } if (dmask0 == 0.0f) { du = duhat; dv = dvhat; } //problem 1b float divpu, divpv; int left = (ix - 1) + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy - 1) * stride; int up = ix + (iy + 1) * stride; if ((ix - 1) < 0) { if ((iy - 1) < 0) { //divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos]; //divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos]; divpu = pu1[pos] + pu2[pos]; divpv = pv1[pos] + pv2[pos]; } else { //divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down]; //divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down]; divpu = pu1[pos] + pu2[pos] - pu2[down]; divpv = pv1[pos] + pv2[pos] - pv2[down]; } } else { if ((iy - 1) < 0) { //divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos]; //divpv = pv1[pos] - pv1[left] + pv2[up] - 
pv2[pos]; divpu = pu1[pos] - pu1[left] + pu2[pos]; divpv = pv1[pos] - pv1[left] + pv2[pos]; } else { divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down]; divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down]; } } duhat1[pos] = du + theta*divpu; dvhat1[pos] = dv + theta*divpv; } } void CudaFlow::SolveDataL1Inpaint(const float *duhat0, const float *dvhat0, const float *mask0, const float *mask1, const float *pu1, const float *pu2, const float *pv1, const float *pv2, const float *Ix, const float *Iy, const float *Iz, int w, int h, int s, float lambda, float theta, float *duhat1, float *dvhat1) { // CTA size dim3 threads(BlockWidth, BlockHeight); // grid size dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); SolveDataL1InpaintKernel << < blocks, threads >> > (duhat0, dvhat0, mask0, mask1, pu1, pu2, pv1, pv2, Ix, Iy, Iz, w, h, s, lambda, theta, duhat1, dvhat1); }
56404572ff31909ccbc92a0d3ccfa58a8808d438.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void add( int *a, int *b, int *c, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid; // HERE tid = blockIdx.x * blockDim.x + threadIdx.x; // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ // Compute the addition // HERE c[tid] = a[tid] + b[tid]; } return; } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments if (argc <= 2) { // Tell the user how to run the program printf ("Usage: %s vector_size block_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); int grid_size = ((vector_size-1)/block_size) + 1; // Set device that we will use for our cuda code hipSetDevice(0); // Time Variables hipEvent_t start, stop; float time; hipEventCreate (&start); hipEventCreate (&stop); // Input Arrays and variables int *a = new int [vector_size]; int *b = new int [vector_size]; int *c_cpu = new int [vector_size]; int *c_gpu = new int [vector_size]; // Pointers in GPU memory int *dev_a; int *dev_b; int *dev_c; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { a[i] = rand()%10; b[i] = rand()%10; } // // CPU Calculation ////////////////// printf("Running sequential job.\n"); hipEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { c_cpu[i] = a[i] + b[i]; } hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // allocate the memory on the GPU // 
HERE hipMalloc(&dev_a, vector_size*sizeof(int)); hipMalloc(&dev_b, vector_size*sizeof(int)); hipMalloc(&dev_c, vector_size*sizeof(int)); // copy the arrays 'a' and 'b' to the GPU // HERE hipMemcpy(dev_a, a, vector_size*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, vector_size*sizeof(int), hipMemcpyHostToDevice); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); hipEventRecord(start,0); // call the kernel // HERE hipLaunchKernelGGL(( add), dim3(grid_size), dim3(block_size), 0, 0, dev_a, dev_b, dev_c, vector_size); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // copy the array 'c' back from the GPU to the CPU // HERE (there's one more at the end, don't miss it!) hipMemcpy(c_gpu, dev_c, vector_size*sizeof(int), hipMemcpyDeviceToHost); // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // free CPU data free (a); free (b); free (c_cpu); free (c_gpu); // free the memory allocated on the GPU // HERE hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
56404572ff31909ccbc92a0d3ccfa58a8808d438.cu
#include <stdio.h> #include <stdlib.h> __global__ void add( int *a, int *b, int *c, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid; // HERE tid = blockIdx.x * blockDim.x + threadIdx.x; // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ // Compute the addition // HERE c[tid] = a[tid] + b[tid]; } return; } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments if (argc <= 2) { // Tell the user how to run the program printf ("Usage: %s vector_size block_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); int grid_size = ((vector_size-1)/block_size) + 1; // Set device that we will use for our cuda code cudaSetDevice(0); // Time Variables cudaEvent_t start, stop; float time; cudaEventCreate (&start); cudaEventCreate (&stop); // Input Arrays and variables int *a = new int [vector_size]; int *b = new int [vector_size]; int *c_cpu = new int [vector_size]; int *c_gpu = new int [vector_size]; // Pointers in GPU memory int *dev_a; int *dev_b; int *dev_c; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { a[i] = rand()%10; b[i] = rand()%10; } // // CPU Calculation ////////////////// printf("Running sequential job.\n"); cudaEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { c_cpu[i] = a[i] + b[i]; } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // allocate the memory on the GPU // HERE cudaMalloc(&dev_a, vector_size*sizeof(int)); cudaMalloc(&dev_b, 
vector_size*sizeof(int)); cudaMalloc(&dev_c, vector_size*sizeof(int)); // copy the arrays 'a' and 'b' to the GPU // HERE cudaMemcpy(dev_a, a, vector_size*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, vector_size*sizeof(int), cudaMemcpyHostToDevice); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); cudaEventRecord(start,0); // call the kernel // HERE add<<<grid_size, block_size>>>(dev_a, dev_b, dev_c, vector_size); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // copy the array 'c' back from the GPU to the CPU // HERE (there's one more at the end, don't miss it!) cudaMemcpy(c_gpu, dev_c, vector_size*sizeof(int), cudaMemcpyDeviceToHost); // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // free CPU data free (a); free (b); free (c_cpu); free (c_gpu); // free the memory allocated on the GPU // HERE cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
24b21274f981198e1d8701b923edfe604634cf6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include <vector> #include <iostream> #include <sstream> #include <fstream> #include <iterator> #include <CLI11.hpp> #define gpuErrchk(ans) { gpuAssert( (ans), __FILE__, __LINE__ ); } inline void gpuAssert( hipError_t code, const char * file, int line, bool abort = true ) { if ( hipSuccess != code ) { fprintf( stderr, "\nGPUassert: %s %s %d\n", hipGetErrorString( code ), file, line ); if ( abort ) exit( code ); } return; } typedef uint cell; namespace cuda_kernels{ __host__ __device__ int index_from_coordinates(int column, int row, int level, int length, int width, int heighth) { return ((column + length) % length) + ((row + width) % width) * length + ((level + heighth) % heighth) * length * width; } __host__ __device__ int calc_neighbours(cell* grid, int column, int row, int level, int length, int width, int heighth) { return grid[index_from_coordinates(column + -1, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + -1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + 1, length, width, 
heighth)] + grid[index_from_coordinates(column + 0, row + 0, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + 1, length, width, heighth)]; } __global__ void calc_next_generation_all_global(cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { // Call to global memory approx 28 times per cell - very slow int column = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int level = blockIdx.z * blockDim.z + threadIdx.z; int place = index_from_coordinates(column, row, level, length, width, heighth); if (column < length and row < width and level < heighth) { cell state = current_grid[place]; // slow int neighbours = calc_neighbours(current_grid, column, row, level, length, width, heighth); // 26 * slow if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and 
neighbours >= llive and neighbours <= rlive)) { next_grid[place] = 1; } else { next_grid[place] = 0; } } } __global__ void calc_next_generation_shared_areas(cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { // Call to global memory approx 2 times per cell - fast int column = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; int row = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; int level = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; if (column < length + 1 and row < width + 1 and level < heighth + 1) { int place = index_from_coordinates(column, row, level, length, width, heighth); __shared__ cell area[8192]; // 32kb of shared memory area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = current_grid[place]; // slow __syncthreads(); // now we can calculate neighbours fast in a slightly smaller area if (threadIdx.x > 0 and threadIdx.x < blockDim.x - 1 and threadIdx.y > 0 and threadIdx.y < blockDim.y - 1 and threadIdx.z > 0 and threadIdx.z < blockDim.z - 1 and column != length and row != width and level != heighth) { int neighbours; neighbours = calc_neighbours(area, threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z); // fast cell state = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and neighbours >= llive and neighbours <= rlive)) { next_grid[place] = 1; } else { next_grid[place] = 0; } } } } __global__ void calculate_n_generations(int n, cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { //copy areas for each block into it's shared memory and then calculate neighbours int column = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; int row = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; int level = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; if 
(column < length + 1 and row < width + 1 and level < heighth + 1) { int place = index_from_coordinates(column, row, level, length, width, heighth); __shared__ cell area[8192]; // 32kb of shared memory if (threadIdx.x > 0 and threadIdx.x < blockDim.x - 1 and threadIdx.y > 0 and threadIdx.y < blockDim.y - 1 and threadIdx.z > 0 and threadIdx.z < blockDim.z - 1 and column != length and row != width and level != heighth) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = current_grid[place]; // pull inner cells } int neighbours; int state; cell* now = current_grid; cell* next = next_grid; cell* temp; for (int i = 0; i < n; ++i) { if (threadIdx.x == 0 or threadIdx.x == blockDim.x - 1 or threadIdx.y == 0 or threadIdx.y == blockDim.y - 1 or threadIdx.z == 0 or threadIdx.z == blockDim.z - 1 or column == length or row == width or level == heighth) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = now[place]; // pull outer cells } __syncthreads(); neighbours = calc_neighbours(area, threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z); // fast state = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and neighbours >= llive and neighbours <= rlive)) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = 1; } else { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = 0; } if (threadIdx.x == 1 or threadIdx.x == blockDim.x - 2 or column == length - 1 or threadIdx.y == 1 or threadIdx.y == blockDim.y - 2 or row == width - 1 or threadIdx.z == 1 or threadIdx.z == blockDim.z - 2 or level == heighth - 1) { next[place] = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; } temp = now; now = next; next = temp; __syncthreads(); } } } } void read_size(std::string filename, 
int* length, int* width, int* heighth){ std::ifstream data(filename); std::string line; std::getline(data, line); std::stringstream lineStream(line); std::vector<std::string> parsedRow; std::string cell; while(std::getline(lineStream, cell, ' ')) { parsedRow.push_back(cell); } *length = std::stoi(parsedRow[0]); *width = std::stoi(parsedRow[1]); *heighth = std::stoi(parsedRow[2]); } void read_input(std::string filename, cell* field_h){ std::ifstream data(filename); std::string line; std::getline(data, line); std::stringstream lineStream(line); std::vector<std::string> parsedRow; std::string cell; while(std::getline(lineStream, cell, ' ')) { parsedRow.push_back(cell); } int length = std::stoi(parsedRow[0]); int width = std::stoi(parsedRow[1]); int heighth = std::stoi(parsedRow[2]); while(std::getline(data,line)) { std::stringstream lineStream(line); std::string cell; std::vector<std::string> parsedRow; while(std::getline(lineStream, cell ,' ')) { parsedRow.push_back(cell); } if (std::stoi(parsedRow[0]) != 0 or std::stoi(parsedRow[1]) != 0 or std::stoi(parsedRow[2]) != 0 or std::stoi(parsedRow[3]) != 0) { field_h[std::stoi(parsedRow[1]) + std::stoi(parsedRow[2]) * length + std::stoi(parsedRow[3]) * length * width] = std::stoi(parsedRow[0]); } } } void append_state_to_file(std::string filename, cell* field_d, int length, int width, int heighth){ cell field_h[length * width * heighth]; size_t size = length * width * heighth * sizeof(int); hipMemcpy(field_h, field_d, size, hipMemcpyDeviceToHost); std::ofstream file; file.open(filename, std::ios_base::app); for (int i = 0; i < length; ++i) { for (int j = 0; j < width; ++j) { for (int k = 0; k < heighth; ++k) { int place = i + j * length + k * length * width; if (field_h[place] != 0){ file << field_h[place] << " " << i << " " << j << " " << k << "\n"; } } } } file << "0 0 0 0\n"; } int main(int argc, char** argv){ CLI::App app{"Cuda game of life"}; std::string input_filename = "glider.in"; app.add_option("-i,--input", 
input_filename, "Input filename"); std::string output_filename = "test.out"; app.add_option("-o,--output", output_filename, "Output filename"); int g = 100; app.add_option("-g,--generations", g, "Number of generations to calculate"); dim3 threads_per_block(10, 10, 10); //app.add_option("-b,--block", threads_per_block, "Number of threads per block (3D)"); std::vector<int> rules = {5, 7, 6, 7}; app.add_option("-r,--rules", rules, "Number of threads per block (3D)"); int type = 1; app.add_option("-t,--type", type, "0 - all global, 1 - shared areas, 2 - continuous shared"); int skip = 1; app.add_option("-s,--skip", skip, "how often to write state (only for continuous shared)"); CLI11_PARSE(app, argc, argv); int length, width, heighth; read_size(input_filename, &length, &width, &heighth); cell field_h[length * width * heighth]; for (int i = 0; i < length; ++i) { for (int j = 0; j < width; ++j) { for (int k = 0; k < heighth; ++k) { field_h[i + j * length + k * length * width] = 0; } } } read_input(input_filename, field_h); cell* field0_d; cell* field1_d; size_t size = length * width * heighth * sizeof(cell); gpuErrchk(hipMalloc(&field0_d, size)); gpuErrchk(hipMalloc(&field1_d, size)); gpuErrchk(hipMemcpy(field0_d, field_h, size, hipMemcpyHostToDevice)); std::ofstream ofs; ofs.open(output_filename, std::ofstream::out | std::ofstream::trunc); ofs.close(); dim3 bpg(length / threads_per_block.x + 1, width / threads_per_block.y + 1, heighth / threads_per_block.z + 1); for (int i = 0; i < g; ++i) { append_state_to_file(output_filename, field0_d, length, width, heighth); switch (type) { case 0: hipLaunchKernelGGL(( cuda_kernels::calc_next_generation_all_global), dim3(bpg), dim3(threads_per_block), 0, 0, field0_d, field1_d, length, width, heighth, rules[0], rules[1], rules[2], rules[3]); break; case 1: hipLaunchKernelGGL(( cuda_kernels::calc_next_generation_shared_areas), dim3(bpg), dim3(threads_per_block), 0, 0, field0_d, field1_d, length, width, heighth, rules[0], rules[1], 
rules[2], rules[3]); break; case 2: hipLaunchKernelGGL(( cuda_kernels::calculate_n_generations), dim3(bpg), dim3(threads_per_block), 0, 0, skip, field0_d, field1_d, length, width, heighth, rules[0], rules[1], rules[2], rules[3]); break; } gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); std::swap(field0_d, field1_d); } return 0; }
24b21274f981198e1d8701b923edfe604634cf6d.cu
#include <string> #include <vector> #include <iostream> #include <sstream> #include <fstream> #include <iterator> #include <CLI11.hpp> #define gpuErrchk(ans) { gpuAssert( (ans), __FILE__, __LINE__ ); } inline void gpuAssert( cudaError_t code, const char * file, int line, bool abort = true ) { if ( cudaSuccess != code ) { fprintf( stderr, "\nGPUassert: %s %s %d\n", cudaGetErrorString( code ), file, line ); if ( abort ) exit( code ); } return; } typedef uint cell; namespace cuda_kernels{ __host__ __device__ int index_from_coordinates(int column, int row, int level, int length, int width, int heighth) { return ((column + length) % length) + ((row + width) % width) * length + ((level + heighth) % heighth) * length * width; } __host__ __device__ int calc_neighbours(cell* grid, int column, int row, int level, int length, int width, int heighth) { return grid[index_from_coordinates(column + -1, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + -1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + -1, row + 1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + -1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 0, level + -1, length, width, 
heighth)] + grid[index_from_coordinates(column + 0, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 0, row + 1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + -1, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 0, level + 1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + -1, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + 0, length, width, heighth)] + grid[index_from_coordinates(column + 1, row + 1, level + 1, length, width, heighth)]; } __global__ void calc_next_generation_all_global(cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { // Call to global memory approx 28 times per cell - very slow int column = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int level = blockIdx.z * blockDim.z + threadIdx.z; int place = index_from_coordinates(column, row, level, length, width, heighth); if (column < length and row < width and level < heighth) { cell state = current_grid[place]; // slow int neighbours = calc_neighbours(current_grid, column, row, level, length, width, heighth); // 26 * slow if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and neighbours >= llive and neighbours <= rlive)) { next_grid[place] = 1; } else { next_grid[place] = 
0; } } } __global__ void calc_next_generation_shared_areas(cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { // Call to global memory approx 2 times per cell - fast int column = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; int row = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; int level = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; if (column < length + 1 and row < width + 1 and level < heighth + 1) { int place = index_from_coordinates(column, row, level, length, width, heighth); __shared__ cell area[8192]; // 32kb of shared memory area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = current_grid[place]; // slow __syncthreads(); // now we can calculate neighbours fast in a slightly smaller area if (threadIdx.x > 0 and threadIdx.x < blockDim.x - 1 and threadIdx.y > 0 and threadIdx.y < blockDim.y - 1 and threadIdx.z > 0 and threadIdx.z < blockDim.z - 1 and column != length and row != width and level != heighth) { int neighbours; neighbours = calc_neighbours(area, threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z); // fast cell state = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and neighbours >= llive and neighbours <= rlive)) { next_grid[place] = 1; } else { next_grid[place] = 0; } } } } __global__ void calculate_n_generations(int n, cell* current_grid, cell* next_grid, int length, int width, int heighth, int llive, int rlive, int lborn, int rborn) { //copy areas for each block into it's shared memory and then calculate neighbours int column = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; int row = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; int level = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; if (column < length + 1 and row < width + 1 and level < heighth + 1) { int place = 
index_from_coordinates(column, row, level, length, width, heighth); __shared__ cell area[8192]; // 32kb of shared memory if (threadIdx.x > 0 and threadIdx.x < blockDim.x - 1 and threadIdx.y > 0 and threadIdx.y < blockDim.y - 1 and threadIdx.z > 0 and threadIdx.z < blockDim.z - 1 and column != length and row != width and level != heighth) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = current_grid[place]; // pull inner cells } int neighbours; int state; cell* now = current_grid; cell* next = next_grid; cell* temp; for (int i = 0; i < n; ++i) { if (threadIdx.x == 0 or threadIdx.x == blockDim.x - 1 or threadIdx.y == 0 or threadIdx.y == blockDim.y - 1 or threadIdx.z == 0 or threadIdx.z == blockDim.z - 1 or column == length or row == width or level == heighth) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = now[place]; // pull outer cells } __syncthreads(); neighbours = calc_neighbours(area, threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z); // fast state = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; if ((state == 0 and neighbours >= lborn and neighbours <= rborn) or (state == 1 and neighbours >= llive and neighbours <= rlive)) { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = 1; } else { area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y] = 0; } if (threadIdx.x == 1 or threadIdx.x == blockDim.x - 2 or column == length - 1 or threadIdx.y == 1 or threadIdx.y == blockDim.y - 2 or row == width - 1 or threadIdx.z == 1 or threadIdx.z == blockDim.z - 2 or level == heighth - 1) { next[place] = area[threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y]; } temp = now; now = next; next = temp; __syncthreads(); } } } } void read_size(std::string filename, int* length, int* width, int* heighth){ std::ifstream data(filename); std::string 
line; std::getline(data, line); std::stringstream lineStream(line); std::vector<std::string> parsedRow; std::string cell; while(std::getline(lineStream, cell, ' ')) { parsedRow.push_back(cell); } *length = std::stoi(parsedRow[0]); *width = std::stoi(parsedRow[1]); *heighth = std::stoi(parsedRow[2]); } void read_input(std::string filename, cell* field_h){ std::ifstream data(filename); std::string line; std::getline(data, line); std::stringstream lineStream(line); std::vector<std::string> parsedRow; std::string cell; while(std::getline(lineStream, cell, ' ')) { parsedRow.push_back(cell); } int length = std::stoi(parsedRow[0]); int width = std::stoi(parsedRow[1]); int heighth = std::stoi(parsedRow[2]); while(std::getline(data,line)) { std::stringstream lineStream(line); std::string cell; std::vector<std::string> parsedRow; while(std::getline(lineStream, cell ,' ')) { parsedRow.push_back(cell); } if (std::stoi(parsedRow[0]) != 0 or std::stoi(parsedRow[1]) != 0 or std::stoi(parsedRow[2]) != 0 or std::stoi(parsedRow[3]) != 0) { field_h[std::stoi(parsedRow[1]) + std::stoi(parsedRow[2]) * length + std::stoi(parsedRow[3]) * length * width] = std::stoi(parsedRow[0]); } } } void append_state_to_file(std::string filename, cell* field_d, int length, int width, int heighth){ cell field_h[length * width * heighth]; size_t size = length * width * heighth * sizeof(int); cudaMemcpy(field_h, field_d, size, cudaMemcpyDeviceToHost); std::ofstream file; file.open(filename, std::ios_base::app); for (int i = 0; i < length; ++i) { for (int j = 0; j < width; ++j) { for (int k = 0; k < heighth; ++k) { int place = i + j * length + k * length * width; if (field_h[place] != 0){ file << field_h[place] << " " << i << " " << j << " " << k << "\n"; } } } } file << "0 0 0 0\n"; } int main(int argc, char** argv){ CLI::App app{"Cuda game of life"}; std::string input_filename = "glider.in"; app.add_option("-i,--input", input_filename, "Input filename"); std::string output_filename = "test.out"; 
app.add_option("-o,--output", output_filename, "Output filename"); int g = 100; app.add_option("-g,--generations", g, "Number of generations to calculate"); dim3 threads_per_block(10, 10, 10); //app.add_option("-b,--block", threads_per_block, "Number of threads per block (3D)"); std::vector<int> rules = {5, 7, 6, 7}; app.add_option("-r,--rules", rules, "Number of threads per block (3D)"); int type = 1; app.add_option("-t,--type", type, "0 - all global, 1 - shared areas, 2 - continuous shared"); int skip = 1; app.add_option("-s,--skip", skip, "how often to write state (only for continuous shared)"); CLI11_PARSE(app, argc, argv); int length, width, heighth; read_size(input_filename, &length, &width, &heighth); cell field_h[length * width * heighth]; for (int i = 0; i < length; ++i) { for (int j = 0; j < width; ++j) { for (int k = 0; k < heighth; ++k) { field_h[i + j * length + k * length * width] = 0; } } } read_input(input_filename, field_h); cell* field0_d; cell* field1_d; size_t size = length * width * heighth * sizeof(cell); gpuErrchk(cudaMalloc(&field0_d, size)); gpuErrchk(cudaMalloc(&field1_d, size)); gpuErrchk(cudaMemcpy(field0_d, field_h, size, cudaMemcpyHostToDevice)); std::ofstream ofs; ofs.open(output_filename, std::ofstream::out | std::ofstream::trunc); ofs.close(); dim3 bpg(length / threads_per_block.x + 1, width / threads_per_block.y + 1, heighth / threads_per_block.z + 1); for (int i = 0; i < g; ++i) { append_state_to_file(output_filename, field0_d, length, width, heighth); switch (type) { case 0: cuda_kernels::calc_next_generation_all_global<<<bpg, threads_per_block>>>(field0_d, field1_d, length, width, heighth, rules[0], rules[1], rules[2], rules[3]); break; case 1: cuda_kernels::calc_next_generation_shared_areas<<<bpg, threads_per_block>>>(field0_d, field1_d, length, width, heighth, rules[0], rules[1], rules[2], rules[3]); break; case 2: cuda_kernels::calculate_n_generations<<<bpg, threads_per_block>>>(skip, field0_d, field1_d, length, width, 
heighth, rules[0], rules[1], rules[2], rules[3]); break; } gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); std::swap(field0_d, field1_d); } return 0; }
51a22736c508450b81d64d14f0be998c5ea17b36.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> extern "C" { __global__ void testKernel(int* addr, unsigned short param1, char param2) { addr[0] = param1 + param2; } } char* muGetErrorString(hipError_t result); void muEC(int position) //checks and outputs error position and error string { hipError_t errcode = hipGetLastError(); if(errcode==hipSuccess) { printf("No error at position %i\n", position); return; } printf("Error position: %i\nCode:%s\n", position, hipGetErrorString(errcode)); } void muRC(int position, hipError_t result) { if(result==0) printf("Success at %i\n", position); else printf("Error at %i:%s\n", position, muGetErrorString(result)); } char* muGetErrorString(hipError_t result) { switch(result) { case 0: return "Success"; case 1: return "Invalid value"; case 2: return "Out of memory"; case 3: return "Not Initialized"; case 4: return "Deinitialized"; case 100: return "No device"; case 101: return "Invalid device"; case 200: return "Invalid image"; case 201: return "Invalid context"; case 202: return "Context already current"; case 205: return "Map failed"; case 206: return "Unmap failed"; case 207: return "Array is mapped"; case 208: return "Already mapped"; case 209: return "No binary for GPU"; case 210: return "Already acquired"; case 211: return "Not mapped"; case 300: return "Invalid source"; case 301: return "File not found"; case 400: return "Invalid handle"; case 500: return "Not found"; case 600: return "Not ready"; case 700: return "Launch failed"; case 701: return "Launch out of resources"; case 702: return "Launch timeout"; case 703: return "Launch incompatible texturing"; case 999: return "Unknown"; }; return "Unknown"; } int main( int argc, char** argv) { if(argc<3) { puts("arguments: cubinname kernelname length tcount interval choice"); puts(" length: number of 4-byte elements to allocate in memory"); puts(" tcount: number of threads"); puts(" interval: 
number of output items per group"); puts(" choice: 0, all; 1, odd group only; 2, even group only; 3: none"); return 0; } int length = 8; if(argc>=4) { length = atoi(argv[3]); } int tcount = 1; if(argc>=5) { tcount = atoi(argv[4]); } int* cpu_output=new int[length]; int size = sizeof(int)*length; int interval = 1; if(argc>=6) { interval = atoi(argv[5]); } bool odd = true; bool even = true; if(argc>=7) { int choice = atoi(argv[6]); if(choice==1) even = false; else if(choice==2) odd = false; else if(choice==3) { even = false; odd = false; } } hipDeviceptr_t gpu_output; hipDevice_t device; hipCtx_t context; muRC(100, hipInit(0)); muRC(95, hipDeviceGet(&device, 0)); muRC(92, hipCtxCreate(&context, HIP_CTX_SCHED_SPIN, device)); //muRC(91, hipCtxSetCacheConfig(hipFuncCachePreferShared)); muRC(91, hipCtxSetCacheConfig(hipFuncCachePreferEqual)); muRC(90, cuMemAlloc(&gpu_output, size)); hipEvent_t eStart, eStop; muRC(89, hipEventCreate(&eStart, hipEventDefault)); muRC(88, hipEventCreate(&eStop, hipEventDefault)); hipModule_t module; hipFunction_t kernel; hipError_t result = hipModuleLoad(&module, argv[1]); muRC(0 , result); result = hipModuleGetFunction(&kernel, module, argv[2]); muRC(1, result); int param = 0x1010; muRC(2, hipParamSetSize(kernel, 20)); muRC(3, hipParamSetv(kernel, 0, &gpu_output, 8)); muRC(3, hipParamSetv(kernel, 16, &param, 4)); muRC(4, hipFuncSetBlockShape(kernel, tcount,1,1)); muRC(41, hipEventRecord(eStart,0) ); muRC(5, hipLaunch(kernel)); muRC(51, hipEventRecord(eStop,0) ); muRC(6, cuMemcpyDtoH(cpu_output, gpu_output, size)); muRC(7, hipCtxSynchronize()); float time; muRC(75, hipEventElapsedTime(&time, eStart, eStop)); printf("length=%i\n", length); printf("tcount=%i\n", tcount); printf("time=%f\n", time); for(int i=0; i<length/interval; i++) { if(i%2==0) { if(!even) continue; } else { if(!odd) continue; } for(int j=0; j<interval; j++) printf("i=%i, j=%i, output=%i\n", i, j, cpu_output[i*interval+j]); if(interval!=1) puts(""); } muRC(8, 
hipModuleUnload(module)); muRC(9, hipFree(gpu_output)); muRC(10, hipCtxDestroy(context)); delete[] cpu_output; return 0; }
51a22736c508450b81d64d14f0be998c5ea17b36.cu
#include <stdlib.h> #include <stdio.h> #include <cuda.h> extern "C" { __global__ void testKernel(int* addr, unsigned short param1, char param2) { addr[0] = param1 + param2; } } char* muGetErrorString(CUresult result); void muEC(int position) //checks and outputs error position and error string { cudaError_t errcode = cudaGetLastError(); if(errcode==cudaSuccess) { printf("No error at position %i\n", position); return; } printf("Error position: %i\nCode:%s\n", position, cudaGetErrorString(errcode)); } void muRC(int position, CUresult result) { if(result==0) printf("Success at %i\n", position); else printf("Error at %i:%s\n", position, muGetErrorString(result)); } char* muGetErrorString(CUresult result) { switch(result) { case 0: return "Success"; case 1: return "Invalid value"; case 2: return "Out of memory"; case 3: return "Not Initialized"; case 4: return "Deinitialized"; case 100: return "No device"; case 101: return "Invalid device"; case 200: return "Invalid image"; case 201: return "Invalid context"; case 202: return "Context already current"; case 205: return "Map failed"; case 206: return "Unmap failed"; case 207: return "Array is mapped"; case 208: return "Already mapped"; case 209: return "No binary for GPU"; case 210: return "Already acquired"; case 211: return "Not mapped"; case 300: return "Invalid source"; case 301: return "File not found"; case 400: return "Invalid handle"; case 500: return "Not found"; case 600: return "Not ready"; case 700: return "Launch failed"; case 701: return "Launch out of resources"; case 702: return "Launch timeout"; case 703: return "Launch incompatible texturing"; case 999: return "Unknown"; }; return "Unknown"; } int main( int argc, char** argv) { if(argc<3) { puts("arguments: cubinname kernelname length tcount interval choice"); puts(" length: number of 4-byte elements to allocate in memory"); puts(" tcount: number of threads"); puts(" interval: number of output items per group"); puts(" choice: 0, all; 1, odd group only; 
2, even group only; 3: none"); return 0; } int length = 8; if(argc>=4) { length = atoi(argv[3]); } int tcount = 1; if(argc>=5) { tcount = atoi(argv[4]); } int* cpu_output=new int[length]; int size = sizeof(int)*length; int interval = 1; if(argc>=6) { interval = atoi(argv[5]); } bool odd = true; bool even = true; if(argc>=7) { int choice = atoi(argv[6]); if(choice==1) even = false; else if(choice==2) odd = false; else if(choice==3) { even = false; odd = false; } } CUdeviceptr gpu_output; CUdevice device; CUcontext context; muRC(100, cuInit(0)); muRC(95, cuDeviceGet(&device, 0)); muRC(92, cuCtxCreate(&context, CU_CTX_SCHED_SPIN, device)); //muRC(91, cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_SHARED)); muRC(91, cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_EQUAL)); muRC(90, cuMemAlloc(&gpu_output, size)); CUevent eStart, eStop; muRC(89, cuEventCreate(&eStart, CU_EVENT_DEFAULT)); muRC(88, cuEventCreate(&eStop, CU_EVENT_DEFAULT)); CUmodule module; CUfunction kernel; CUresult result = cuModuleLoad(&module, argv[1]); muRC(0 , result); result = cuModuleGetFunction(&kernel, module, argv[2]); muRC(1, result); int param = 0x1010; muRC(2, cuParamSetSize(kernel, 20)); muRC(3, cuParamSetv(kernel, 0, &gpu_output, 8)); muRC(3, cuParamSetv(kernel, 16, &param, 4)); muRC(4, cuFuncSetBlockShape(kernel, tcount,1,1)); muRC(41, cuEventRecord(eStart,0) ); muRC(5, cuLaunch(kernel)); muRC(51, cuEventRecord(eStop,0) ); muRC(6, cuMemcpyDtoH(cpu_output, gpu_output, size)); muRC(7, cuCtxSynchronize()); float time; muRC(75, cuEventElapsedTime(&time, eStart, eStop)); printf("length=%i\n", length); printf("tcount=%i\n", tcount); printf("time=%f\n", time); for(int i=0; i<length/interval; i++) { if(i%2==0) { if(!even) continue; } else { if(!odd) continue; } for(int j=0; j<interval; j++) printf("i=%i, j=%i, output=%i\n", i, j, cpu_output[i*interval+j]); if(interval!=1) puts(""); } muRC(8, cuModuleUnload(module)); muRC(9, cuMemFree(gpu_output)); muRC(10, cuCtxDestroy(context)); delete[] cpu_output; return 0; 
}
269f37eb3336adbbe66f124ff504b899d715b843.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _KANULIAREPART_CU_ #define _KANULIAREPART_CU_ #include <stdio.h> //#include "cutil_inline.h" #include "kanulia.h" #include "kanuliacalc.cu" // Rotation de quaternion __device__ inline void rotate4(float *px, float *py, float *pz, float *pw, const float4 angle) { float t; if (angle.x != 0. ) { t = *py * cos(angle.x) + *pz * sin(angle.x); *pz = - *py * sin(angle.x) + *pz * cos(angle.x); *py = t; }; if (angle.y != 0. ) { t = *px * cos(angle.y) + *pz * sin(angle.y); *pz = - *px * sin(angle.y) + *pz * cos(angle.y); *px = t; }; if (angle.z != 0. ) { t = *pz * cos(angle.z) + *pw * sin(angle.z); *pw = - *pz * sin(angle.z) + *pw * cos(angle.z); *pz = t; }; if (angle.w != 0. ) { t = *py * cos(angle.w) + *pw * sin(angle.w); *pw = - *py * sin(angle.w) + *pw * cos(angle.w); *py = t; }; } __device__ inline void rotate4inv(float *px, float *py, float *pz, float *pw, const float4 angle) { float t; if (angle.w != 0. ) { t = *py * cos(-angle.w) + *pw * sin(-angle.w); *pw = - *py * sin(-angle.w) + *pw * cos(-angle.w); *py = t; }; if (angle.z != 0. ) { t = *pz * cos(-angle.z) + *pw * sin(-angle.z); *pw = - *pz * sin(-angle.z) + *pw * cos(-angle.z); *pz = t; }; if (angle.y != 0. ) { t = *px * cos(-angle.y) + *pz * sin(-angle.y); *pz = - *px * sin(-angle.y) + *pz * cos(-angle.y); *px = t; }; if (angle.x != 0. ) { t = *py * cos(-angle.x) + *pz * sin(-angle.x); *pz = - *py * sin(-angle.x) + *pz * cos(-angle.x); *py = t; }; } __device__ inline void rotate3(float *px, float *py, float *pz, const float4 angle) { float t; if (angle.x != 0. ) { t = *py * cos(angle.x) + *pz * sin(angle.x); *pz = - *py * sin(angle.x) + *pz * cos(angle.x); *py = t; }; if (angle.y != 0. ) { t = *px * cos(angle.y) + *pz * sin(angle.y); *pz = - *px * sin(angle.y) + *pz * cos(angle.y); *px = t; }; if (angle.z != 0. 
) { t = *px * cos(angle.z) - *py * sin(angle.z); *py = *px * sin(angle.z) + *py * cos(angle.z); *px = t; }; /* if (angle.w != 0. ) { t = *py * cos(angle.w) + *pw * sin(angle.w); *pw = - *py * sin(angle.w) + *pw * cos(angle.w); *py = t; };*/ } // The Julia4D CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ // The core Julia CUDA GPU calculation function /**/ __device__ int CloudJulia4D(const float ox, const float oy, const float oz, const float ow, const float4 JS, const float dx, const float dy, const float dz, const float dw, int *r, int *g, int *b, int nb, const unsigned int crn) { float ret = 0; float x = ox; float y = oy; float z = oz; float w = ow; int c = nb; do { x += dx; y += dy; z += dz; w += dw; if (CalcJulia4D(x, y, z, w, JS, crn) == 0) ret += 1; } while (c--); if (ret>255) ret = 255; if (ret == 0) { *r = 0; *g = 0; *b = 0; } else { *r = ret; *g = ret; *b = 155; } return ret; } // CloudJulia4D /** return if y is cutted by the cutjulia option*/ /* if non 0.0 it is cutted out and the return value is the distance to the next un cutted plan */ __device__ bool iscuttedout(bool cutjulia,float y){ float d = 0.15f;// distance between 2 layer start float h = 0.02f;// width of the layer, ( h < d ) if (!cutjulia) return false; // float ymodd = abs(y) - (int(abs(y) / d))*d; // = y % d // return (ymodd > h / 2.) 
&& (ymodd < d - h / 2.0); float ymodd = (y/*+10.0*d*/) - (int((y/*+10.0*d*/) / d))*d; // = y % d return (ymodd > h ); } /** return distance factor to next uncutted out plan */ __device__ float getstepstonextplan(float y,float dy) { float d = 0.15f;// distance between 2 layer start //float h = 0.02f;// width of the layer, ( h < d ) float ymodd = y - (int(y / d))*d; // = y % d /* if (dy > 0.) return (d - (h / 2.0) - ymodd) / dy; else return ((h / 2.0) - ymodd ) / dy;*/ return (d - ymodd) / dy; } // The core Julia CUDA GPU calculation function __device__ int SolidJulia4D(const int ix, const int iy, const float4 JS, const float4 angle, const int d_imageW, const int d_imageH, const float scaleJ, const float xblur, const float yblur, int *r, int *g, int *b, const float xJOff, const float yJOff, const unsigned int crn, int julia4D, const bool cutjulia) { //hue color float hue; float dist = 6.0; float step = RAYSTEP; float x = ((float)ix + (xblur)) * scaleJ + xJOff; float y = ((float)iy + (yblur)) * scaleJ + yJOff; float z = ZOBSERVER; float w = 0.0; if (julia4D & CROSSEYE) { if (ix < (d_imageW / 2.)) // image gauche x = ((float)ix + (d_imageW / 4.) + (xblur)) * scaleJ + xJOff + SPACEEYE; else // image droite x = ((float)ix - (d_imageW / 4.) + (xblur)) * scaleJ + xJOff - SPACEEYE; } else { x = ((float)ix + (xblur)) * scaleJ + xJOff; } float dx = sin(KANULFOV * step * scaleJ * ((float)ix + (xblur)-(d_imageW / 2.)) / ((float)d_imageW)); float dy = sin(KANULFOV * step * scaleJ * ((float)iy + (yblur)-(d_imageH / 2.)) / ((float)d_imageW)); float dz = step; float dw = 0.; if (julia4D & CROSSEYE) { if (ix < (d_imageW / 2.)) // image gauche dx -= CROSSANGLE; else // image droite dx += CROSSANGLE; } rotate4(&x, &y, &z, &w, angle); rotate4(&dx, &dy, &dz, &dw, angle); float nd = sqrt(dx*dx + dy*dy + dz*dz + dw*dw); // float mx = 0.; // float ndx =dx/nd;float ndy =dy/nd;float ndz =dz/nd;float ndw =dw/nd; int nb = (dist / step); // hum sert a rien ? 
float x0 = 0.0; float y0 = -1.0; float z0 = 0.0; float w0 = 0.0;// normal is the secant plan's normal // Les trois rays qui vont servir a calculer la normale float x1 = step; float y1 = 0.0; float z1 = 0.0; float w1 = 0.0; float x2 = 0.0; float y2 = step; float z2 = 0.0; float w2 = 0.0; //float x3 = 0.0;float y3 = 0.0;float z3 = 0.0;float w3 = step; rotate4(&x1, &y1, &z1, &w1, angle); rotate4(&x2, &y2, &z2, &w2, angle); // light source direction float xl = 1.; float yl = -1.; float zl = 1.; float wl = 0.; float ddx = dx; float ddy = dy; float ddz = dz; float ddw = dw; int c = nb; bool out = true; // if ray is out main c=0 bool cutplan = false; // if ray hit cutting plan // int logout = 0; do { // if inside empty aera if (iscuttedout(cutjulia,y)) { // hit the surface float dhit = getstepstonextplan(y,dy); //if (logout == 1000) { // printf("%f pouet ", dhit); // logout = 0; //} //else { // logout++; //} x += dx * dhit; y += dy * dhit; z += dz * dhit; w += dw * dhit; if (CalcJulia4Dcore(x, y, z, w, JS, &hue) >= crn) { c = 0; // stop, we hit the inside // for normal 3D x1 = x + x1; y1 = y + y1; z1 = z + z1; w1 = w + w1; dhit = -y1 / dy; x1 += dx * dhit; y1 += dy * dhit; z1 += dz * dhit; w1 += dw * dhit; x2 = x + x2; y2 = y + y2; z2 = z + z2; w2 = w + w2; dhit = -y2 / dy; x2 += dx * dhit; y2 += dy * dhit; z2 += dz * dhit; w2 += dw * dhit; //x3=x + x3; //y3=y + y3; //z3=z + z3; //w3=w + w3; //dhit = -y3/dy; //x3 += dx * dhit; //y3 += dy * dhit; //z3 += dz * dhit; //w3 += dw * dhit; cutplan = true; out = false; // } } } else { /* if (logout == 1000) { printf("%f pouIIIt ",dy); logout = 0; } else { logout++; }*/ x += dx; y += dy; z += dz; w += dw; // x += ndx*step;y += ndy*step;z += ndz*step;w += ndw*step; // if (CalcJulia4Dstep(x, y, z, w, JS, crn,&step)==0) if (CalcJulia4D(x, y, z, w, JS, crn) == 0) { // ray is not out. 
we ll see if normal is out now out = false; c = 12; // for normal 3D x1 = x + x1; y1 = y + y1; z1 = z + z1; w1 = w + w1; x2 = x + x2; y2 = y + y2; z2 = z + z2; w2 = w + w2; //x3=x + x3; //y3=y + y3; //z3=z + z3; //w3=w + w3; ddx = dx; ddy = dy; ddz = dz; ddw = dw; float d1x = dx*2.0; float d1y = dy*2.0; float d1z = dz*2.0; float d1w = dw*2.0; float d2x = dx*2.0; float d2y = dy*2.0; float d2z = dz*2.0; float d2w = dw*2.0; //float d3x=dx*2.0;float d3y=dy*2.0;float d3z=dz*2.0;float d3w=dw*2.0; int in = 0, in1 = 0, in2 = 0;//,in3=0; // place les 3 rayons pour les normales contre la forme if (CalcJulia4D(x1, y1, z1, w1, JS, crn) == 0) { do { x1 -= d1x; y1 -= d1y; z1 -= d1z; w1 -= d1w; if (x1*x1 + y1*y1 + z1*z1 + w1*w1 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x1, y1, z1, w1, JS, crn) == 0) && (!out)); } else { do { x1 += d1x; y1 += d1y; z1 += d1z; w1 += d1w; if (x1*x1 + y1*y1 + z1*z1 + w1*w1 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x1, y1, z1, w1, JS, crn) != 0) && (!out)); } if (CalcJulia4D(x2, y2, z2, w2, JS, crn) == 0) { do { x2 -= d2x; y2 -= d2y; z2 -= d2z; w2 -= d2w; if (x2*x2 + y2*y2 + z2*z2 + w2*w2 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x2, y2, z2, w2, JS, crn) == 0) && (!out)); } else { do { x2 += d2x; y2 += d2y; z2 += d2z; w2 += d2w; if (x2*x2 + y2*y2 + z2*z2 + w2*w2 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x2, y2, z2, w2, JS, crn) != 0) && (!out)); } //if (CalcJulia4D(x3, y3, z3, w3, JS, crn)==0) //{ // do { // x3 -= d3x;y3 -= d3y;z3 -= d3z;w2 -= d3w; // if (x3*x3 + y3*y3 + z3*z3 + w3*w3 > OUTMANDELBOX) out=true; // } while ((CalcJulia4D(x3, y3, z3, w3, JS, crn) == 0) && (!out) ); //} else { // do { // x3 += d3x;y3 += d3y;z3 += d3z;w3 += d3w; // if (x3*x3 + y3*y3 + z3*z3 + w3*w3 > OUTMANDELBOX) out=true; // } while ((CalcJulia4D(x3, y3, z3, w3, JS, crn) != 0) && (!out) ); //} if (!out) { do { in = CalcJulia4Dhue(x, y, z, w, JS, &hue, crn); in1 = CalcJulia4D(x1, y1, z1, w1, JS, crn); in2 = CalcJulia4D(x2, y2, z2, w2, JS, 
crn); //in3 = CalcJulia4D(x3, y3, z3, w3, JS, crn); if (in == 0) { x -= ddx; y -= ddy; z -= ddz; w -= ddw; } else { x += ddx; y += ddy; z += ddz; w += ddw; } if (in1 == 0) { x1 -= d1x; y1 -= d1y; z1 -= d1z; w1 -= d1w; } else { x1 += d1x; y1 += d1y; z1 += d1z; w1 += d1w; } if (in2 == 0) { x2 -= d2x; y2 -= d2y; z2 -= d2z; w2 -= d2w; } else { x2 += d2x; y2 += d2y; z2 += d2z; w2 += d2w; } //if (in3==0) { // x3 -= d3x;y3 -= d3y;z3 -= d3z;w3 -= d3w; //} else { // x3 += d3x;y3 += d3y;z3 += d3z;w3 += d3w; //} ddx /= 2.0; ddy /= 2.0; ddz /= 2.0; ddw /= 2.0; d1x /= 2.0; d1y /= 2.0; d1z /= 2.0; d1w /= 2.0; d2x /= 2.0; d2y /= 2.0; d2z /= 2.0; d2w /= 2.0; //d3x /= 2.0;d3y /= 2.0;d3z /= 2.0;d3w /= 2.0; } while (c-->0); } else c = 1; } } // if (mx>4.0) c=1; } while (c-->0); if (out) { // while (x*x+y*y+z*z+w*w<OUTBOX) /* while ((x<OUTBOX)&&(x>-OUTBOX) &&(y<OUTBOX)&&(y>-OUTBOX) &&(z<OUTBOX)&&(z>-OUTBOX) &&(w<OUTBOX)&&(w>-OUTBOX))*/ /* { x+=dx;y+=dy;z+=dz;w+=dw; }*/ *r = 1; *g = 1; *b = 1; // if ((x-(float)((int)(x*1.))/1.<0.01) // ||(y-(float)((int)(y*1.))/1.<0.01) // ||(z-(float)((int)(z*10.))/10.<0.01) // ||(w-(float)((int)(w*10.))/10.<0.01) // ) /* if ( (ABS(x-(float)((int)(x*7.))/7.)<0.01) ||(ABS(y-(float)((int)(y*7.))/7.)<0.01) ||(ABS(z-(float)((int)(z*7.))/7.)<0.01) ||(ABS(w-(float)((int)(w*7.))/7.)<0.01) ) { *r = 255; *g = 255; *b = 255; }*/ } else { // computing vector x1 -= x; y1 -= y; z1 -= z; w1 -= w; x2 -= x; y2 -= y; z2 -= z; w2 -= w; //x3 -= x;y3 -= y;z3 -= z;w3 -= w; // vector product for normal // 3D Normal in space vue //x0 = x1 * x2 - y1 * y2 - z1 * z2 - w1* w2; //y0 = x1 * y2 + y1 * x2 + z1 * w2 - w1* z2; //z0 = x1 * z2 + z1 * x2 + w1 * y2 - y1* w2; //w0 = x1 * w2 + w1 * x2 + y1 * z2 - z1* y2; // 4D Normal //x0 = y1*(w2*z3-z2*w3)+y2*(z1*w3-w1*z3)+y3*(w1*z2-z1*w2); //y0 = x1*(z2*w3-w2*z3)+x2*(w1*z3-z1*w3)+x3*(z1*w2-w1*z2); //z0 = x1*(w2*y3-y2*w3)+x2*(y1*w3-w1*y3)+x3*(w1*y2-y1*w2); //w0 = x1*(y2*z3-z2*y3)+x2*(z1*y3-y1*z3)+x3*(y1*z2-z1*y2); // retour dans le repere 
de la cam rotate4inv(&dx, &dy, &dz, &dw, angle); rotate4inv(&x1, &y1, &z1, &w1, angle); rotate4inv(&x2, &y2, &z2, &w2, angle); // 3D Normal in space xyz x0 = z1 * y2 - y1 * z2; y0 = x1 * z2 - z1 * x2; z0 = y1 * x2 - x1 * y2; w0 = 0.; if (cutplan) { x0 = 0.0; y0 = -1.0; z0 = 0.0; w0 = 0.0;// normal is the secant plan's normal rotate4inv(&x0, &y0, &z0, &w0, angle); float n0 = sqrt(x0*x0 + y0*y0 + z0*z0);//+w0*w0); x0 /= n0; y0 /= n0; z0 /= n0;//w0/=n0; } // Normalisation float n0 = sqrt(x0*x0 + y0*y0 + z0*z0);//+w0*w0); float nl = sqrt(xl*xl + yl*yl + zl*zl);//+wl*wl); float nd = sqrt(dx*dx + dy*dy + dz*dz);//+dw*dw); x0 /= n0; y0 /= n0; z0 /= n0;//w0/=n0; xl /= nl; yl /= nl; zl /= nl;//wl/=nl; dx /= nd; dy /= nd; dz /= nd;//dw/=nd; // angle of direction / normal /* float anv = (x0 * dx + y0 *dy + z0 *dz + w0 *dw); if (anv<0.) anv=0.;*/ // angle of light direction / normal float anl = -(x0* xl + y0* yl + z0*zl);// + w0*wl); if (anl<0.) anl = 0.; // dx=0.;dy=0.;dz=1.;dw=0.; // radiance float anr = 0.; float pscal = (xl*x0 + yl*y0 + zl*z0);// + wl*w0); if (pscal < 0.) { float xr = xl - x0*2.*pscal; float yr = yl - y0*2.*pscal; float zr = zl - z0*2.*pscal;//float wr=wl-w0*2.*pscal; float nr = sqrt(xr*xr + yr*yr + zr*zr);//+wr*wr); xr /= nr; yr /= nr; zr /= nr;//wr/=nr; anr = -(xr*dx + yr*dy + zr*dz);// + wr*dw); // anr = -pscal; // anr = -(x0*dx + y0*dy + z0*dz + w0*dw); anr = anr * 8.5 - 7.; //if ( anr < 0.8 ) anr=0.; if (anr > 1.) anr = 1.; if (anr < 0.) anr = 0.; anr = anr*anr; } // shadow computation float sh = 1.0; bool shadow = false; // light source rotate with camera rotate4(&xl, &yl, &zl, &wl, angle); do { x -= xl*step; y -= yl*step; z -= zl*step; w -= wl*step; //if ((y > 0.) || (!cutjulia)) if (!iscuttedout(cutjulia,y)) if (CalcJulia4D(x, y, z, w, JS, crn) == 0) shadow = true; } while ((x*x + y*y + z*z + w*w < OUTMANDELBOX) && (!shadow) && (iscuttedout(cutjulia,y))); float li = anl*0.7 + 0.1; if (shadow) { sh = 0.5; anr = 0.0; } float L = (li + (1. 
- li)*anr) * sh; // if ( L < 0.0 ) L = 0.0; HSL2RGB(hue, 0.5, L, r, g, b); } return out; } // SolidJulia4D __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch __global__ void Julia4Drepart(uchar4 *dst, const int imageW, const int imageH, const float4 Off, const float4 JS, const float4 angle, const float scale, const float scalei, const float xJOff, const float yJOff, const float scaleJ, const float xblur, const float yblur, const unsigned int maxgropix, const unsigned int gropix, const unsigned int bloc, const unsigned int crn, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const int julia, const int julia4D, const bool cutjulia) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); //blockIndex++; blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while() __syncthreads(); #endif // if (blockIndex >= ((numBlocks/nbloc)+1)*(bloc+1)) break; // finish if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX * maxgropix + threadIdx.x * maxgropix + ((bloc * gropix) % maxgropix); const int iy = blockDim.y * blockY * maxgropix + threadIdx.y * maxgropix + ((bloc * gropix) / maxgropix) * gropix; int r = 0;int g = 0;int b = 0; bool seedre = false;bool seedim = false; if ((ix < imageW) && (iy < imageH)) { int m = 0; if ( (julia<32) && (ix < imageW / julia) && (iy < imageH / julia)) { // Calculate the location const float xPos = (float)ix * scale * julia + Off.x; const float yPos = (float)iy * scale * julia + Off.y; // Calculate the Mandelbrot index for the current location if 
(abs(JS.x-xPos)+abs(JS.y-yPos) < 2.1 * scale * julia ) { seedre = true; } if (!seedre) { float hue; // m = CalcMandelbrot(xPos , yPos); m = CalcMandel4Dcore(xPos, yPos, JS.z, JS.w, &hue); if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b); } } else if (julia4D&& (julia<32) &&((imageW - ix < imageW / julia) && (iy < imageH / julia))) { // Calculate the location const float zPos = (float)(imageW - ix) * scalei * julia + Off.z; const float wPos = (float)iy * scalei * julia + Off.w; // Calculate the Mandelbrot index for the current location if (abs(JS.z-zPos)+abs(JS.w-wPos) < 2.1 * scalei * julia ) { seedim = true; } if (!seedim) { float hue; // m = CalcMandelbrot(zPos , wPos); m = CalcMandel4Dcore(JS.x, JS.y, zPos, wPos, &hue); if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b); } } else { // Calculate the location const float xPos = (float)ix * scaleJ + xJOff; const float yPos = (float)iy * scaleJ + yJOff; /* const float zPos = (float)0.; const float wPos = (float)0.;*/ // Calculate the Mandelbrot index for the current location if (julia4D == JULIA2D) { m = CalcJulia(xPos, yPos, JS, crn); } if (julia4D == CLOUDJULIA) { float dist = 6.0; float step = 0.009; float ox = (float)ix * scaleJ + xJOff; float oy = (float)iy * scaleJ + yJOff; float oz = - 3.0; float ow = 0.0; float dx = sin( 0.7 * step * ( (float)ix + xblur - (imageW/2.)) / ((float) imageW) ); float dy = sin( 0.7 * step * ( (float)iy + yblur - (imageH/2.)) / ((float) imageW) ); float dz = step; float dw = 0.; rotate4(&ox,&oy,&oz,&ow,angle); rotate4(&dx,&dy,&dz,&dw,angle); int nb = (dist/step); m = CloudJulia4D(ox, oy, oz, ow, JS, dx, dy, dz, dw, &r, &g, &b, nb, crn); } if (julia4D & JULIA4D) { /* if ((julia4D & CROSSEYE)&& ( (sqrt( (float)((ix- imageW/4)*(ix- imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.) 
// si viseur ||(sqrt( (float)((ix-3*imageW/4)*(ix-3*imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.))) { r = 255; g = 255; b = 255; } else*/ m = SolidJulia4D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn,julia4D,cutjulia); // m = SolidMandelBox3D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn); } } // m = blockIdx.x; // uncomment to see scheduling order // Convert the Mandelbrot index into a color uchar4 color; // m = m > 0 ? crn - m : 0; if ((julia4D)&&((ix >= imageW / julia) || (iy >= imageH / julia))) { color.x = r; color.y = g; color.z = b; } else { if (seedim||seedre) { color.x = 150; color.y = 250; color.z = 250; } else { color.x = r; color.y = g; color.z = b; /* if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; }*/ } } // activer pour voir le calcul progressif // if (gropix==1) color.z += 120; // if (gropix==2) color.y += 120; // if (gropix==4) color.x += 120; // // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; if (gropix==1) dst[pixel] = color; else for (int i=0;i<gropix;i++) for (int j=0;j<gropix;j++) if ((ix+i<imageW)&&(iy+j<imageH)) dst[pixel+i+imageW*j] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Julia4D0 // The host CPU Mandebrot thread spawner void RunJulia4Drepart(uchar4 *dst, const int imageW, const int imageH, const float4 Off, const float4 JS, const float4 angle, const double scale, const double scalei, const double xJOff, const double yJOff, const double scaleJ, const float xblur, const float yblur, const unsigned int maxgropix, const unsigned int gropix, const unsigned int bloc, const unsigned int crn, const uchar4 
colors, const int frame, const int animationFrame, const int numSMs, const int julia, const int julia4D, const bool cutjulia) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW/maxgropix, BLOCKDIM_X), iDivUp(imageH/(maxgropix), BLOCKDIM_Y)); // zero block counter // unsigned int hBlockCounter = (((grid.x)*(grid.y)/nbloc)+1)*(bloc); unsigned int hBlockCounter = 0; /*cutilSafeCall( */ hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice /*)*/ ); int numWorkUnit = numSMs; hipLaunchKernelGGL(( Julia4Drepart), dim3(numWorkUnit), dim3(threads), 0, 0, dst, imageW, imageH, Off, JS, angle, (float)scale, (float)scalei, (float)xJOff, (float)yJOff, (float)scaleJ, xblur, yblur, maxgropix, gropix, bloc, crn, colors, frame, animationFrame, grid.x, (grid.x)*(grid.y), julia, julia4D, cutjulia); // cutilCheckMsg("Julia4D0_sm13 kernel execution failed.\n"); } // RunJulia4D0 // check if we're running in emulation mode int inEmulationMode() { #ifdef __DEVICE_EMULATION__ return 1; #else return 0; #endif } #endif
269f37eb3336adbbe66f124ff504b899d715b843.cu
#ifndef _KANULIAREPART_CU_ #define _KANULIAREPART_CU_ #include <stdio.h> //#include "cutil_inline.h" #include "kanulia.h" #include "kanuliacalc.cu" // Rotation de quaternion __device__ inline void rotate4(float *px, float *py, float *pz, float *pw, const float4 angle) { float t; if (angle.x != 0. ) { t = *py * cos(angle.x) + *pz * sin(angle.x); *pz = - *py * sin(angle.x) + *pz * cos(angle.x); *py = t; }; if (angle.y != 0. ) { t = *px * cos(angle.y) + *pz * sin(angle.y); *pz = - *px * sin(angle.y) + *pz * cos(angle.y); *px = t; }; if (angle.z != 0. ) { t = *pz * cos(angle.z) + *pw * sin(angle.z); *pw = - *pz * sin(angle.z) + *pw * cos(angle.z); *pz = t; }; if (angle.w != 0. ) { t = *py * cos(angle.w) + *pw * sin(angle.w); *pw = - *py * sin(angle.w) + *pw * cos(angle.w); *py = t; }; } __device__ inline void rotate4inv(float *px, float *py, float *pz, float *pw, const float4 angle) { float t; if (angle.w != 0. ) { t = *py * cos(-angle.w) + *pw * sin(-angle.w); *pw = - *py * sin(-angle.w) + *pw * cos(-angle.w); *py = t; }; if (angle.z != 0. ) { t = *pz * cos(-angle.z) + *pw * sin(-angle.z); *pw = - *pz * sin(-angle.z) + *pw * cos(-angle.z); *pz = t; }; if (angle.y != 0. ) { t = *px * cos(-angle.y) + *pz * sin(-angle.y); *pz = - *px * sin(-angle.y) + *pz * cos(-angle.y); *px = t; }; if (angle.x != 0. ) { t = *py * cos(-angle.x) + *pz * sin(-angle.x); *pz = - *py * sin(-angle.x) + *pz * cos(-angle.x); *py = t; }; } __device__ inline void rotate3(float *px, float *py, float *pz, const float4 angle) { float t; if (angle.x != 0. ) { t = *py * cos(angle.x) + *pz * sin(angle.x); *pz = - *py * sin(angle.x) + *pz * cos(angle.x); *py = t; }; if (angle.y != 0. ) { t = *px * cos(angle.y) + *pz * sin(angle.y); *pz = - *px * sin(angle.y) + *pz * cos(angle.y); *px = t; }; if (angle.z != 0. ) { t = *px * cos(angle.z) - *py * sin(angle.z); *py = *px * sin(angle.z) + *py * cos(angle.z); *px = t; }; /* if (angle.w != 0. 
) { t = *py * cos(angle.w) + *pw * sin(angle.w); *pw = - *py * sin(angle.w) + *pw * cos(angle.w); *py = t; };*/ } // The Julia4D CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ // The core Julia CUDA GPU calculation function /**/ __device__ int CloudJulia4D(const float ox, const float oy, const float oz, const float ow, const float4 JS, const float dx, const float dy, const float dz, const float dw, int *r, int *g, int *b, int nb, const unsigned int crn) { float ret = 0; float x = ox; float y = oy; float z = oz; float w = ow; int c = nb; do { x += dx; y += dy; z += dz; w += dw; if (CalcJulia4D(x, y, z, w, JS, crn) == 0) ret += 1; } while (c--); if (ret>255) ret = 255; if (ret == 0) { *r = 0; *g = 0; *b = 0; } else { *r = ret; *g = ret; *b = 155; } return ret; } // CloudJulia4D /** return if y is cutted by the cutjulia option*/ /* if non 0.0 it is cutted out and the return value is the distance to the next un cutted plan */ __device__ bool iscuttedout(bool cutjulia,float y){ float d = 0.15f;// distance between 2 layer start float h = 0.02f;// width of the layer, ( h < d ) if (!cutjulia) return false; // float ymodd = abs(y) - (int(abs(y) / d))*d; // = y % d // return (ymodd > h / 2.) 
&& (ymodd < d - h / 2.0); float ymodd = (y/*+10.0*d*/) - (int((y/*+10.0*d*/) / d))*d; // = y % d return (ymodd > h ); } /** return distance factor to next uncutted out plan */ __device__ float getstepstonextplan(float y,float dy) { float d = 0.15f;// distance between 2 layer start //float h = 0.02f;// width of the layer, ( h < d ) float ymodd = y - (int(y / d))*d; // = y % d /* if (dy > 0.) return (d - (h / 2.0) - ymodd) / dy; else return ((h / 2.0) - ymodd ) / dy;*/ return (d - ymodd) / dy; } // The core Julia CUDA GPU calculation function __device__ int SolidJulia4D(const int ix, const int iy, const float4 JS, const float4 angle, const int d_imageW, const int d_imageH, const float scaleJ, const float xblur, const float yblur, int *r, int *g, int *b, const float xJOff, const float yJOff, const unsigned int crn, int julia4D, const bool cutjulia) { //hue color float hue; float dist = 6.0; float step = RAYSTEP; float x = ((float)ix + (xblur)) * scaleJ + xJOff; float y = ((float)iy + (yblur)) * scaleJ + yJOff; float z = ZOBSERVER; float w = 0.0; if (julia4D & CROSSEYE) { if (ix < (d_imageW / 2.)) // image gauche x = ((float)ix + (d_imageW / 4.) + (xblur)) * scaleJ + xJOff + SPACEEYE; else // image droite x = ((float)ix - (d_imageW / 4.) + (xblur)) * scaleJ + xJOff - SPACEEYE; } else { x = ((float)ix + (xblur)) * scaleJ + xJOff; } float dx = sin(KANULFOV * step * scaleJ * ((float)ix + (xblur)-(d_imageW / 2.)) / ((float)d_imageW)); float dy = sin(KANULFOV * step * scaleJ * ((float)iy + (yblur)-(d_imageH / 2.)) / ((float)d_imageW)); float dz = step; float dw = 0.; if (julia4D & CROSSEYE) { if (ix < (d_imageW / 2.)) // image gauche dx -= CROSSANGLE; else // image droite dx += CROSSANGLE; } rotate4(&x, &y, &z, &w, angle); rotate4(&dx, &dy, &dz, &dw, angle); float nd = sqrt(dx*dx + dy*dy + dz*dz + dw*dw); // float mx = 0.; // float ndx =dx/nd;float ndy =dy/nd;float ndz =dz/nd;float ndw =dw/nd; int nb = (dist / step); // hum sert a rien ? 
float x0 = 0.0; float y0 = -1.0; float z0 = 0.0; float w0 = 0.0;// normal is the secant plan's normal // Les trois rays qui vont servir a calculer la normale float x1 = step; float y1 = 0.0; float z1 = 0.0; float w1 = 0.0; float x2 = 0.0; float y2 = step; float z2 = 0.0; float w2 = 0.0; //float x3 = 0.0;float y3 = 0.0;float z3 = 0.0;float w3 = step; rotate4(&x1, &y1, &z1, &w1, angle); rotate4(&x2, &y2, &z2, &w2, angle); // light source direction float xl = 1.; float yl = -1.; float zl = 1.; float wl = 0.; float ddx = dx; float ddy = dy; float ddz = dz; float ddw = dw; int c = nb; bool out = true; // if ray is out main c=0 bool cutplan = false; // if ray hit cutting plan // int logout = 0; do { // if inside empty aera if (iscuttedout(cutjulia,y)) { // hit the surface float dhit = getstepstonextplan(y,dy); //if (logout == 1000) { // printf("%f pouet ", dhit); // logout = 0; //} //else { // logout++; //} x += dx * dhit; y += dy * dhit; z += dz * dhit; w += dw * dhit; if (CalcJulia4Dcore(x, y, z, w, JS, &hue) >= crn) { c = 0; // stop, we hit the inside // for normal 3D x1 = x + x1; y1 = y + y1; z1 = z + z1; w1 = w + w1; dhit = -y1 / dy; x1 += dx * dhit; y1 += dy * dhit; z1 += dz * dhit; w1 += dw * dhit; x2 = x + x2; y2 = y + y2; z2 = z + z2; w2 = w + w2; dhit = -y2 / dy; x2 += dx * dhit; y2 += dy * dhit; z2 += dz * dhit; w2 += dw * dhit; //x3=x + x3; //y3=y + y3; //z3=z + z3; //w3=w + w3; //dhit = -y3/dy; //x3 += dx * dhit; //y3 += dy * dhit; //z3 += dz * dhit; //w3 += dw * dhit; cutplan = true; out = false; // } } } else { /* if (logout == 1000) { printf("%f pouIIIt ",dy); logout = 0; } else { logout++; }*/ x += dx; y += dy; z += dz; w += dw; // x += ndx*step;y += ndy*step;z += ndz*step;w += ndw*step; // if (CalcJulia4Dstep(x, y, z, w, JS, crn,&step)==0) if (CalcJulia4D(x, y, z, w, JS, crn) == 0) { // ray is not out. 
we ll see if normal is out now out = false; c = 12; // for normal 3D x1 = x + x1; y1 = y + y1; z1 = z + z1; w1 = w + w1; x2 = x + x2; y2 = y + y2; z2 = z + z2; w2 = w + w2; //x3=x + x3; //y3=y + y3; //z3=z + z3; //w3=w + w3; ddx = dx; ddy = dy; ddz = dz; ddw = dw; float d1x = dx*2.0; float d1y = dy*2.0; float d1z = dz*2.0; float d1w = dw*2.0; float d2x = dx*2.0; float d2y = dy*2.0; float d2z = dz*2.0; float d2w = dw*2.0; //float d3x=dx*2.0;float d3y=dy*2.0;float d3z=dz*2.0;float d3w=dw*2.0; int in = 0, in1 = 0, in2 = 0;//,in3=0; // place les 3 rayons pour les normales contre la forme if (CalcJulia4D(x1, y1, z1, w1, JS, crn) == 0) { do { x1 -= d1x; y1 -= d1y; z1 -= d1z; w1 -= d1w; if (x1*x1 + y1*y1 + z1*z1 + w1*w1 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x1, y1, z1, w1, JS, crn) == 0) && (!out)); } else { do { x1 += d1x; y1 += d1y; z1 += d1z; w1 += d1w; if (x1*x1 + y1*y1 + z1*z1 + w1*w1 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x1, y1, z1, w1, JS, crn) != 0) && (!out)); } if (CalcJulia4D(x2, y2, z2, w2, JS, crn) == 0) { do { x2 -= d2x; y2 -= d2y; z2 -= d2z; w2 -= d2w; if (x2*x2 + y2*y2 + z2*z2 + w2*w2 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x2, y2, z2, w2, JS, crn) == 0) && (!out)); } else { do { x2 += d2x; y2 += d2y; z2 += d2z; w2 += d2w; if (x2*x2 + y2*y2 + z2*z2 + w2*w2 > OUTMANDELBOX) out = true; } while ((CalcJulia4D(x2, y2, z2, w2, JS, crn) != 0) && (!out)); } //if (CalcJulia4D(x3, y3, z3, w3, JS, crn)==0) //{ // do { // x3 -= d3x;y3 -= d3y;z3 -= d3z;w2 -= d3w; // if (x3*x3 + y3*y3 + z3*z3 + w3*w3 > OUTMANDELBOX) out=true; // } while ((CalcJulia4D(x3, y3, z3, w3, JS, crn) == 0) && (!out) ); //} else { // do { // x3 += d3x;y3 += d3y;z3 += d3z;w3 += d3w; // if (x3*x3 + y3*y3 + z3*z3 + w3*w3 > OUTMANDELBOX) out=true; // } while ((CalcJulia4D(x3, y3, z3, w3, JS, crn) != 0) && (!out) ); //} if (!out) { do { in = CalcJulia4Dhue(x, y, z, w, JS, &hue, crn); in1 = CalcJulia4D(x1, y1, z1, w1, JS, crn); in2 = CalcJulia4D(x2, y2, z2, w2, JS, 
crn); //in3 = CalcJulia4D(x3, y3, z3, w3, JS, crn); if (in == 0) { x -= ddx; y -= ddy; z -= ddz; w -= ddw; } else { x += ddx; y += ddy; z += ddz; w += ddw; } if (in1 == 0) { x1 -= d1x; y1 -= d1y; z1 -= d1z; w1 -= d1w; } else { x1 += d1x; y1 += d1y; z1 += d1z; w1 += d1w; } if (in2 == 0) { x2 -= d2x; y2 -= d2y; z2 -= d2z; w2 -= d2w; } else { x2 += d2x; y2 += d2y; z2 += d2z; w2 += d2w; } //if (in3==0) { // x3 -= d3x;y3 -= d3y;z3 -= d3z;w3 -= d3w; //} else { // x3 += d3x;y3 += d3y;z3 += d3z;w3 += d3w; //} ddx /= 2.0; ddy /= 2.0; ddz /= 2.0; ddw /= 2.0; d1x /= 2.0; d1y /= 2.0; d1z /= 2.0; d1w /= 2.0; d2x /= 2.0; d2y /= 2.0; d2z /= 2.0; d2w /= 2.0; //d3x /= 2.0;d3y /= 2.0;d3z /= 2.0;d3w /= 2.0; } while (c-->0); } else c = 1; } } // if (mx>4.0) c=1; } while (c-->0); if (out) { // while (x*x+y*y+z*z+w*w<OUTBOX) /* while ((x<OUTBOX)&&(x>-OUTBOX) &&(y<OUTBOX)&&(y>-OUTBOX) &&(z<OUTBOX)&&(z>-OUTBOX) &&(w<OUTBOX)&&(w>-OUTBOX))*/ /* { x+=dx;y+=dy;z+=dz;w+=dw; }*/ *r = 1; *g = 1; *b = 1; // if ((x-(float)((int)(x*1.))/1.<0.01) // ||(y-(float)((int)(y*1.))/1.<0.01) // ||(z-(float)((int)(z*10.))/10.<0.01) // ||(w-(float)((int)(w*10.))/10.<0.01) // ) /* if ( (ABS(x-(float)((int)(x*7.))/7.)<0.01) ||(ABS(y-(float)((int)(y*7.))/7.)<0.01) ||(ABS(z-(float)((int)(z*7.))/7.)<0.01) ||(ABS(w-(float)((int)(w*7.))/7.)<0.01) ) { *r = 255; *g = 255; *b = 255; }*/ } else { // computing vector x1 -= x; y1 -= y; z1 -= z; w1 -= w; x2 -= x; y2 -= y; z2 -= z; w2 -= w; //x3 -= x;y3 -= y;z3 -= z;w3 -= w; // vector product for normal // 3D Normal in space vue //x0 = x1 * x2 - y1 * y2 - z1 * z2 - w1* w2; //y0 = x1 * y2 + y1 * x2 + z1 * w2 - w1* z2; //z0 = x1 * z2 + z1 * x2 + w1 * y2 - y1* w2; //w0 = x1 * w2 + w1 * x2 + y1 * z2 - z1* y2; // 4D Normal //x0 = y1*(w2*z3-z2*w3)+y2*(z1*w3-w1*z3)+y3*(w1*z2-z1*w2); //y0 = x1*(z2*w3-w2*z3)+x2*(w1*z3-z1*w3)+x3*(z1*w2-w1*z2); //z0 = x1*(w2*y3-y2*w3)+x2*(y1*w3-w1*y3)+x3*(w1*y2-y1*w2); //w0 = x1*(y2*z3-z2*y3)+x2*(z1*y3-y1*z3)+x3*(y1*z2-z1*y2); // retour dans le repere 
de la cam rotate4inv(&dx, &dy, &dz, &dw, angle); rotate4inv(&x1, &y1, &z1, &w1, angle); rotate4inv(&x2, &y2, &z2, &w2, angle); // 3D Normal in space xyz x0 = z1 * y2 - y1 * z2; y0 = x1 * z2 - z1 * x2; z0 = y1 * x2 - x1 * y2; w0 = 0.; if (cutplan) { x0 = 0.0; y0 = -1.0; z0 = 0.0; w0 = 0.0;// normal is the secant plan's normal rotate4inv(&x0, &y0, &z0, &w0, angle); float n0 = sqrt(x0*x0 + y0*y0 + z0*z0);//+w0*w0); x0 /= n0; y0 /= n0; z0 /= n0;//w0/=n0; } // Normalisation float n0 = sqrt(x0*x0 + y0*y0 + z0*z0);//+w0*w0); float nl = sqrt(xl*xl + yl*yl + zl*zl);//+wl*wl); float nd = sqrt(dx*dx + dy*dy + dz*dz);//+dw*dw); x0 /= n0; y0 /= n0; z0 /= n0;//w0/=n0; xl /= nl; yl /= nl; zl /= nl;//wl/=nl; dx /= nd; dy /= nd; dz /= nd;//dw/=nd; // angle of direction / normal /* float anv = (x0 * dx + y0 *dy + z0 *dz + w0 *dw); if (anv<0.) anv=0.;*/ // angle of light direction / normal float anl = -(x0* xl + y0* yl + z0*zl);// + w0*wl); if (anl<0.) anl = 0.; // dx=0.;dy=0.;dz=1.;dw=0.; // radiance float anr = 0.; float pscal = (xl*x0 + yl*y0 + zl*z0);// + wl*w0); if (pscal < 0.) { float xr = xl - x0*2.*pscal; float yr = yl - y0*2.*pscal; float zr = zl - z0*2.*pscal;//float wr=wl-w0*2.*pscal; float nr = sqrt(xr*xr + yr*yr + zr*zr);//+wr*wr); xr /= nr; yr /= nr; zr /= nr;//wr/=nr; anr = -(xr*dx + yr*dy + zr*dz);// + wr*dw); // anr = -pscal; // anr = -(x0*dx + y0*dy + z0*dz + w0*dw); anr = anr * 8.5 - 7.; //if ( anr < 0.8 ) anr=0.; if (anr > 1.) anr = 1.; if (anr < 0.) anr = 0.; anr = anr*anr; } // shadow computation float sh = 1.0; bool shadow = false; // light source rotate with camera rotate4(&xl, &yl, &zl, &wl, angle); do { x -= xl*step; y -= yl*step; z -= zl*step; w -= wl*step; //if ((y > 0.) || (!cutjulia)) if (!iscuttedout(cutjulia,y)) if (CalcJulia4D(x, y, z, w, JS, crn) == 0) shadow = true; } while ((x*x + y*y + z*z + w*w < OUTMANDELBOX) && (!shadow) && (iscuttedout(cutjulia,y))); float li = anl*0.7 + 0.1; if (shadow) { sh = 0.5; anr = 0.0; } float L = (li + (1. 
- li)*anr) * sh; // if ( L < 0.0 ) L = 0.0; HSL2RGB(hue, 0.5, L, r, g, b); } return out; } // SolidJulia4D __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch __global__ void Julia4Drepart(uchar4 *dst, const int imageW, const int imageH, const float4 Off, const float4 JS, const float4 angle, const float scale, const float scalei, const float xJOff, const float yJOff, const float scaleJ, const float xblur, const float yblur, const unsigned int maxgropix, const unsigned int gropix, const unsigned int bloc, const unsigned int crn, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const int julia, const int julia4D, const bool cutjulia) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); //blockIndex++; blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while() __syncthreads(); #endif // if (blockIndex >= ((numBlocks/nbloc)+1)*(bloc+1)) break; // finish if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX * maxgropix + threadIdx.x * maxgropix + ((bloc * gropix) % maxgropix); const int iy = blockDim.y * blockY * maxgropix + threadIdx.y * maxgropix + ((bloc * gropix) / maxgropix) * gropix; int r = 0;int g = 0;int b = 0; bool seedre = false;bool seedim = false; if ((ix < imageW) && (iy < imageH)) { int m = 0; if ( (julia<32) && (ix < imageW / julia) && (iy < imageH / julia)) { // Calculate the location const float xPos = (float)ix * scale * julia + Off.x; const float yPos = (float)iy * scale * julia + Off.y; // Calculate the Mandelbrot index for the current location if 
(abs(JS.x-xPos)+abs(JS.y-yPos) < 2.1 * scale * julia ) { seedre = true; } if (!seedre) { float hue; // m = CalcMandelbrot(xPos , yPos); m = CalcMandel4Dcore(xPos, yPos, JS.z, JS.w, &hue); if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b); } } else if (julia4D&& (julia<32) &&((imageW - ix < imageW / julia) && (iy < imageH / julia))) { // Calculate the location const float zPos = (float)(imageW - ix) * scalei * julia + Off.z; const float wPos = (float)iy * scalei * julia + Off.w; // Calculate the Mandelbrot index for the current location if (abs(JS.z-zPos)+abs(JS.w-wPos) < 2.1 * scalei * julia ) { seedim = true; } if (!seedim) { float hue; // m = CalcMandelbrot(zPos , wPos); m = CalcMandel4Dcore(JS.x, JS.y, zPos, wPos, &hue); if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b); } } else { // Calculate the location const float xPos = (float)ix * scaleJ + xJOff; const float yPos = (float)iy * scaleJ + yJOff; /* const float zPos = (float)0.; const float wPos = (float)0.;*/ // Calculate the Mandelbrot index for the current location if (julia4D == JULIA2D) { m = CalcJulia(xPos, yPos, JS, crn); } if (julia4D == CLOUDJULIA) { float dist = 6.0; float step = 0.009; float ox = (float)ix * scaleJ + xJOff; float oy = (float)iy * scaleJ + yJOff; float oz = - 3.0; float ow = 0.0; float dx = sin( 0.7 * step * ( (float)ix + xblur - (imageW/2.)) / ((float) imageW) ); float dy = sin( 0.7 * step * ( (float)iy + yblur - (imageH/2.)) / ((float) imageW) ); float dz = step; float dw = 0.; rotate4(&ox,&oy,&oz,&ow,angle); rotate4(&dx,&dy,&dz,&dw,angle); int nb = (dist/step); m = CloudJulia4D(ox, oy, oz, ow, JS, dx, dy, dz, dw, &r, &g, &b, nb, crn); } if (julia4D & JULIA4D) { /* if ((julia4D & CROSSEYE)&& ( (sqrt( (float)((ix- imageW/4)*(ix- imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.) 
// si viseur ||(sqrt( (float)((ix-3*imageW/4)*(ix-3*imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.))) { r = 255; g = 255; b = 255; } else*/ m = SolidJulia4D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn,julia4D,cutjulia); // m = SolidMandelBox3D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn); } } // m = blockIdx.x; // uncomment to see scheduling order // Convert the Mandelbrot index into a color uchar4 color; // m = m > 0 ? crn - m : 0; if ((julia4D)&&((ix >= imageW / julia) || (iy >= imageH / julia))) { color.x = r; color.y = g; color.z = b; } else { if (seedim||seedre) { color.x = 150; color.y = 250; color.z = 250; } else { color.x = r; color.y = g; color.z = b; /* if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; }*/ } } // activer pour voir le calcul progressif // if (gropix==1) color.z += 120; // if (gropix==2) color.y += 120; // if (gropix==4) color.x += 120; // // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; if (gropix==1) dst[pixel] = color; else for (int i=0;i<gropix;i++) for (int j=0;j<gropix;j++) if ((ix+i<imageW)&&(iy+j<imageH)) dst[pixel+i+imageW*j] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Julia4D0 // The host CPU Mandebrot thread spawner void RunJulia4Drepart(uchar4 *dst, const int imageW, const int imageH, const float4 Off, const float4 JS, const float4 angle, const double scale, const double scalei, const double xJOff, const double yJOff, const double scaleJ, const float xblur, const float yblur, const unsigned int maxgropix, const unsigned int gropix, const unsigned int bloc, const unsigned int crn, const uchar4 
colors, const int frame, const int animationFrame, const int numSMs, const int julia, const int julia4D, const bool cutjulia) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW/maxgropix, BLOCKDIM_X), iDivUp(imageH/(maxgropix), BLOCKDIM_Y)); // zero block counter // unsigned int hBlockCounter = (((grid.x)*(grid.y)/nbloc)+1)*(bloc); unsigned int hBlockCounter = 0; /*cutilSafeCall( */ cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice /*)*/ ); int numWorkUnit = numSMs; Julia4Drepart<<<numWorkUnit, threads>>>(dst, imageW, imageH, Off, JS, angle, (float)scale, (float)scalei, (float)xJOff, (float)yJOff, (float)scaleJ, xblur, yblur, maxgropix, gropix, bloc, crn, colors, frame, animationFrame, grid.x, (grid.x)*(grid.y), julia, julia4D, cutjulia); // cutilCheckMsg("Julia4D0_sm13 kernel execution failed.\n"); } // RunJulia4D0 // check if we're running in emulation mode int inEmulationMode() { #ifdef __DEVICE_EMULATION__ return 1; #else return 0; #endif } #endif
9c753a3afba245b509a99ed5bab1555e9a906c91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef PADDLE_WITH_HETERPS #include <algorithm> #include <ctime> #include <memory> #include <numeric> #include "paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h" #include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace framework { __global__ void PullCopy(float** dest, const FeatureValue* src, const int64_t* len, int hidden, int slot_num, int total_len, uint64_t** keys) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? 
len[x - 1] : 0); if (*(keys[x] + y) == 0) { *(dest[x] + y * hidden) = 0; *(dest[x] + y * hidden + 1) = 0; *(dest[x] + y * hidden + 2) = 0; } else { *(dest[x] + y * hidden) = (src + i)->show; *(dest[x] + y * hidden + 1) = (src + i)->clk; *(dest[x] + y * hidden + 2) = (src + i)->lr; } if ((src + i)->mf_size == 0 || *(keys[x] + y) == 0) { for (int j = 0; j < hidden - 3; j++) { *(dest[x] + y * hidden + 3 + j) = 0; } } else { for (int j = 0; j < hidden - 3; j++) { *(dest[x] + y * hidden + 3 + j) = (src + i)->mf[1 + j]; } } } } __global__ void CopyKeysKernel(uint64_t** src_keys, uint64_t* dest_total_keys, const int64_t* len, int slot_num, int total_len) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? len[x - 1] : 0); dest_total_keys[i] = src_keys[x][y]; } } __global__ void PushCopy(FeaturePushValue* dest, float** src, int64_t* len, int hidden, int slot_num, int total_len, int bs, int* slot_vector) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? len[low - 1] : 0); (dest + i)->slot = slot_vector[x]; (dest + i)->show = *(src[x] + y * hidden); (dest + i)->clk = *(src[x] + y * hidden + 1); (dest + i)->lr_g = *(src[x] + y * hidden + 2) * -1. * bs; for (int j = 0; j < hidden - 3; j++) { (dest + i)->mf_g[j] = *(src[x] + y * hidden + 3 + j) * -1. 
* bs; } } } void PSGPUWrapper::CopyForPull(const paddle::platform::Place& place, uint64_t** gpu_keys, const std::vector<float*>& values, const FeatureValue* total_values_gpu, const int64_t* gpu_len, const int slot_num, const int hidden_size, const int64_t total_length) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); auto buf_value = memory::AllocShared(place, values.size() * sizeof(float*)); float** gpu_values = reinterpret_cast<float**>(buf_value->ptr()); hipMemcpy(gpu_values, values.data(), values.size() * sizeof(float*), hipMemcpyHostToDevice); hipLaunchKernelGGL(( PullCopy), dim3((total_length + 1024 - 1) / 1024), dim3(1024), 0, stream, gpu_values, total_values_gpu, gpu_len, hidden_size, slot_num, total_length, gpu_keys); hipStreamSynchronize(stream); } void PSGPUWrapper::CopyKeys(const paddle::platform::Place& place, uint64_t** origin_keys, uint64_t* total_keys, const int64_t* gpu_len, int slot_num, int total_len) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); hipLaunchKernelGGL(( CopyKeysKernel), dim3((total_len + 1024 - 1) / 1024), dim3(1024), 0, stream, origin_keys, total_keys, gpu_len, slot_num, total_len); hipStreamSynchronize(stream); } void PSGPUWrapper::CopyForPush(const paddle::platform::Place& place, const std::vector<const float*>& grad_values, FeaturePushValue* total_grad_values_gpu, const std::vector<int64_t>& slot_lengths, const int hidden_size, const int64_t total_length, const int batch_size) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); auto slot_lengths_lod = slot_lengths; for (int i = 1; i < slot_lengths_lod.size(); i++) { slot_lengths_lod[i] += slot_lengths_lod[i - 1]; } auto buf_grad_value = 
memory::AllocShared(place, grad_values.size() * sizeof(float*)); auto buf_length = memory::AllocShared(place, slot_lengths.size() * sizeof(int64_t)); auto buf_slot_vector = memory::AllocShared(place, slot_lengths_lod.size() * sizeof(int)); float** gpu_values = reinterpret_cast<float**>(buf_grad_value->ptr()); int64_t* gpu_len = reinterpret_cast<int64_t*>(buf_length->ptr()); int* d_slot_vector = reinterpret_cast<int*>(buf_slot_vector->ptr()); hipMemcpy(gpu_values, grad_values.data(), grad_values.size() * sizeof(float*), hipMemcpyHostToDevice); hipMemcpy(gpu_len, slot_lengths_lod.data(), slot_lengths.size() * sizeof(int64_t), hipMemcpyHostToDevice); hipMemcpy(d_slot_vector, slot_vector_.data(), slot_lengths_lod.size() * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( PushCopy), dim3((total_length + 1024 - 1) / 1024), dim3(1024), 0, stream, total_grad_values_gpu, gpu_values, gpu_len, hidden_size, slot_lengths.size(), total_length, batch_size, d_slot_vector); hipStreamSynchronize(stream); } void PSGPUWrapper::SetSparseSGD(float nonclk_coeff, float clk_coeff, float min_bound, float max_bound, float learning_rate, float initial_g2sum, float initial_range) { hipMemcpyToSymbol(optimizer_config::nonclk_coeff, &nonclk_coeff, sizeof(float)); hipMemcpyToSymbol(optimizer_config::clk_coeff, &clk_coeff, sizeof(float)); hipMemcpyToSymbol(optimizer_config::min_bound, &min_bound, sizeof(float)); hipMemcpyToSymbol(optimizer_config::max_bound, &max_bound, sizeof(float)); hipMemcpyToSymbol(optimizer_config::learning_rate, &learning_rate, sizeof(float)); hipMemcpyToSymbol(optimizer_config::initial_g2sum, &initial_g2sum, sizeof(float)); hipMemcpyToSymbol(optimizer_config::initial_range, &initial_range, sizeof(float)); } void PSGPUWrapper::SetEmbedxSGD(float mf_create_thresholds, float mf_learning_rate, float mf_initial_g2sum, float mf_initial_range, float mf_min_bound, float mf_max_bound) { hipMemcpyToSymbol(optimizer_config::mf_create_thresholds, &mf_create_thresholds, 
sizeof(float)); hipMemcpyToSymbol(optimizer_config::mf_learning_rate, &mf_learning_rate, sizeof(float)); hipMemcpyToSymbol(optimizer_config::mf_initial_g2sum, &mf_initial_g2sum, sizeof(float)); hipMemcpyToSymbol(optimizer_config::mf_initial_range, &mf_initial_range, sizeof(float)); hipMemcpyToSymbol(optimizer_config::mf_min_bound, &mf_min_bound, sizeof(float)); hipMemcpyToSymbol(optimizer_config::mf_max_bound, &mf_max_bound, sizeof(float)); } } // end namespace framework } // end namespace paddle #endif
9c753a3afba245b509a99ed5bab1555e9a906c91.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef PADDLE_WITH_HETERPS #include <algorithm> #include <ctime> #include <memory> #include <numeric> #include "paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h" #include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace framework { __global__ void PullCopy(float** dest, const FeatureValue* src, const int64_t* len, int hidden, int slot_num, int total_len, uint64_t** keys) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? 
len[x - 1] : 0); if (*(keys[x] + y) == 0) { *(dest[x] + y * hidden) = 0; *(dest[x] + y * hidden + 1) = 0; *(dest[x] + y * hidden + 2) = 0; } else { *(dest[x] + y * hidden) = (src + i)->show; *(dest[x] + y * hidden + 1) = (src + i)->clk; *(dest[x] + y * hidden + 2) = (src + i)->lr; } if ((src + i)->mf_size == 0 || *(keys[x] + y) == 0) { for (int j = 0; j < hidden - 3; j++) { *(dest[x] + y * hidden + 3 + j) = 0; } } else { for (int j = 0; j < hidden - 3; j++) { *(dest[x] + y * hidden + 3 + j) = (src + i)->mf[1 + j]; } } } } __global__ void CopyKeysKernel(uint64_t** src_keys, uint64_t* dest_total_keys, const int64_t* len, int slot_num, int total_len) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? len[x - 1] : 0); dest_total_keys[i] = src_keys[x][y]; } } __global__ void PushCopy(FeaturePushValue* dest, float** src, int64_t* len, int hidden, int slot_num, int total_len, int bs, int* slot_vector) { CUDA_KERNEL_LOOP(i, total_len) { int low = 0; int high = slot_num - 1; while (low < high) { int mid = (low + high) / 2; if (i < len[mid]) high = mid; else low = mid + 1; } int x = low; int y = i - (x ? len[low - 1] : 0); (dest + i)->slot = slot_vector[x]; (dest + i)->show = *(src[x] + y * hidden); (dest + i)->clk = *(src[x] + y * hidden + 1); (dest + i)->lr_g = *(src[x] + y * hidden + 2) * -1. * bs; for (int j = 0; j < hidden - 3; j++) { (dest + i)->mf_g[j] = *(src[x] + y * hidden + 3 + j) * -1. 
* bs; } } } void PSGPUWrapper::CopyForPull(const paddle::platform::Place& place, uint64_t** gpu_keys, const std::vector<float*>& values, const FeatureValue* total_values_gpu, const int64_t* gpu_len, const int slot_num, const int hidden_size, const int64_t total_length) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); auto buf_value = memory::AllocShared(place, values.size() * sizeof(float*)); float** gpu_values = reinterpret_cast<float**>(buf_value->ptr()); cudaMemcpy(gpu_values, values.data(), values.size() * sizeof(float*), cudaMemcpyHostToDevice); PullCopy<<<(total_length + 1024 - 1) / 1024, 1024, 0, stream>>>( gpu_values, total_values_gpu, gpu_len, hidden_size, slot_num, total_length, gpu_keys); cudaStreamSynchronize(stream); } void PSGPUWrapper::CopyKeys(const paddle::platform::Place& place, uint64_t** origin_keys, uint64_t* total_keys, const int64_t* gpu_len, int slot_num, int total_len) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); CopyKeysKernel<<<(total_len + 1024 - 1) / 1024, 1024, 0, stream>>>( origin_keys, total_keys, gpu_len, slot_num, total_len); cudaStreamSynchronize(stream); } void PSGPUWrapper::CopyForPush(const paddle::platform::Place& place, const std::vector<const float*>& grad_values, FeaturePushValue* total_grad_values_gpu, const std::vector<int64_t>& slot_lengths, const int hidden_size, const int64_t total_length, const int batch_size) { auto stream = dynamic_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get( BOOST_GET_CONST(platform::CUDAPlace, place))) ->stream(); auto slot_lengths_lod = slot_lengths; for (int i = 1; i < slot_lengths_lod.size(); i++) { slot_lengths_lod[i] += slot_lengths_lod[i - 1]; } auto buf_grad_value = memory::AllocShared(place, grad_values.size() * 
sizeof(float*)); auto buf_length = memory::AllocShared(place, slot_lengths.size() * sizeof(int64_t)); auto buf_slot_vector = memory::AllocShared(place, slot_lengths_lod.size() * sizeof(int)); float** gpu_values = reinterpret_cast<float**>(buf_grad_value->ptr()); int64_t* gpu_len = reinterpret_cast<int64_t*>(buf_length->ptr()); int* d_slot_vector = reinterpret_cast<int*>(buf_slot_vector->ptr()); cudaMemcpy(gpu_values, grad_values.data(), grad_values.size() * sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(gpu_len, slot_lengths_lod.data(), slot_lengths.size() * sizeof(int64_t), cudaMemcpyHostToDevice); cudaMemcpy(d_slot_vector, slot_vector_.data(), slot_lengths_lod.size() * sizeof(int), cudaMemcpyHostToDevice); PushCopy<<<(total_length + 1024 - 1) / 1024, 1024, 0, stream>>>( total_grad_values_gpu, gpu_values, gpu_len, hidden_size, slot_lengths.size(), total_length, batch_size, d_slot_vector); cudaStreamSynchronize(stream); } void PSGPUWrapper::SetSparseSGD(float nonclk_coeff, float clk_coeff, float min_bound, float max_bound, float learning_rate, float initial_g2sum, float initial_range) { cudaMemcpyToSymbol(optimizer_config::nonclk_coeff, &nonclk_coeff, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::clk_coeff, &clk_coeff, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::min_bound, &min_bound, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::max_bound, &max_bound, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::learning_rate, &learning_rate, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::initial_g2sum, &initial_g2sum, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::initial_range, &initial_range, sizeof(float)); } void PSGPUWrapper::SetEmbedxSGD(float mf_create_thresholds, float mf_learning_rate, float mf_initial_g2sum, float mf_initial_range, float mf_min_bound, float mf_max_bound) { cudaMemcpyToSymbol(optimizer_config::mf_create_thresholds, &mf_create_thresholds, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::mf_learning_rate, 
&mf_learning_rate, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::mf_initial_g2sum, &mf_initial_g2sum, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::mf_initial_range, &mf_initial_range, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::mf_min_bound, &mf_min_bound, sizeof(float)); cudaMemcpyToSymbol(optimizer_config::mf_max_bound, &mf_max_bound, sizeof(float)); } } // end namespace framework } // end namespace paddle #endif
241acb1c5b8702f0744581da68c6625b40c7b89f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vector_add(int *a, int *b, int *c){ int index = blockIdx.x * blockDim.x + threadIdx.x; c[index] = a[index] + b[index] ; }
241acb1c5b8702f0744581da68c6625b40c7b89f.cu
#include "includes.h" __global__ void vector_add(int *a, int *b, int *c){ int index = blockIdx.x * blockDim.x + threadIdx.x; c[index] = a[index] + b[index] ; }
ae40a25fba1a2fc5dd34a211f736e2dc14e93cfa.hip
// !!! This is a file automatically generated by hipify!!! // #include <iostream> #include <hip/hip_runtime.h> using namespace std; __global__ void add(float *a, float *b, float *c) { if (a[threadIdx.x] + b[threadIdx.x] < 10) c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; else c[threadIdx.x] = 10; } // #define N 64 int main(void) { float *a, *b, *c; // host a, b, c float *dev_a, *dev_b, *dev_c; // device a, b, c int size = N * sizeof(float); // device a, b, c hipMalloc((void**)&dev_a, size); hipMalloc((void**)&dev_b, size); hipMalloc((void**)&dev_c, size); a = (float*) malloc(size); b = (float*) malloc(size); c = (float*) malloc(size); for (int i = 0; i < N; ++i) a[i] = (float)rand() / (float)RAND_MAX; for (int i = 0; i < N; ++i) b[i] = (float)rand() / (float)RAND_MAX; // device hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice); // launch add() kernel with N parallel blocks // N 1 hipLaunchKernelGGL(( add), dim3(1), dim3(N) , 0, 0, dev_a, dev_b, dev_c); // N 1 - // N 1 // add<<< N, 1 >>>(dev_a, dev_b, dev_c); // , //add<<< N, 1 >>>(dev_a, dev_b, dev_c); // device host c hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost); for(int i = 0; i < N; ++i) cout<<"c[" << i << "]=" << c[i] << endl; free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
ae40a25fba1a2fc5dd34a211f736e2dc14e93cfa.cu
// Сложение векторов и сравнение с количеством тредов в памяти #include <iostream> #include <cuda.h> using namespace std; __global__ void add(float *a, float *b, float *c) { if (a[threadIdx.x] + b[threadIdx.x] < 10) c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; else c[threadIdx.x] = 10; } //Количество суммирований и сравнений #define N 64 int main(void) { float *a, *b, *c; // host копии a, b, c float *dev_a, *dev_b, *dev_c; // device копии a, b, c int size = N * sizeof(float); //выделяем память для device копий a, b, c cudaMalloc((void**)&dev_a, size); cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_c, size); a = (float*) malloc(size); b = (float*) malloc(size); c = (float*) malloc(size); for (int i = 0; i < N; ++i) a[i] = (float)rand() / (float)RAND_MAX; for (int i = 0; i < N; ++i) b[i] = (float)rand() / (float)RAND_MAX; // копируем ввод на device cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice); // launch add() kernel with N parallel blocks //Использование N тредов и 1 блока add<<< 1, N >>>(dev_a, dev_b, dev_c); //Использование N блоков И 1 тре- // Если мы будем использовать N блоков и 1 тред // add<<< N, 1 >>>(dev_a, dev_b, dev_c); // то каждый результат с будет равным нулю, кроме первого //add<<< N, 1 >>>(dev_a, dev_b, dev_c); // копируем результат работы device обратно на host – копию c cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); for(int i = 0; i < N; ++i) cout<<"c[" << i << "]=" << c[i] << endl; free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
bb42163ab35bec13d141c699bc9349d8b83f8256.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <hip/hip_runtime.h> #include <cmath> extern __global__ void vecAdd(double *a, double *b, double *c, int n); TEST(CUDA, VecAdd) { // Size of vectors int n = 2048; // Host input vectors double *h_a; double *h_b; //Host output vector double *h_c; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; // Size, in bytes, of each vector size_t bytes = n * sizeof(double); // Allocate memory for each vector on host h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); int i; // Initialize vectors on host for (i = 0; i < n; i++) { h_a[i] = sin(i) * sin(i); h_b[i] = cos(i) * cos(i); } // Copy host vectors to device hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float) n / blockSize); // Execute the kernel hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n); // Copy array back to host hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; for(i = 0; i < n; i++) sum += h_c[i]; EXPECT_EQ(1, sum/n); // Release device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); }
bb42163ab35bec13d141c699bc9349d8b83f8256.cu
#include <gtest/gtest.h> #include <cuda_runtime.h> #include <cmath> extern __global__ void vecAdd(double *a, double *b, double *c, int n); TEST(CUDA, VecAdd) { // Size of vectors int n = 2048; // Host input vectors double *h_a; double *h_b; //Host output vector double *h_c; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; // Size, in bytes, of each vector size_t bytes = n * sizeof(double); // Allocate memory for each vector on host h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); int i; // Initialize vectors on host for (i = 0; i < n; i++) { h_a[i] = sin(i) * sin(i); h_b[i] = cos(i) * cos(i); } // Copy host vectors to device cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float) n / blockSize); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n); // Copy array back to host cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; for(i = 0; i < n; i++) sum += h_c[i]; EXPECT_EQ(1, sum/n); // Release device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); }
4c4d8edf2bc6820e17293ed7ca91f2c87ee7cbd1.hip
// !!! This is a file automatically generated by hipify!!! /* * Inverse Discrete Sine Transform in row wise (DST four) * DST_IV_Row_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_IV_Row_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DST_IV_Row_Inverse.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DSTIV_Row__InverseKernel(double *A, double *C, int numARows, int numAColumns, int numCRows, int numCColumns) { double CValue = 0.0; const double PI_d = 3.141592653589793238462643383279502884; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ double As[TILE_DIM][TILE_DIM]; __shared__ double Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } //Bs[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.y + k*TILE_DIM) + 1)*PI_d*(2 * Col + 1) / (4.0 * numAColumns)))*sqrt(2.0 / numAColumns); if (k*TILE_DIM + threadIdx.y < numAColumns && 
Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = sin((((threadIdx.y + k*TILE_DIM) + 0.5)*PI_d*(Col + 0.5)) / (numAColumns))*sqrt(2.0 / (numAColumns)); } else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDSTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix //double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //float * hostComputedC; double * deviceA; //double * deviceB; double * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns)); //hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns)); //thrust::device_ptr< double >dev_ptr_A(deviceA); //thrust::device_ptr< double >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice)); //hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////// dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; 
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTIV_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; }
4c4d8edf2bc6820e17293ed7ca91f2c87ee7cbd1.cu
/* * Inverse Discrete Sine Transform in row wise (DST four) * DST_IV_Row_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_IV_Row_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DST_IV_Row_Inverse.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cuda.h> #include <cuda_runtime.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DSTIV_Row__InverseKernel(double *A, double *C, int numARows, int numAColumns, int numCRows, int numCColumns) { double CValue = 0.0; const double PI_d = 3.141592653589793238462643383279502884; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ double As[TILE_DIM][TILE_DIM]; __shared__ double Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } //Bs[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.y + k*TILE_DIM) + 1)*PI_d*(2 * Col + 1) / (4.0 * numAColumns)))*sqrt(2.0 / numAColumns); if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = sin((((threadIdx.y 
+ k*TILE_DIM) + 0.5)*PI_d*(Col + 0.5)) / (numAColumns))*sqrt(2.0 / (numAColumns)); } else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDSTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix //double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //float * hostComputedC; double * deviceA; //double * deviceB; double * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns)); //cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns)); //thrust::device_ptr< double >dev_ptr_A(deviceA); //thrust::device_ptr< double >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice)); //cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////// dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; 
DSTIV_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; }
929e688cb10bb1aaf48e82b4e56f22494367d90e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2020 Tokyo Institute of Technology */ //============================================================================// // // Software Name : MEGADOCK // // cuda_kernel.cu // // Contact address : Tokyo Institute of Technology, AKIYAMA Lab. // //============================================================================// #include <stdio.h> #define FMAX(a,b) ( ((a)>(b) ) ? (a) : (b) ) #define FMIN(a,b) ( ((a)>(b) ) ? (b) : (a) ) __global__ void lig_vox_fill(int ng1 ,int na ,float delta ,float *radius2 ,float *xd ,float *yd ,float *zd ,float *grid_coord ,float *atom_coord_rotated ,float *grid_r ,float grid_width) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; //const int ng3 = ng2 * ng1; if(id < na) { //const int search_range = 2; const int search_range = (2.4 + grid_width -0.01) / grid_width; const int lc = ng1 * id; const int id3 = id * 3; const int i2 = atom_coord_rotated[id3 ] / grid_width + ng1 / 2; const int j2 = atom_coord_rotated[id3+1] / grid_width + ng1 / 2; const int k2 = atom_coord_rotated[id3+2] / grid_width + ng1 / 2; const int ia = FMAX(i2 - search_range, 0); const int ja = FMAX(j2 - search_range, 0); const int ka = FMAX(k2 - search_range, 0); const int ib = FMIN(i2 + search_range+1, ng1); const int jb = FMIN(j2 + search_range+1, ng1); const int kb = FMIN(k2 + search_range+1, ng1); for( int i = ia ; i < ib ; i++ ) {// grid around atom[l] if(xd[lc+i] > radius2[id]) continue; for( int j = ja ; j < jb ; j++ ) { const float d2 = xd[lc+i]+yd[lc+j]; if(d2 > radius2[id]) continue; const int ij = ng2*i+ng1*j; for( int k = ka ; k < kb ; k++ ) { const float d3 = d2 + zd[lc+k]; if( d3 < radius2[id] ) {// distance(grid-atom) < Van der Waals radius (* core) grid_r[ij+k] = delta; // grid[i] is filled up by atom[l] } } } } } //*/ } __global__ void lig_rotation(int na, float *theta, float *atom_coord_orig, float 
*mole_center_coord, float *atom_coord_rotated) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const float r11 = cos(theta[0])*cos(theta[2]) - sin(theta[0])*cos(theta[1])*sin(theta[2]); const float r21 = sin(theta[0])*cos(theta[2]) + cos(theta[0])*cos(theta[1])*sin(theta[2]); const float r31 = sin(theta[1])*sin(theta[2]); const float r12 = -cos(theta[0])*sin(theta[2]) - sin(theta[0])*cos(theta[1])*cos(theta[2]); const float r22 = -sin(theta[0])*sin(theta[2]) + cos(theta[0])*cos(theta[1])*cos(theta[2]); const float r32 = sin(theta[1])*cos(theta[2]); const float r13 = sin(theta[0])*sin(theta[1]); const float r23 = -cos(theta[0])*sin(theta[1]); const float r33 = cos(theta[1]); if(id < na) { const int id3 = id * 3; float x, y, z; x = atom_coord_orig[id3 ] - mole_center_coord[0]; y = atom_coord_orig[id3+1] - mole_center_coord[1]; z = atom_coord_orig[id3+2] - mole_center_coord[2]; atom_coord_rotated[id3 ] = r11 * x + r12 * y + r13 * z; atom_coord_rotated[id3+1] = r21 * x + r22 * y + r23 * z; atom_coord_rotated[id3+2] = r31 * x + r32 * y + r33 * z; } } __global__ void lig_calc_dis_atomgrid(int na, int ng1, float *xd, float *yd, float *zd, float *grid_coord, float *atom_coord_rotated) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int nag = na * ng1; if(id < nag) { const int cur_atom = (id / ng1); const int cur_atom3 = cur_atom * 3; const int cur_grid = id % ng1; xd[id] = atom_coord_rotated[cur_atom3 ] - grid_coord[cur_grid]; yd[id] = atom_coord_rotated[cur_atom3+1] - grid_coord[cur_grid]; zd[id] = atom_coord_rotated[cur_atom3+2] - grid_coord[cur_grid]; xd[id] *= xd[id]; yd[id] *= yd[id]; zd[id] *= zd[id]; } } __global__ void lig_vox_init_grid(int ng3,float *grid_r,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < ng3) { //initialize grid_r[id]=0.0; grid_i[id]=0.0; } } __global__ void lig_vox_init_fft(int nf3,hipfftComplex *lig_in) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { //initialize 
lig_in[id] = make_cuComplex( 0.0, 0.0); //lig_in[id].x=0.0; //lig_in[id].y=0.0; } } __global__ void ligand_voxel_set(int ng1 ,hipfftComplex *lig_in ,float *grid_r ,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const int ng3 = ng2 * ng1; const int nf1 = ng1 * 2; const int nf2 = nf1 * nf1; const int ng1_half = ng1 / 2; const float surface = 1.0; // grid-assignment score (protein surface) //if(id == 0) for(int i=0;i<ng3;i++) if(grid_i[i]!=0.0) printf(" [%03d,%03d,%03d] : %6.3f\n",i/ng2,i/ng1%ng1,i%ng1,grid_i[i]); //if(id == 0) for(int i=0;i<ng3;i++) printf(" [%03d,%03d,%03d] : %6.3f\n",i/ng2,(i/ng1)%ng1,i%ng1,grid_i[i]); if(id < ng3) { const int i = id / ng2; const int j = (id / ng1) % ng1; const int k = id % ng1; const int idoff = (i + ng1_half) * nf2 + (j + ng1_half) * nf1 + (k + ng1_half); //* if(grid_r[id]==surface) {// this condition judges whether surface(1.0) or temporary score(-8888.0) lig_in[idoff] = make_cuComplex( grid_r[id], grid_i[id]); } else { lig_in[idoff] = make_cuComplex( 0.0, grid_i[id]); } //* } } __global__ void lig_vox_surface_cut_CtoT(int ng1, float delta, float *grid_r) { // Core score to Temporary score const int id = blockIdx.x * blockDim.x + threadIdx.x; const float swollen_surface = -8888.0; const int ng2 = ng1 * ng1; const int ng3 = ng2 * ng1; if(id < ng3) { const int i = id / ng2; const int j = (id / ng1) % ng1; const int k = id % ng1; if(i==0||i==ng1-1||j==0||j==ng1-1||k==0||k==ng1-1) { // skip border } else { if(grid_r[id]==delta) { if(grid_r[id-1]==0 || grid_r[id+1]==0 || grid_r[id-ng1]==0 || grid_r[id+ng1]==0 || grid_r[id-ng2]==0 || grid_r[id+ng2]==0) { grid_r[id]=swollen_surface; } } } } } __global__ void lig_vox_elec(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const float pad = (ng1 * grid_width / 2); //* if(id < na) { //if(!fabs(_Charge[id]) < 0.0001) 
continue; const int id3 = id * 3; //const int i = (atom_coord_rotated[id3 ] + pad) / grid_width; //const int j = (atom_coord_rotated[id3+1] + pad) / grid_width; //const int k = (atom_coord_rotated[id3+2] + pad) / grid_width; const int i = FMAX(0, FMIN((atom_coord_rotated[id3 ] + pad) / grid_width, ng1 - 1)); const int j = FMAX(0, FMIN((atom_coord_rotated[id3+1] + pad) / grid_width, ng1 - 1)); const int k = FMAX(0, FMIN((atom_coord_rotated[id3+2] + pad) / grid_width, ng1 - 1)); //grid_i[i*ng2+j*ng1+k] += _Charge[id]; //printf(" %08d-1 : %.2f, %.2f\n",i*ng2+j*ng1+k,grid_i[i*ng2+j*ng1+k],_Charge[id]); atomicAdd(&grid_i[i*ng2+j*ng1+k],_Charge[id]); //printf(" %08d-2 : %.2f, %.2f\n",i*ng2+j*ng1+k,grid_i[i*ng2+j*ng1+k],_Charge[id]); } //*/ } __global__ void lig_vox_elec_serial(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const float pad = (ng1 * grid_width / 2); if(id==0) { for( int l = 0 ; l < na ; l++ ) { const int l3 = l*3; const int i =(atom_coord_rotated[l3 ] + pad) / grid_width; const int j =(atom_coord_rotated[l3+1] + pad) / grid_width; const int k =(atom_coord_rotated[l3+2] + pad) / grid_width; //printf(" [%5d] [x:%12.8f,y:%12.8f,z:%12.8f] [pad:%6.3f], [%3d,%3d,%3d] \n",l,atom_coord_rotated[l3 ],atom_coord_rotated[l3+1],atom_coord_rotated[l3+2],pad,i,j,k); //printf(" [%5d] [x:%8.0f,y:%8.0f,z:%8.0f] [pad:%6.3f], [%3d,%3d,%3d] \n",l,atom_coord_rotated[l3 ],atom_coord_rotated[l3+1],atom_coord_rotated[l3+2],pad,i,j,k); //if(grid_i[i*ng2+j*ng1+k]!=0)printf(" Pos : %d, current : %f, new : %f\n",i*ng2+j*ng1+k, grid_i[i*ng2+j*ng1+k], _Charge[l]); grid_i[i*ng2+j*ng1+k] += _Charge[l]; } } } __device__ void lig_vox_surface_cut_TtoO(int ng3, float delta, float *grid_r) { // Temporary score to Open space score const int id = blockIdx.x * blockDim.x + threadIdx.x; const float swollen_surface = -8888.0; // temporary score for swollen ligand surface if(id 
< ng3) { if(grid_r[id]==swollen_surface) { grid_r[id]=0.0; } } } __global__ void convolution_gpu(int nf3, float *rec_r, float *rec_i, hipfftComplex *lig_out, hipfftComplex *lig_in) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id<nf3) { const float lig_r = lig_out[id].x; const float lig_i = lig_out[id].y; lig_in[id] = make_cuComplex( rec_r[id]*lig_r + rec_i[id]*lig_i, rec_r[id]*lig_i - rec_i[id]*lig_r); //lig_in[id].x = rec_r[id]*lig_r + rec_i[id]*lig_i; //lig_in[id].y = rec_r[id]*lig_i - rec_i[id]*lig_r; } } __global__ void max_pos_single(int nf3, hipfftComplex *out, float *score, int *pos) { //blockDim.x = nThreads //score[nBlocks], pos[nBlocks] (nBlocks = nf3 / nThreads) //sdata[nThreads] extern __shared__ float sdata[]; float mscore; const int thr_id = threadIdx.x; const int nThreads = blockDim.x; const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { mscore = sdata[thr_id] = cuCrealf(out[id])/nf3; __syncthreads(); //all threads set sdata[thr_id] //reduction for(int offset = nThreads / 2; offset > 0; offset /= 2) { if (thr_id < offset) { sdata[thr_id] = FMAX(sdata[thr_id], sdata[thr_id + offset]); } __syncthreads(); } if (mscore == sdata[0]) {//mscore specify position of max score score[blockIdx.x] = sdata[0]; pos[blockIdx.x] = id; //printf(" BLOCK ID:%d, sdata[0]=%f, pos=%d\n",blockIdx.x,sdata[0],id); } } } __global__ void max_pos_multi_set(int nf3, hipfftComplex *out, float *temp_score, int *temp_index) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { temp_score[id] = out[id].x; temp_index[id] = id; } } //, std::vector<hipfftComplex> *temp_result , thrust::vector<hipfftComplex> *temp_result //thrust::device_ptr<hipfftComplex> *temp_result hipfftComplex *temp_result,thrust::device_ptr<hipfftComplex> temp_result __global__ void max_pos_multi(int nf3, hipfftComplex *out, float *score, int *pos,const int num_sort,const int offset) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < offset) { if 
(out[id].x < out[id+offset].x) { out[id].x = out[id+offset].x; out[id].y = out[id+offset].y; } /* if(id==0) { for(int i=0; i<num_sort*2; i++) printf(" id=%d, %f %f\n",i,out[i].x/nf3,out[i].y); printf("\n"); } //*/ } //*/ } /* __global__ void max_pos_multi(int nf3, hipfftComplex *out, float *score, int *pos,const int num_sort, float *temp_score, int *temp_index) { //blockDim.x = nThreads, //score[nBlocks], pos[nBlocks] (nBlocks = nf3 / nThreads) //sdata[nThreads] extern __shared__ float sdata[]; float mscore; int offset; const int thr_id = threadIdx.x; const int nThreads = blockDim.x; const int id = blockIdx.x * blockDim.x + threadIdx.x; //* if(id < nf3) { temp_score[id]=cuCrealf(out[id])/nf3; temp_index[id]=id; /* __syncthreads(); //all threads set sdata[thr_id] //reduction for(offset = nThreads / 2; offset > num_sort; ) { offset /= 2; if (thr_id < offset) { sdata[thr_id] = FMAX(sdata[thr_id], sdata[thr_id + offset]); } //if(id<1)printf(" id=%d, t=%d, off=%d\n",id,num_sort,offset); __syncthreads(); } //if(id<1)printf(" [last] id=%d, t=%d, off=%d\n",id,num_sort,offset); //thrust::sort(sdata,sdata+10); if(id < num_sort) { if (mscore == sdata[id]) {//mscore specify position of max score (float equality comparison... amari yokunai) score[blockIdx.x] = sdata[0]; pos[blockIdx.x] = id; //printf(" BLOCK ID:%d, sdata[0]=%f, pos=%d\n",blockIdx.x,sdata[0],i); } } //* if(temp_score[id] >3000) printf(" id=%d, %f %d\n",id,temp_score[id],temp_index[id]); } //* } //*/
929e688cb10bb1aaf48e82b4e56f22494367d90e.cu
/* * Copyright (C) 2020 Tokyo Institute of Technology */ //============================================================================// // // Software Name : MEGADOCK // // cuda_kernel.cu // // Contact address : Tokyo Institute of Technology, AKIYAMA Lab. // //============================================================================// #include <stdio.h> #define FMAX(a,b) ( ((a)>(b) ) ? (a) : (b) ) #define FMIN(a,b) ( ((a)>(b) ) ? (b) : (a) ) __global__ void lig_vox_fill(int ng1 ,int na ,float delta ,float *radius2 ,float *xd ,float *yd ,float *zd ,float *grid_coord ,float *atom_coord_rotated ,float *grid_r ,float grid_width) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; //const int ng3 = ng2 * ng1; if(id < na) { //const int search_range = 2; const int search_range = (2.4 + grid_width -0.01) / grid_width; const int lc = ng1 * id; const int id3 = id * 3; const int i2 = atom_coord_rotated[id3 ] / grid_width + ng1 / 2; const int j2 = atom_coord_rotated[id3+1] / grid_width + ng1 / 2; const int k2 = atom_coord_rotated[id3+2] / grid_width + ng1 / 2; const int ia = FMAX(i2 - search_range, 0); const int ja = FMAX(j2 - search_range, 0); const int ka = FMAX(k2 - search_range, 0); const int ib = FMIN(i2 + search_range+1, ng1); const int jb = FMIN(j2 + search_range+1, ng1); const int kb = FMIN(k2 + search_range+1, ng1); for( int i = ia ; i < ib ; i++ ) {// grid around atom[l] if(xd[lc+i] > radius2[id]) continue; for( int j = ja ; j < jb ; j++ ) { const float d2 = xd[lc+i]+yd[lc+j]; if(d2 > radius2[id]) continue; const int ij = ng2*i+ng1*j; for( int k = ka ; k < kb ; k++ ) { const float d3 = d2 + zd[lc+k]; if( d3 < radius2[id] ) {// distance(grid-atom) < Van der Waals radius (* core) grid_r[ij+k] = delta; // grid[i] is filled up by atom[l] } } } } } //*/ } __global__ void lig_rotation(int na, float *theta, float *atom_coord_orig, float *mole_center_coord, float *atom_coord_rotated) { const int id = blockIdx.x * blockDim.x + threadIdx.x; 
const float r11 = cos(theta[0])*cos(theta[2]) - sin(theta[0])*cos(theta[1])*sin(theta[2]); const float r21 = sin(theta[0])*cos(theta[2]) + cos(theta[0])*cos(theta[1])*sin(theta[2]); const float r31 = sin(theta[1])*sin(theta[2]); const float r12 = -cos(theta[0])*sin(theta[2]) - sin(theta[0])*cos(theta[1])*cos(theta[2]); const float r22 = -sin(theta[0])*sin(theta[2]) + cos(theta[0])*cos(theta[1])*cos(theta[2]); const float r32 = sin(theta[1])*cos(theta[2]); const float r13 = sin(theta[0])*sin(theta[1]); const float r23 = -cos(theta[0])*sin(theta[1]); const float r33 = cos(theta[1]); if(id < na) { const int id3 = id * 3; float x, y, z; x = atom_coord_orig[id3 ] - mole_center_coord[0]; y = atom_coord_orig[id3+1] - mole_center_coord[1]; z = atom_coord_orig[id3+2] - mole_center_coord[2]; atom_coord_rotated[id3 ] = r11 * x + r12 * y + r13 * z; atom_coord_rotated[id3+1] = r21 * x + r22 * y + r23 * z; atom_coord_rotated[id3+2] = r31 * x + r32 * y + r33 * z; } } __global__ void lig_calc_dis_atomgrid(int na, int ng1, float *xd, float *yd, float *zd, float *grid_coord, float *atom_coord_rotated) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int nag = na * ng1; if(id < nag) { const int cur_atom = (id / ng1); const int cur_atom3 = cur_atom * 3; const int cur_grid = id % ng1; xd[id] = atom_coord_rotated[cur_atom3 ] - grid_coord[cur_grid]; yd[id] = atom_coord_rotated[cur_atom3+1] - grid_coord[cur_grid]; zd[id] = atom_coord_rotated[cur_atom3+2] - grid_coord[cur_grid]; xd[id] *= xd[id]; yd[id] *= yd[id]; zd[id] *= zd[id]; } } __global__ void lig_vox_init_grid(int ng3,float *grid_r,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < ng3) { //initialize grid_r[id]=0.0; grid_i[id]=0.0; } } __global__ void lig_vox_init_fft(int nf3,cufftComplex *lig_in) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { //initialize lig_in[id] = make_cuComplex( 0.0, 0.0); //lig_in[id].x=0.0; //lig_in[id].y=0.0; } } __global__ void 
ligand_voxel_set(int ng1 ,cufftComplex *lig_in ,float *grid_r ,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const int ng3 = ng2 * ng1; const int nf1 = ng1 * 2; const int nf2 = nf1 * nf1; const int ng1_half = ng1 / 2; const float surface = 1.0; // grid-assignment score (protein surface) //if(id == 0) for(int i=0;i<ng3;i++) if(grid_i[i]!=0.0) printf(" [%03d,%03d,%03d] : %6.3f\n",i/ng2,i/ng1%ng1,i%ng1,grid_i[i]); //if(id == 0) for(int i=0;i<ng3;i++) printf(" [%03d,%03d,%03d] : %6.3f\n",i/ng2,(i/ng1)%ng1,i%ng1,grid_i[i]); if(id < ng3) { const int i = id / ng2; const int j = (id / ng1) % ng1; const int k = id % ng1; const int idoff = (i + ng1_half) * nf2 + (j + ng1_half) * nf1 + (k + ng1_half); //* if(grid_r[id]==surface) {// this condition judges whether surface(1.0) or temporary score(-8888.0) lig_in[idoff] = make_cuComplex( grid_r[id], grid_i[id]); } else { lig_in[idoff] = make_cuComplex( 0.0, grid_i[id]); } //* } } __global__ void lig_vox_surface_cut_CtoT(int ng1, float delta, float *grid_r) { // Core score to Temporary score const int id = blockIdx.x * blockDim.x + threadIdx.x; const float swollen_surface = -8888.0; const int ng2 = ng1 * ng1; const int ng3 = ng2 * ng1; if(id < ng3) { const int i = id / ng2; const int j = (id / ng1) % ng1; const int k = id % ng1; if(i==0||i==ng1-1||j==0||j==ng1-1||k==0||k==ng1-1) { // skip border } else { if(grid_r[id]==delta) { if(grid_r[id-1]==0 || grid_r[id+1]==0 || grid_r[id-ng1]==0 || grid_r[id+ng1]==0 || grid_r[id-ng2]==0 || grid_r[id+ng2]==0) { grid_r[id]=swollen_surface; } } } } } __global__ void lig_vox_elec(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const float pad = (ng1 * grid_width / 2); //* if(id < na) { //if(!fabs(_Charge[id]) < 0.0001) continue; const int id3 = id * 3; //const int i = (atom_coord_rotated[id3 ] + pad) / grid_width; //const 
int j = (atom_coord_rotated[id3+1] + pad) / grid_width; //const int k = (atom_coord_rotated[id3+2] + pad) / grid_width; const int i = FMAX(0, FMIN((atom_coord_rotated[id3 ] + pad) / grid_width, ng1 - 1)); const int j = FMAX(0, FMIN((atom_coord_rotated[id3+1] + pad) / grid_width, ng1 - 1)); const int k = FMAX(0, FMIN((atom_coord_rotated[id3+2] + pad) / grid_width, ng1 - 1)); //grid_i[i*ng2+j*ng1+k] += _Charge[id]; //printf(" %08d-1 : %.2f, %.2f\n",i*ng2+j*ng1+k,grid_i[i*ng2+j*ng1+k],_Charge[id]); atomicAdd(&grid_i[i*ng2+j*ng1+k],_Charge[id]); //printf(" %08d-2 : %.2f, %.2f\n",i*ng2+j*ng1+k,grid_i[i*ng2+j*ng1+k],_Charge[id]); } //*/ } __global__ void lig_vox_elec_serial(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const int ng2 = ng1 * ng1; const float pad = (ng1 * grid_width / 2); if(id==0) { for( int l = 0 ; l < na ; l++ ) { const int l3 = l*3; const int i =(atom_coord_rotated[l3 ] + pad) / grid_width; const int j =(atom_coord_rotated[l3+1] + pad) / grid_width; const int k =(atom_coord_rotated[l3+2] + pad) / grid_width; //printf(" [%5d] [x:%12.8f,y:%12.8f,z:%12.8f] [pad:%6.3f], [%3d,%3d,%3d] \n",l,atom_coord_rotated[l3 ],atom_coord_rotated[l3+1],atom_coord_rotated[l3+2],pad,i,j,k); //printf(" [%5d] [x:%8.0f,y:%8.0f,z:%8.0f] [pad:%6.3f], [%3d,%3d,%3d] \n",l,atom_coord_rotated[l3 ],atom_coord_rotated[l3+1],atom_coord_rotated[l3+2],pad,i,j,k); //if(grid_i[i*ng2+j*ng1+k]!=0)printf(" Pos : %d, current : %f, new : %f\n",i*ng2+j*ng1+k, grid_i[i*ng2+j*ng1+k], _Charge[l]); grid_i[i*ng2+j*ng1+k] += _Charge[l]; } } } __device__ void lig_vox_surface_cut_TtoO(int ng3, float delta, float *grid_r) { // Temporary score to Open space score const int id = blockIdx.x * blockDim.x + threadIdx.x; const float swollen_surface = -8888.0; // temporary score for swollen ligand surface if(id < ng3) { if(grid_r[id]==swollen_surface) { grid_r[id]=0.0; } } } __global__ void convolution_gpu(int 
nf3, float *rec_r, float *rec_i, cufftComplex *lig_out, cufftComplex *lig_in) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id<nf3) { const float lig_r = lig_out[id].x; const float lig_i = lig_out[id].y; lig_in[id] = make_cuComplex( rec_r[id]*lig_r + rec_i[id]*lig_i, rec_r[id]*lig_i - rec_i[id]*lig_r); //lig_in[id].x = rec_r[id]*lig_r + rec_i[id]*lig_i; //lig_in[id].y = rec_r[id]*lig_i - rec_i[id]*lig_r; } } __global__ void max_pos_single(int nf3, cufftComplex *out, float *score, int *pos) { //blockDim.x = nThreads //score[nBlocks], pos[nBlocks] (nBlocks = nf3 / nThreads) //sdata[nThreads] extern __shared__ float sdata[]; float mscore; const int thr_id = threadIdx.x; const int nThreads = blockDim.x; const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { mscore = sdata[thr_id] = cuCrealf(out[id])/nf3; __syncthreads(); //all threads set sdata[thr_id] //reduction for(int offset = nThreads / 2; offset > 0; offset /= 2) { if (thr_id < offset) { sdata[thr_id] = FMAX(sdata[thr_id], sdata[thr_id + offset]); } __syncthreads(); } if (mscore == sdata[0]) {//mscore specify position of max score score[blockIdx.x] = sdata[0]; pos[blockIdx.x] = id; //printf(" BLOCK ID:%d, sdata[0]=%f, pos=%d\n",blockIdx.x,sdata[0],id); } } } __global__ void max_pos_multi_set(int nf3, cufftComplex *out, float *temp_score, int *temp_index) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < nf3) { temp_score[id] = out[id].x; temp_index[id] = id; } } //, std::vector<cufftComplex> *temp_result , thrust::vector<cufftComplex> *temp_result //thrust::device_ptr<cufftComplex> *temp_result cufftComplex *temp_result,thrust::device_ptr<cufftComplex> temp_result __global__ void max_pos_multi(int nf3, cufftComplex *out, float *score, int *pos,const int num_sort,const int offset) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < offset) { if (out[id].x < out[id+offset].x) { out[id].x = out[id+offset].x; out[id].y = out[id+offset].y; } /* if(id==0) { for(int 
i=0; i<num_sort*2; i++) printf(" id=%d, %f %f\n",i,out[i].x/nf3,out[i].y); printf("\n"); } //*/ } //*/ } /* __global__ void max_pos_multi(int nf3, cufftComplex *out, float *score, int *pos,const int num_sort, float *temp_score, int *temp_index) { //blockDim.x = nThreads, //score[nBlocks], pos[nBlocks] (nBlocks = nf3 / nThreads) //sdata[nThreads] extern __shared__ float sdata[]; float mscore; int offset; const int thr_id = threadIdx.x; const int nThreads = blockDim.x; const int id = blockIdx.x * blockDim.x + threadIdx.x; //* if(id < nf3) { temp_score[id]=cuCrealf(out[id])/nf3; temp_index[id]=id; /* __syncthreads(); //all threads set sdata[thr_id] //reduction for(offset = nThreads / 2; offset > num_sort; ) { offset /= 2; if (thr_id < offset) { sdata[thr_id] = FMAX(sdata[thr_id], sdata[thr_id + offset]); } //if(id<1)printf(" id=%d, t=%d, off=%d\n",id,num_sort,offset); __syncthreads(); } //if(id<1)printf(" [last] id=%d, t=%d, off=%d\n",id,num_sort,offset); //thrust::sort(sdata,sdata+10); if(id < num_sort) { if (mscore == sdata[id]) {//mscore specify position of max score (float equality comparison... amari yokunai) score[blockIdx.x] = sdata[0]; pos[blockIdx.x] = id; //printf(" BLOCK ID:%d, sdata[0]=%f, pos=%d\n",blockIdx.x,sdata[0],i); } } //* if(temp_score[id] >3000) printf(" id=%d, %f %d\n",id,temp_score[id],temp_index[id]); } //* } //*/
46cf1732418cf1dc1c1117e53b4ea29716416fb8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <hip/hip_runtime_api.h> #include <errno.h> #include <unistd.h> /****************************************************************************** * The variable names and the function names of this program is same as provided by the university. The added variable and function are the only changes made to this program. * To compile: * nvcc -o linear_146 linear_146.cu -lm * * To run: * .linear_146 * *****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {69.09,102.61},{82.13,152.08},{73.94,133.41},{67.94,130.43}, {83.26,134.77},{83.05,140.13},{79.17,149.94},{78.38,151.22}, {83.10,152.31},{73.50,140.04},{75.41,129.24},{87.18,142.30}, {65.12,132.63},{49.91,98.86},{ 5.65,45.64},{67.40,142.60}, { 4.96,40.62},{45.49,89.81},{50.42,104.86},{89.77,164.46}, {18.97,51.92},{68.36,128.98},{ 5.51,20.69},{23.82,73.75}, {66.69,139.56},{93.58,169.37},{66.54,128.90},{13.22,50.82}, {46.69,84.63},{60.31,120.24},{61.09,105.13},{40.11,97.65}, {49.86,96.39},{53.57,110.10},{ 6.19,47.58},{12.38,64.19}, {78.14,141.00},{27.83,61.09},{41.00,76.48},{61.04,120.42}, {24.78,68.13},{52.50,107.45},{14.94,69.36},{35.97,79.09}, {40.30,73.31},{92.79,182.68},{ 2.26,38.15},{33.80,70.49}, {65.87,121.84},{45.00,78.95},{83.62,159.92},{35.90,85.48}, { 6.00,51.97},{59.55,110.64},{18.14,49.26},{46.19,75.07}, {42.78,103.67},{21.68,66.82},{64.63,146.59},{96.75,168.65}, {20.19,44.40},{66.85,143.42},{61.71,116.91},{74.56,133.91}, {98.14,166.53},{91.82,154.60},{ 6.83,27.37},{91.03,163.50}, {41.90,92.72},{65.35,121.86},{59.70,116.10},{70.07,122.91}, {97.62,173.25},{21.01,55.66},{17.16,47.12},{71.91,131.35}, { 0.31,17.13},{33.27,56.88},{69.57,128.78},{66.31,121.39}, {81.24,144.19},{90.57,176.58},{77.25,142.55},{94.31,162.47}, 
{42.88,84.68},{64.35,111.81},{ 2.18,26.05},{84.89,139.55}, { 3.69,42.18},{46.86,111.42},{91.62,162.75},{48.86,103.04}, {11.69,49.69},{94.38,180.77},{35.87,68.86},{13.57,53.53}, {37.92,93.43},{53.11,100.27},{ 9.44,56.21},{66.37,119.22}, {96.89,143.15},{79.79,151.46},{29.43,104.35},{97.53,166.99}, {18.39,75.76},{41.46,88.34},{28.20,71.24},{15.77,48.03}, {23.44,72.10},{79.20,145.25},{ 6.38,37.64},{20.94,61.35}, {31.44,73.70},{58.22,111.84},{35.73,93.21},{54.87,115.86}, {92.84,161.21},{57.35,111.33},{48.13,104.70},{39.15,84.25}, {76.73,131.17},{33.16,86.42},{79.30,152.22},{32.79,85.88}, {61.08,124.63},{33.65,86.36},{30.30,67.58},{25.36,55.42}, {60.60,132.22},{54.55,107.11},{61.55,120.12},{21.43,51.10}, {55.28,114.45},{69.39,146.15},{71.36,136.26},{78.32,158.06}, {53.78,104.73},{41.16,88.31},{58.21,105.38},{22.62,48.68}, {57.70,107.41},{87.20,160.88},{53.56,112.60},{45.79,103.69}, {87.23,172.35},{26.91,66.80},{90.24,146.45},{10.52,51.41}, {88.18,177.14},{89.88,146.67},{ 3.40,44.19},{63.10,118.63}, {72.20,127.43},{ 7.21,44.09},{68.95,116.40},{93.06,141.28}, { 1.78,47.44},{21.78,58.19},{95.38,167.88},{26.07,69.08}, {82.38,144.48},{44.78,87.81},{ 5.45,47.63},{14.61,46.51}, {47.12,97.36},{91.40,146.00},{ 3.92,31.27},{30.83,54.33}, {51.07,118.44},{11.70,45.32},{37.06,76.74},{93.60,154.41}, {88.67,158.77},{58.76,102.75},{18.18,71.37},{61.55,96.09}, {64.81,126.94},{38.89,80.15},{40.33,89.31},{ 6.98,42.89}, {26.84,77.22},{ 1.48,38.03},{69.64,125.26},{60.44,124.70}, {44.32,103.70},{23.96,61.48},{41.32,66.25},{76.06,136.13}, {74.79,139.10},{48.14,86.10},{90.45,175.78},{87.86,133.98}, {92.55,170.68},{58.06,111.65},{79.93,134.77},{61.20,126.32}, {99.31,173.71},{47.55,96.83},{ 5.95,58.93},{54.41,117.09}, {49.62,95.15},{20.48,52.60},{26.69,64.39},{77.38,138.43}, {21.50,61.58},{37.20,100.88},{26.12,60.30},{45.04,101.03}, {12.08,59.04},{13.59,46.57},{97.04,166.44},{58.34,109.75}, { 4.47,41.91},{ 4.82,26.79},{49.25,120.66},{47.96,92.87}, 
{94.68,160.36},{38.52,78.12},{18.86,67.64},{65.01,128.12}, {94.62,168.74},{46.49,100.40},{18.67,59.96},{61.25,126.12}, {26.30,63.87},{16.32,75.77},{47.98,83.09},{97.67,167.17}, {90.04,155.34},{61.05,119.81},{34.12,79.69},{36.37,81.55}, {67.37,147.11},{94.03,166.73},{27.01,54.22},{14.88,38.59}, {53.43,101.46},{44.85,102.66},{96.98,172.50},{29.25,65.32}, {20.28,82.30},{82.38,148.74},{47.99,95.71},{54.39,113.38}, {24.78,60.98},{46.40,112.11},{83.11,154.36},{78.56,142.47}, {52.44,120.30},{85.45,139.05},{18.44,55.81},{33.82,91.05}, {81.25,148.47},{28.27,71.42},{28.72,81.91},{43.69,113.37}, {95.79,176.73},{20.00,66.36},{ 1.90,51.15},{69.08,102.50}, {66.37,126.22},{75.30,137.17},{50.89,107.35},{26.57,71.46}, {96.71,173.85},{61.50,136.37},{24.52,55.79},{ 3.31,25.80}, {58.29,121.62},{38.72,73.27},{58.48,109.58},{79.94,143.31}, {45.13,95.27},{14.01,41.17},{63.65,117.17},{73.39,150.88}, {57.61,118.65},{68.16,125.50},{78.05,134.05},{84.45,140.93}, {64.23,127.52},{51.91,105.59},{52.26,103.94},{48.43,74.06}, {45.19,95.99},{16.62,66.21},{34.10,70.76},{46.25,96.86}, {65.54,128.94},{73.65,134.18},{87.08,161.45},{45.86,101.31}, {99.65,155.49},{62.47,116.88},{60.77,123.30},{10.00,73.95}, {69.31,138.95},{99.81,190.04},{18.80,61.57},{11.29,37.86}, {32.22,61.46},{83.93,165.72},{23.79,61.15},{61.98,134.84}, {36.38,81.78},{91.53,162.22},{17.14,42.61},{31.77,81.24}, {41.24,88.26},{53.74,120.40},{ 7.59,46.01},{69.20,145.90}, {31.57,76.95},{41.83,103.94},{76.31,145.60},{34.57,86.48}, {78.44,140.01},{35.91,83.76},{67.19,122.40},{28.51,69.92}, {41.12,109.23},{38.78,89.35},{87.32,160.07},{81.17,141.79}, {27.93,80.99},{43.71,85.72},{35.55,76.40},{48.41,96.15}, {35.86,93.18},{58.45,103.95},{32.51,76.14},{22.98,47.63}, {84.80,150.30},{19.37,67.26},{62.21,124.12},{72.56,125.75}, {12.62,44.66},{84.63,150.02},{35.79,77.18},{91.59,167.42}, {56.79,135.96},{60.75,128.75},{75.17,132.66},{50.03,97.47}, {22.77,81.91},{79.14,140.88},{30.32,89.46},{64.27,117.96}, 
{14.61,72.92},{13.17,65.89},{73.53,123.85},{77.44,118.69}, {56.04,107.10},{46.18,92.11},{54.48,94.07},{73.79,151.52}, { 8.83,39.47},{ 3.58,40.64},{33.35,83.41},{63.51,127.53}, {63.18,134.55},{38.65,77.96},{81.77,143.31},{28.99,71.78}, {46.57,107.91},{31.88,61.51},{69.75,121.75},{39.03,92.38}, {78.95,138.81},{33.03,62.89},{93.32,160.29},{51.73,96.94}, {99.52,178.80},{39.66,81.62},{20.81,63.06},{54.52,101.68}, {13.88,77.63},{11.54,47.90},{57.06,111.62},{34.85,73.47}, { 1.03,38.38},{99.98,147.72},{15.65,37.52},{93.98,171.57}, {26.00,61.54},{26.34,97.49},{44.45,89.46},{30.19,85.94}, {65.17,121.23},{96.37,174.62},{17.59,57.65},{49.21,85.15}, {97.14,164.23},{69.46,128.85},{52.29,116.47},{59.90,97.23}, {41.62,96.65},{46.43,96.75},{64.79,120.04},{16.39,42.84}, {96.74,160.51},{15.06,67.38},{12.89,32.01},{59.36,96.22}, {36.49,85.66},{ 6.12,33.73},{87.56,144.59},{58.04,116.48}, {45.12,109.10},{90.29,155.38},{15.50,58.50},{ 8.17,39.33}, {36.71,87.05},{95.02,175.91},{56.45,102.69},{29.55,76.64}, {81.35,144.65},{51.25,106.51},{47.80,101.57},{39.43,89.38}, {16.54,75.06},{18.23,39.97},{38.47,84.60},{72.95,137.89}, {20.95,63.99},{53.89,104.26},{62.01,111.63},{77.09,132.07}, {75.62,131.00},{82.52,143.40},{29.78,67.90},{24.28,65.09}, {60.18,116.00},{64.87,116.99},{66.81,118.50},{97.63,173.78}, {82.52,145.14},{17.04,56.60},{69.23,128.14},{11.14,35.51}, {44.81,75.59},{79.91,130.95},{73.98,123.15},{30.70,68.34}, {16.56,69.96},{44.54,111.55},{42.63,98.01},{ 8.33,52.94}, {23.70,81.71},{72.51,126.68},{51.65,91.93},{18.86,52.82}, {51.40,105.16},{33.69,78.93},{48.25,95.34},{95.60,165.19}, {70.55,135.12},{ 7.54,25.40},{84.10,147.82},{ 5.46,55.62}, {51.25,105.36},{33.31,70.29},{91.09,155.95},{93.79,150.87}, {39.64,105.11},{23.94,70.09},{ 2.79,57.46},{93.12,157.26}, {82.66,153.35},{20.67,50.18},{25.48,55.36},{73.93,125.80}, {82.75,164.55},{66.85,115.72},{ 6.63,57.24},{56.61,128.98}, {73.65,146.00},{28.61,76.79},{80.74,154.19},{42.26,82.19}, 
{48.99,96.21},{29.42,74.92},{47.75,96.08},{30.98,74.08}, {21.47,58.18},{40.25,84.61},{71.79,134.58},{62.21,100.02}, { 6.50,44.15},{23.18,54.16},{99.44,173.73},{34.55,74.88}, {44.42,90.87},{94.40,160.68},{44.12,83.50},{38.81,83.14}, {17.89,48.98},{41.90,89.86},{53.95,124.31},{34.65,77.79}, {99.46,188.27},{45.96,96.10},{56.07,108.36},{52.48,94.99}, {51.23,105.75},{13.28,48.42},{81.65,142.71},{27.37,57.49}, {47.88,98.01},{45.77,83.07},{67.87,113.75},{ 6.56,20.13}, {57.51,99.93},{45.05,98.83},{86.15,154.73},{32.01,66.76}, {10.76,37.49},{15.01,59.42},{56.76,111.61},{34.09,78.70}, {88.70,179.11},{50.88,120.11},{22.56,51.27},{77.01,143.66}, {31.56,69.66},{45.50,100.94},{64.12,114.38},{86.51,162.80}, {91.85,153.99},{89.59,139.43},{62.84,120.04},{ 1.00,45.36}, {76.04,134.88},{ 2.31,39.43},{ 4.08,36.49},{50.56,91.92}, {80.18,165.25},{84.88,150.80},{85.96,154.04},{58.01,121.88}, {83.62,142.62},{78.78,133.92},{24.57,47.24},{10.38,51.07}, {70.57,129.78},{ 6.24,63.42},{95.52,158.82},{47.53,99.59}, {37.80,86.23},{57.04,125.93},{98.17,189.61},{18.58,56.15}, {43.64,95.36},{97.10,155.81},{41.32,104.04},{58.80,110.09}, {46.92,109.08},{55.66,115.05},{43.44,91.67},{ 1.16,18.39}, {44.90,79.00},{88.95,146.71},{77.11,140.55},{38.22,70.52}, {33.63,94.80},{82.24,147.90},{32.39,76.09},{68.71,137.44}, {61.91,139.84},{ 2.72,31.93},{31.01,83.44},{43.82,82.06}, {90.41,138.37},{74.29,146.66},{34.46,82.13},{27.66,70.23}, { 0.75,35.40},{70.47,122.31},{38.41,74.27},{59.11,111.32}, {94.20,145.15},{61.79,112.71},{25.69,72.79},{17.38,73.54}, { 2.57,27.14},{40.88,85.46},{59.63,106.14},{23.24,43.26}, {24.06,73.14},{ 3.38,46.52},{24.26,58.40},{93.00,155.20}, {48.69,81.21},{32.00,80.68},{67.43,124.66},{76.88,118.26}, {97.26,169.42},{52.05,86.99},{26.68,80.02},{88.80,164.27}, {30.79,92.40},{13.76,46.00},{ 4.39,20.46},{47.45,110.48}, {22.42,68.09},{39.34,105.98},{90.58,167.10},{45.22,80.64}, { 2.29,24.47},{51.00,101.65},{16.63,52.64},{43.24,91.93}, {33.47,77.44},{69.44,121.59},{93.63,154.70},{97.34,176.12}, 
{82.83,145.39},{84.31,123.04},{51.96,110.56},{13.79,49.08}, {85.09,147.33},{19.50,64.58},{25.47,81.74},{51.68,106.78}, {27.99,75.05},{ 2.78,60.03},{93.13,166.31},{27.66,83.80}, {43.98,86.41},{ 6.34,27.97},{41.25,74.57},{10.49,37.94}, {94.29,159.02},{33.41,82.45},{19.80,64.16},{18.35,65.02}, {93.57,182.68},{ 7.90,51.96},{85.56,157.84},{50.76,88.32}, {65.70,121.81},{72.32,151.58},{23.45,66.39},{ 4.44,33.36}, {58.98,115.21},{12.18,46.07},{30.66,60.74},{69.63,124.96}, { 2.69,37.64},{16.96,57.25},{60.58,126.99},{73.60,135.37}, {11.98,68.00},{ 0.42,38.89},{47.45,90.83},{44.59,89.17}, { 3.95,27.62},{78.31,145.77},{81.91,153.36},{47.00,109.88}, { 3.10,31.96},{59.53,105.57},{14.67,55.59},{30.44,90.14}, {64.61,123.88},{40.33,96.71},{67.44,133.61},{42.29,68.72}, {44.38,90.29},{65.86,136.25},{91.75,157.96},{24.71,74.32}, {89.50,153.96},{95.40,160.30},{ 2.51,32.12},{51.63,109.59}, {41.35,101.13},{19.94,59.70},{65.45,132.43},{38.21,117.97}, {40.69,84.45},{50.78,126.50},{47.42,103.31},{ 6.98,47.56}, {95.49,162.77},{62.64,129.54},{45.89,106.75},{71.43,126.03}, {95.26,169.84},{81.30,128.51},{16.16,48.62},{ 8.97,75.32}, {28.86,95.51},{10.27,41.50},{78.84,153.22},{83.90,173.33}, {51.33,105.65},{36.43,88.93},{70.23,131.60},{23.96,67.38}, {45.21,97.59},{76.50,118.70},{63.22,105.01},{33.75,102.37}, {72.57,140.64},{18.16,68.62},{27.39,71.28},{ 3.22,51.94}, {89.94,151.77},{23.53,59.07},{18.69,44.83},{25.87,75.41}, {90.76,147.03},{39.84,92.09},{89.20,136.14},{ 1.70,38.68}, {32.49,69.34},{ 6.78,38.27},{32.36,78.73},{57.57,128.00}, {81.11,147.45},{93.22,161.07},{75.48,137.02},{70.72,129.74}, {33.18,80.42},{16.09,52.35},{12.92,42.89},{14.63,47.45}, {16.33,60.65},{26.52,60.33},{65.75,144.10},{60.94,100.72}, {28.23,76.55},{20.77,77.79},{46.35,83.09},{85.82,151.43}, {51.71,91.53},{42.60,70.64},{ 4.22,48.90},{61.25,93.32}, {14.76,46.45},{44.55,83.34},{57.30,106.17},{66.70,130.32}, {47.19,113.56},{14.60,19.99},{29.04,67.68},{72.16,136.94}, {24.30,60.44},{44.74,96.11},{58.89,105.52},{53.13,107.86}, 
{67.93,143.40},{60.48,113.91},{47.24,104.36},{58.88,114.66}, {80.90,148.45},{58.37,113.83},{89.03,138.68},{ 8.12,41.01}, {24.14,68.27},{37.59,74.19},{44.67,83.16},{ 5.31,35.62}, {57.88,103.64},{76.66,135.69},{47.92,97.61},{34.11,71.58}, {73.72,133.44},{67.16,123.69},{ 2.70,45.81},{13.06,47.63}, {70.19,132.68},{40.77,85.91},{68.57,122.66},{83.35,147.40}, {95.10,159.26},{76.55,140.12},{44.42,92.71},{77.80,141.02}, {18.03,54.85},{16.33,44.89},{54.49,102.97},{88.63,149.21}, {53.20,104.65},{68.60,128.48},{13.34,52.97},{17.39,48.69}, {95.50,153.55},{75.90,130.21},{93.37,171.51},{59.28,117.72}, {22.33,72.20},{31.01,81.27},{16.35,50.69},{ 9.23,54.49}, {86.04,158.82},{46.01,99.38},{32.61,47.24},{14.21,40.26}, {37.28,76.21},{60.05,106.09},{94.82,165.99},{98.03,169.17}, { 8.14,37.52},{29.47,58.43},{91.59,180.13},{64.46,123.22}, {81.92,164.97},{40.79,93.04},{98.66,170.59},{85.47,151.64}, {13.01,51.89},{69.39,134.86},{77.60,128.60},{89.83,157.49}, {22.48,52.09},{61.31,118.09},{39.29,91.09},{57.26,80.48}, {62.54,129.76},{30.22,81.60},{21.56,60.61},{65.89,99.43}, {78.90,151.00},{93.57,158.74},{38.70,76.63},{ 8.25,47.63}, {91.94,169.65},{54.05,110.82},{98.12,177.20},{17.59,55.03}, {74.27,140.33},{58.40,122.38},{ 6.27,35.39},{90.31,164.73}, {87.59,150.48},{47.17,97.51},{71.25,127.19},{56.58,106.56}, {52.14,108.51},{24.20,80.20},{30.66,79.03},{59.72,116.04}, {60.16,123.78},{22.09,68.92},{45.29,111.19},{92.16,156.21}, {45.05,113.83},{81.73,145.26},{34.92,85.83},{13.92,67.65}, {24.14,70.45},{81.85,153.42},{41.19,85.93},{ 0.67,39.26}, {36.09,90.68},{19.54,62.25},{33.38,74.35},{10.19,55.59}, {59.29,93.03},{11.58,62.19},{87.30,152.51},{25.43,59.13}, {74.35,134.81},{65.23,139.29},{32.23,83.27},{70.94,130.63}, {70.57,128.03},{15.13,65.98},{88.17,138.65},{ 9.49,43.25}, {68.37,120.54},{74.69,143.68},{50.10,119.13},{ 8.76,32.56}, {37.70,101.07},{ 4.04,42.48},{29.84,97.01},{69.88,130.23}, { 8.65,45.47},{66.47,118.24},{12.35,55.24},{20.41,64.50}, 
{91.28,166.16},{91.82,165.77},{10.65,52.39},{79.95,147.31}, {16.06,57.21},{71.00,131.19},{91.27,155.74},{28.57,60.74}, {31.63,92.00},{15.84,50.24},{80.22,135.00},{51.52,96.40}, {67.42,117.71},{15.46,65.55},{85.71,151.69},{99.32,165.22}, {28.38,107.61},{75.32,129.56},{96.48,161.61},{36.18,95.12}, {34.18,71.39},{ 7.77,40.69},{37.98,65.63},{68.95,131.41}, {34.08,78.15},{29.70,84.37},{92.61,178.05},{82.22,128.06}, { 1.32,32.39},{ 2.57,41.54},{72.80,142.40},{19.88,47.67}, {95.42,167.29},{ 7.56,63.21},{90.52,146.47},{67.80,115.47}, {89.41,162.67},{87.72,165.63},{71.28,138.33},{25.08,70.34}, {41.57,86.86},{51.45,104.07},{61.54,121.29},{32.53,93.45}, { 3.26,29.72},{74.50,137.34},{45.07,102.67},{67.55,124.04}, {38.14,88.93},{ 2.46,37.56},{21.27,66.20},{32.26,67.66}, {36.05,64.93},{78.82,147.50},{ 7.26,40.83},{86.28,157.01}, {49.02,95.59},{80.36,136.69},{ 2.94,46.23},{50.55,102.49}, {28.35,69.73},{19.99,77.88},{22.42,72.52},{15.98,58.85}, {68.79,130.10},{23.15,58.87},{ 3.27,40.79},{79.02,150.83}, {37.04,72.00},{45.11,79.16},{30.61,58.70},{74.69,133.55}, {25.47,71.56},{89.34,157.40},{61.39,125.26},{ 1.52,31.44}, {78.37,136.81},{48.33,123.74},{71.58,126.89},{ 5.20,49.01}, {90.20,142.37},{94.03,153.03},{97.27,167.60},{91.90,156.09}, {56.45,131.39},{59.09,118.83},{61.75,115.24},{22.31,75.23}, {73.98,142.59},{24.38,61.52},{73.46,110.70},{27.35,85.50}, {55.66,115.25},{67.56,119.99},{61.81,130.15},{12.05,48.88}, {84.16,146.44},{48.33,99.47},{83.37,138.40},{23.51,81.53}, {85.14,138.65},{92.12,176.32},{53.13,115.81},{90.59,162.45}, {92.52,182.64},{76.45,149.04},{48.40,96.60},{ 8.70,45.67}, {96.13,174.15},{21.67,43.90},{15.13,43.97},{73.66,137.40}, {80.31,136.39},{79.59,139.56},{55.33,112.94},{27.38,47.87} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; 
double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } // Calculate the difference between two times. Returns zero on // success and the time difference through an argument. It will // be unsuccessful if the start time is after the end time. int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int d_sec = finish->tv_sec - start->tv_sec; long long int d_nsec = finish->tv_nsec - start->tv_nsec; if(d_nsec < 0 ) { d_sec--; d_nsec += 1000000000; } *difference = d_sec * 1000000000 + d_nsec; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = hipMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error, 
hipGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error, hipGetErrorString(error)); } error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error, hipGetErrorString(error)); } error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error, hipGetErrorString(error)); } for(i=0;i<8;i++) { double h_error_sum_arr[1000]; double error_sum_total; double error_sum_mean; hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data); hipDeviceSynchronize(); error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost); if(error){ fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error, hipGetErrorString(error)); } for(int j=0; j<n_data; j++) { error_sum_total += h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } error_sum_total = 0; } if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } error = hipFree(d_dm); if(error){ fprintf(stderr, "hipFree on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipFree(d_dc); if(error){ fprintf(stderr, "hipFree on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipFree(d_data); if(error){ fprintf(stderr, "hipFree on d_data returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipFree(d_error_sum_arr); if(error){ fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } 
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
46cf1732418cf1dc1c1117e53b4ea29716416fb8.cu
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <errno.h> #include <unistd.h> /****************************************************************************** * The variable names and the function names of this program is same as provided by the university. The added variable and function are the only changes made to this program. * To compile: * nvcc -o linear_146 linear_146.cu -lm * * To run: * .linear_146 * *****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {69.09,102.61},{82.13,152.08},{73.94,133.41},{67.94,130.43}, {83.26,134.77},{83.05,140.13},{79.17,149.94},{78.38,151.22}, {83.10,152.31},{73.50,140.04},{75.41,129.24},{87.18,142.30}, {65.12,132.63},{49.91,98.86},{ 5.65,45.64},{67.40,142.60}, { 4.96,40.62},{45.49,89.81},{50.42,104.86},{89.77,164.46}, {18.97,51.92},{68.36,128.98},{ 5.51,20.69},{23.82,73.75}, {66.69,139.56},{93.58,169.37},{66.54,128.90},{13.22,50.82}, {46.69,84.63},{60.31,120.24},{61.09,105.13},{40.11,97.65}, {49.86,96.39},{53.57,110.10},{ 6.19,47.58},{12.38,64.19}, {78.14,141.00},{27.83,61.09},{41.00,76.48},{61.04,120.42}, {24.78,68.13},{52.50,107.45},{14.94,69.36},{35.97,79.09}, {40.30,73.31},{92.79,182.68},{ 2.26,38.15},{33.80,70.49}, {65.87,121.84},{45.00,78.95},{83.62,159.92},{35.90,85.48}, { 6.00,51.97},{59.55,110.64},{18.14,49.26},{46.19,75.07}, {42.78,103.67},{21.68,66.82},{64.63,146.59},{96.75,168.65}, {20.19,44.40},{66.85,143.42},{61.71,116.91},{74.56,133.91}, {98.14,166.53},{91.82,154.60},{ 6.83,27.37},{91.03,163.50}, {41.90,92.72},{65.35,121.86},{59.70,116.10},{70.07,122.91}, {97.62,173.25},{21.01,55.66},{17.16,47.12},{71.91,131.35}, { 0.31,17.13},{33.27,56.88},{69.57,128.78},{66.31,121.39}, {81.24,144.19},{90.57,176.58},{77.25,142.55},{94.31,162.47}, {42.88,84.68},{64.35,111.81},{ 2.18,26.05},{84.89,139.55}, { 
3.69,42.18},{46.86,111.42},{91.62,162.75},{48.86,103.04}, {11.69,49.69},{94.38,180.77},{35.87,68.86},{13.57,53.53}, {37.92,93.43},{53.11,100.27},{ 9.44,56.21},{66.37,119.22}, {96.89,143.15},{79.79,151.46},{29.43,104.35},{97.53,166.99}, {18.39,75.76},{41.46,88.34},{28.20,71.24},{15.77,48.03}, {23.44,72.10},{79.20,145.25},{ 6.38,37.64},{20.94,61.35}, {31.44,73.70},{58.22,111.84},{35.73,93.21},{54.87,115.86}, {92.84,161.21},{57.35,111.33},{48.13,104.70},{39.15,84.25}, {76.73,131.17},{33.16,86.42},{79.30,152.22},{32.79,85.88}, {61.08,124.63},{33.65,86.36},{30.30,67.58},{25.36,55.42}, {60.60,132.22},{54.55,107.11},{61.55,120.12},{21.43,51.10}, {55.28,114.45},{69.39,146.15},{71.36,136.26},{78.32,158.06}, {53.78,104.73},{41.16,88.31},{58.21,105.38},{22.62,48.68}, {57.70,107.41},{87.20,160.88},{53.56,112.60},{45.79,103.69}, {87.23,172.35},{26.91,66.80},{90.24,146.45},{10.52,51.41}, {88.18,177.14},{89.88,146.67},{ 3.40,44.19},{63.10,118.63}, {72.20,127.43},{ 7.21,44.09},{68.95,116.40},{93.06,141.28}, { 1.78,47.44},{21.78,58.19},{95.38,167.88},{26.07,69.08}, {82.38,144.48},{44.78,87.81},{ 5.45,47.63},{14.61,46.51}, {47.12,97.36},{91.40,146.00},{ 3.92,31.27},{30.83,54.33}, {51.07,118.44},{11.70,45.32},{37.06,76.74},{93.60,154.41}, {88.67,158.77},{58.76,102.75},{18.18,71.37},{61.55,96.09}, {64.81,126.94},{38.89,80.15},{40.33,89.31},{ 6.98,42.89}, {26.84,77.22},{ 1.48,38.03},{69.64,125.26},{60.44,124.70}, {44.32,103.70},{23.96,61.48},{41.32,66.25},{76.06,136.13}, {74.79,139.10},{48.14,86.10},{90.45,175.78},{87.86,133.98}, {92.55,170.68},{58.06,111.65},{79.93,134.77},{61.20,126.32}, {99.31,173.71},{47.55,96.83},{ 5.95,58.93},{54.41,117.09}, {49.62,95.15},{20.48,52.60},{26.69,64.39},{77.38,138.43}, {21.50,61.58},{37.20,100.88},{26.12,60.30},{45.04,101.03}, {12.08,59.04},{13.59,46.57},{97.04,166.44},{58.34,109.75}, { 4.47,41.91},{ 4.82,26.79},{49.25,120.66},{47.96,92.87}, {94.68,160.36},{38.52,78.12},{18.86,67.64},{65.01,128.12}, 
{94.62,168.74},{46.49,100.40},{18.67,59.96},{61.25,126.12}, {26.30,63.87},{16.32,75.77},{47.98,83.09},{97.67,167.17}, {90.04,155.34},{61.05,119.81},{34.12,79.69},{36.37,81.55}, {67.37,147.11},{94.03,166.73},{27.01,54.22},{14.88,38.59}, {53.43,101.46},{44.85,102.66},{96.98,172.50},{29.25,65.32}, {20.28,82.30},{82.38,148.74},{47.99,95.71},{54.39,113.38}, {24.78,60.98},{46.40,112.11},{83.11,154.36},{78.56,142.47}, {52.44,120.30},{85.45,139.05},{18.44,55.81},{33.82,91.05}, {81.25,148.47},{28.27,71.42},{28.72,81.91},{43.69,113.37}, {95.79,176.73},{20.00,66.36},{ 1.90,51.15},{69.08,102.50}, {66.37,126.22},{75.30,137.17},{50.89,107.35},{26.57,71.46}, {96.71,173.85},{61.50,136.37},{24.52,55.79},{ 3.31,25.80}, {58.29,121.62},{38.72,73.27},{58.48,109.58},{79.94,143.31}, {45.13,95.27},{14.01,41.17},{63.65,117.17},{73.39,150.88}, {57.61,118.65},{68.16,125.50},{78.05,134.05},{84.45,140.93}, {64.23,127.52},{51.91,105.59},{52.26,103.94},{48.43,74.06}, {45.19,95.99},{16.62,66.21},{34.10,70.76},{46.25,96.86}, {65.54,128.94},{73.65,134.18},{87.08,161.45},{45.86,101.31}, {99.65,155.49},{62.47,116.88},{60.77,123.30},{10.00,73.95}, {69.31,138.95},{99.81,190.04},{18.80,61.57},{11.29,37.86}, {32.22,61.46},{83.93,165.72},{23.79,61.15},{61.98,134.84}, {36.38,81.78},{91.53,162.22},{17.14,42.61},{31.77,81.24}, {41.24,88.26},{53.74,120.40},{ 7.59,46.01},{69.20,145.90}, {31.57,76.95},{41.83,103.94},{76.31,145.60},{34.57,86.48}, {78.44,140.01},{35.91,83.76},{67.19,122.40},{28.51,69.92}, {41.12,109.23},{38.78,89.35},{87.32,160.07},{81.17,141.79}, {27.93,80.99},{43.71,85.72},{35.55,76.40},{48.41,96.15}, {35.86,93.18},{58.45,103.95},{32.51,76.14},{22.98,47.63}, {84.80,150.30},{19.37,67.26},{62.21,124.12},{72.56,125.75}, {12.62,44.66},{84.63,150.02},{35.79,77.18},{91.59,167.42}, {56.79,135.96},{60.75,128.75},{75.17,132.66},{50.03,97.47}, {22.77,81.91},{79.14,140.88},{30.32,89.46},{64.27,117.96}, {14.61,72.92},{13.17,65.89},{73.53,123.85},{77.44,118.69}, 
{56.04,107.10},{46.18,92.11},{54.48,94.07},{73.79,151.52}, { 8.83,39.47},{ 3.58,40.64},{33.35,83.41},{63.51,127.53}, {63.18,134.55},{38.65,77.96},{81.77,143.31},{28.99,71.78}, {46.57,107.91},{31.88,61.51},{69.75,121.75},{39.03,92.38}, {78.95,138.81},{33.03,62.89},{93.32,160.29},{51.73,96.94}, {99.52,178.80},{39.66,81.62},{20.81,63.06},{54.52,101.68}, {13.88,77.63},{11.54,47.90},{57.06,111.62},{34.85,73.47}, { 1.03,38.38},{99.98,147.72},{15.65,37.52},{93.98,171.57}, {26.00,61.54},{26.34,97.49},{44.45,89.46},{30.19,85.94}, {65.17,121.23},{96.37,174.62},{17.59,57.65},{49.21,85.15}, {97.14,164.23},{69.46,128.85},{52.29,116.47},{59.90,97.23}, {41.62,96.65},{46.43,96.75},{64.79,120.04},{16.39,42.84}, {96.74,160.51},{15.06,67.38},{12.89,32.01},{59.36,96.22}, {36.49,85.66},{ 6.12,33.73},{87.56,144.59},{58.04,116.48}, {45.12,109.10},{90.29,155.38},{15.50,58.50},{ 8.17,39.33}, {36.71,87.05},{95.02,175.91},{56.45,102.69},{29.55,76.64}, {81.35,144.65},{51.25,106.51},{47.80,101.57},{39.43,89.38}, {16.54,75.06},{18.23,39.97},{38.47,84.60},{72.95,137.89}, {20.95,63.99},{53.89,104.26},{62.01,111.63},{77.09,132.07}, {75.62,131.00},{82.52,143.40},{29.78,67.90},{24.28,65.09}, {60.18,116.00},{64.87,116.99},{66.81,118.50},{97.63,173.78}, {82.52,145.14},{17.04,56.60},{69.23,128.14},{11.14,35.51}, {44.81,75.59},{79.91,130.95},{73.98,123.15},{30.70,68.34}, {16.56,69.96},{44.54,111.55},{42.63,98.01},{ 8.33,52.94}, {23.70,81.71},{72.51,126.68},{51.65,91.93},{18.86,52.82}, {51.40,105.16},{33.69,78.93},{48.25,95.34},{95.60,165.19}, {70.55,135.12},{ 7.54,25.40},{84.10,147.82},{ 5.46,55.62}, {51.25,105.36},{33.31,70.29},{91.09,155.95},{93.79,150.87}, {39.64,105.11},{23.94,70.09},{ 2.79,57.46},{93.12,157.26}, {82.66,153.35},{20.67,50.18},{25.48,55.36},{73.93,125.80}, {82.75,164.55},{66.85,115.72},{ 6.63,57.24},{56.61,128.98}, {73.65,146.00},{28.61,76.79},{80.74,154.19},{42.26,82.19}, {48.99,96.21},{29.42,74.92},{47.75,96.08},{30.98,74.08}, 
{21.47,58.18},{40.25,84.61},{71.79,134.58},{62.21,100.02}, { 6.50,44.15},{23.18,54.16},{99.44,173.73},{34.55,74.88}, {44.42,90.87},{94.40,160.68},{44.12,83.50},{38.81,83.14}, {17.89,48.98},{41.90,89.86},{53.95,124.31},{34.65,77.79}, {99.46,188.27},{45.96,96.10},{56.07,108.36},{52.48,94.99}, {51.23,105.75},{13.28,48.42},{81.65,142.71},{27.37,57.49}, {47.88,98.01},{45.77,83.07},{67.87,113.75},{ 6.56,20.13}, {57.51,99.93},{45.05,98.83},{86.15,154.73},{32.01,66.76}, {10.76,37.49},{15.01,59.42},{56.76,111.61},{34.09,78.70}, {88.70,179.11},{50.88,120.11},{22.56,51.27},{77.01,143.66}, {31.56,69.66},{45.50,100.94},{64.12,114.38},{86.51,162.80}, {91.85,153.99},{89.59,139.43},{62.84,120.04},{ 1.00,45.36}, {76.04,134.88},{ 2.31,39.43},{ 4.08,36.49},{50.56,91.92}, {80.18,165.25},{84.88,150.80},{85.96,154.04},{58.01,121.88}, {83.62,142.62},{78.78,133.92},{24.57,47.24},{10.38,51.07}, {70.57,129.78},{ 6.24,63.42},{95.52,158.82},{47.53,99.59}, {37.80,86.23},{57.04,125.93},{98.17,189.61},{18.58,56.15}, {43.64,95.36},{97.10,155.81},{41.32,104.04},{58.80,110.09}, {46.92,109.08},{55.66,115.05},{43.44,91.67},{ 1.16,18.39}, {44.90,79.00},{88.95,146.71},{77.11,140.55},{38.22,70.52}, {33.63,94.80},{82.24,147.90},{32.39,76.09},{68.71,137.44}, {61.91,139.84},{ 2.72,31.93},{31.01,83.44},{43.82,82.06}, {90.41,138.37},{74.29,146.66},{34.46,82.13},{27.66,70.23}, { 0.75,35.40},{70.47,122.31},{38.41,74.27},{59.11,111.32}, {94.20,145.15},{61.79,112.71},{25.69,72.79},{17.38,73.54}, { 2.57,27.14},{40.88,85.46},{59.63,106.14},{23.24,43.26}, {24.06,73.14},{ 3.38,46.52},{24.26,58.40},{93.00,155.20}, {48.69,81.21},{32.00,80.68},{67.43,124.66},{76.88,118.26}, {97.26,169.42},{52.05,86.99},{26.68,80.02},{88.80,164.27}, {30.79,92.40},{13.76,46.00},{ 4.39,20.46},{47.45,110.48}, {22.42,68.09},{39.34,105.98},{90.58,167.10},{45.22,80.64}, { 2.29,24.47},{51.00,101.65},{16.63,52.64},{43.24,91.93}, {33.47,77.44},{69.44,121.59},{93.63,154.70},{97.34,176.12}, 
{82.83,145.39},{84.31,123.04},{51.96,110.56},{13.79,49.08}, {85.09,147.33},{19.50,64.58},{25.47,81.74},{51.68,106.78}, {27.99,75.05},{ 2.78,60.03},{93.13,166.31},{27.66,83.80}, {43.98,86.41},{ 6.34,27.97},{41.25,74.57},{10.49,37.94}, {94.29,159.02},{33.41,82.45},{19.80,64.16},{18.35,65.02}, {93.57,182.68},{ 7.90,51.96},{85.56,157.84},{50.76,88.32}, {65.70,121.81},{72.32,151.58},{23.45,66.39},{ 4.44,33.36}, {58.98,115.21},{12.18,46.07},{30.66,60.74},{69.63,124.96}, { 2.69,37.64},{16.96,57.25},{60.58,126.99},{73.60,135.37}, {11.98,68.00},{ 0.42,38.89},{47.45,90.83},{44.59,89.17}, { 3.95,27.62},{78.31,145.77},{81.91,153.36},{47.00,109.88}, { 3.10,31.96},{59.53,105.57},{14.67,55.59},{30.44,90.14}, {64.61,123.88},{40.33,96.71},{67.44,133.61},{42.29,68.72}, {44.38,90.29},{65.86,136.25},{91.75,157.96},{24.71,74.32}, {89.50,153.96},{95.40,160.30},{ 2.51,32.12},{51.63,109.59}, {41.35,101.13},{19.94,59.70},{65.45,132.43},{38.21,117.97}, {40.69,84.45},{50.78,126.50},{47.42,103.31},{ 6.98,47.56}, {95.49,162.77},{62.64,129.54},{45.89,106.75},{71.43,126.03}, {95.26,169.84},{81.30,128.51},{16.16,48.62},{ 8.97,75.32}, {28.86,95.51},{10.27,41.50},{78.84,153.22},{83.90,173.33}, {51.33,105.65},{36.43,88.93},{70.23,131.60},{23.96,67.38}, {45.21,97.59},{76.50,118.70},{63.22,105.01},{33.75,102.37}, {72.57,140.64},{18.16,68.62},{27.39,71.28},{ 3.22,51.94}, {89.94,151.77},{23.53,59.07},{18.69,44.83},{25.87,75.41}, {90.76,147.03},{39.84,92.09},{89.20,136.14},{ 1.70,38.68}, {32.49,69.34},{ 6.78,38.27},{32.36,78.73},{57.57,128.00}, {81.11,147.45},{93.22,161.07},{75.48,137.02},{70.72,129.74}, {33.18,80.42},{16.09,52.35},{12.92,42.89},{14.63,47.45}, {16.33,60.65},{26.52,60.33},{65.75,144.10},{60.94,100.72}, {28.23,76.55},{20.77,77.79},{46.35,83.09},{85.82,151.43}, {51.71,91.53},{42.60,70.64},{ 4.22,48.90},{61.25,93.32}, {14.76,46.45},{44.55,83.34},{57.30,106.17},{66.70,130.32}, {47.19,113.56},{14.60,19.99},{29.04,67.68},{72.16,136.94}, {24.30,60.44},{44.74,96.11},{58.89,105.52},{53.13,107.86}, 
{67.93,143.40},{60.48,113.91},{47.24,104.36},{58.88,114.66}, {80.90,148.45},{58.37,113.83},{89.03,138.68},{ 8.12,41.01}, {24.14,68.27},{37.59,74.19},{44.67,83.16},{ 5.31,35.62}, {57.88,103.64},{76.66,135.69},{47.92,97.61},{34.11,71.58}, {73.72,133.44},{67.16,123.69},{ 2.70,45.81},{13.06,47.63}, {70.19,132.68},{40.77,85.91},{68.57,122.66},{83.35,147.40}, {95.10,159.26},{76.55,140.12},{44.42,92.71},{77.80,141.02}, {18.03,54.85},{16.33,44.89},{54.49,102.97},{88.63,149.21}, {53.20,104.65},{68.60,128.48},{13.34,52.97},{17.39,48.69}, {95.50,153.55},{75.90,130.21},{93.37,171.51},{59.28,117.72}, {22.33,72.20},{31.01,81.27},{16.35,50.69},{ 9.23,54.49}, {86.04,158.82},{46.01,99.38},{32.61,47.24},{14.21,40.26}, {37.28,76.21},{60.05,106.09},{94.82,165.99},{98.03,169.17}, { 8.14,37.52},{29.47,58.43},{91.59,180.13},{64.46,123.22}, {81.92,164.97},{40.79,93.04},{98.66,170.59},{85.47,151.64}, {13.01,51.89},{69.39,134.86},{77.60,128.60},{89.83,157.49}, {22.48,52.09},{61.31,118.09},{39.29,91.09},{57.26,80.48}, {62.54,129.76},{30.22,81.60},{21.56,60.61},{65.89,99.43}, {78.90,151.00},{93.57,158.74},{38.70,76.63},{ 8.25,47.63}, {91.94,169.65},{54.05,110.82},{98.12,177.20},{17.59,55.03}, {74.27,140.33},{58.40,122.38},{ 6.27,35.39},{90.31,164.73}, {87.59,150.48},{47.17,97.51},{71.25,127.19},{56.58,106.56}, {52.14,108.51},{24.20,80.20},{30.66,79.03},{59.72,116.04}, {60.16,123.78},{22.09,68.92},{45.29,111.19},{92.16,156.21}, {45.05,113.83},{81.73,145.26},{34.92,85.83},{13.92,67.65}, {24.14,70.45},{81.85,153.42},{41.19,85.93},{ 0.67,39.26}, {36.09,90.68},{19.54,62.25},{33.38,74.35},{10.19,55.59}, {59.29,93.03},{11.58,62.19},{87.30,152.51},{25.43,59.13}, {74.35,134.81},{65.23,139.29},{32.23,83.27},{70.94,130.63}, {70.57,128.03},{15.13,65.98},{88.17,138.65},{ 9.49,43.25}, {68.37,120.54},{74.69,143.68},{50.10,119.13},{ 8.76,32.56}, {37.70,101.07},{ 4.04,42.48},{29.84,97.01},{69.88,130.23}, { 8.65,45.47},{66.47,118.24},{12.35,55.24},{20.41,64.50}, 
{91.28,166.16},{91.82,165.77},{10.65,52.39},{79.95,147.31}, {16.06,57.21},{71.00,131.19},{91.27,155.74},{28.57,60.74}, {31.63,92.00},{15.84,50.24},{80.22,135.00},{51.52,96.40}, {67.42,117.71},{15.46,65.55},{85.71,151.69},{99.32,165.22}, {28.38,107.61},{75.32,129.56},{96.48,161.61},{36.18,95.12}, {34.18,71.39},{ 7.77,40.69},{37.98,65.63},{68.95,131.41}, {34.08,78.15},{29.70,84.37},{92.61,178.05},{82.22,128.06}, { 1.32,32.39},{ 2.57,41.54},{72.80,142.40},{19.88,47.67}, {95.42,167.29},{ 7.56,63.21},{90.52,146.47},{67.80,115.47}, {89.41,162.67},{87.72,165.63},{71.28,138.33},{25.08,70.34}, {41.57,86.86},{51.45,104.07},{61.54,121.29},{32.53,93.45}, { 3.26,29.72},{74.50,137.34},{45.07,102.67},{67.55,124.04}, {38.14,88.93},{ 2.46,37.56},{21.27,66.20},{32.26,67.66}, {36.05,64.93},{78.82,147.50},{ 7.26,40.83},{86.28,157.01}, {49.02,95.59},{80.36,136.69},{ 2.94,46.23},{50.55,102.49}, {28.35,69.73},{19.99,77.88},{22.42,72.52},{15.98,58.85}, {68.79,130.10},{23.15,58.87},{ 3.27,40.79},{79.02,150.83}, {37.04,72.00},{45.11,79.16},{30.61,58.70},{74.69,133.55}, {25.47,71.56},{89.34,157.40},{61.39,125.26},{ 1.52,31.44}, {78.37,136.81},{48.33,123.74},{71.58,126.89},{ 5.20,49.01}, {90.20,142.37},{94.03,153.03},{97.27,167.60},{91.90,156.09}, {56.45,131.39},{59.09,118.83},{61.75,115.24},{22.31,75.23}, {73.98,142.59},{24.38,61.52},{73.46,110.70},{27.35,85.50}, {55.66,115.25},{67.56,119.99},{61.81,130.15},{12.05,48.88}, {84.16,146.44},{48.33,99.47},{83.37,138.40},{23.51,81.53}, {85.14,138.65},{92.12,176.32},{53.13,115.81},{90.59,162.45}, {92.52,182.64},{76.45,149.04},{48.40,96.60},{ 8.70,45.67}, {96.13,174.15},{21.67,43.90},{15.13,43.97},{73.66,137.40}, {80.31,136.39},{79.59,139.56},{55.33,112.94},{27.38,47.87} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; 
double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } // Calculate the difference between two times. Returns zero on // success and the time difference through an argument. It will // be unsuccessful if the start time is after the end time. int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int d_sec = finish->tv_sec - start->tv_sec; long long int d_nsec = finish->tv_nsec - start->tv_nsec; if(d_nsec < 0 ) { d_sec--; d_nsec += 1000000000; } *difference = d_sec * 1000000000 + d_nsec; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d 
%s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, cudaGetErrorString(error)); } error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { double h_error_sum_arr[1000]; double error_sum_total; double error_sum_mean; d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaThreadSynchronize(); error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } for(int j=0; j<n_data; j++) { error_sum_total += h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } error_sum_total = 0; } if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); 
exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
a560d4f1c653bf55c32587b5215d7a357eb2a179.hip
// !!! This is a file automatically generated by hipify!!! // Program for Parallel Binary Search in CUDA // For Hadoop-CUDA Lab #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <assert.h> __device__ int get_index_to_check(int thread, int num_threads, int set_size, int offset) { // Integer division trick to round up return (((set_size + num_threads) / num_threads) * thread) + offset; } __global__ void p_ary_search(int search, int array_length, int *arr, int *ret_val ) { const int num_threads = blockDim.x * gridDim.x; const int thread = blockIdx.x * blockDim.x + threadIdx.x; //ret_val[0] = -1; //ret_val[1] = offset; int set_size = array_length; while(set_size != 0){ // Get the offset of the array, initially set to 0 int offset = ret_val[1]; // I think this is necessary in case a thread gets ahead, and resets offset before it's read // This isn't necessary for the unit tests to pass, but I still like it here __syncthreads(); // Get the next index to check int index_to_check = get_index_to_check(thread, num_threads, set_size, offset); // If the index is outside the bounds of the array then lets not check it if (index_to_check < array_length){ // If the next index is outside the bounds of the array, then set it to maximum array size int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset); if (next_index_to_check >= array_length){ next_index_to_check = array_length - 1; } // If we're at the mid section of the array reset the offset to this index if (search > arr[index_to_check] && (search < arr[next_index_to_check])) { ret_val[1] = index_to_check; } else if (search == arr[index_to_check]) { // Set the return var if we hit it ret_val[0] = index_to_check; } } // Since this is a p-ary search divide by our total threads to get the next set size set_size = set_size / num_threads; // Sync up so no threads jump ahead and get a bad offset __syncthreads(); } } int chop_position(int search, 
int *search_array, int array_length) { // Get the size of the array for future use int array_size = array_length * sizeof(int); // Don't bother with small arrays if (array_size == 0) return -1; // Setup array to use on device int *dev_arr; hipMalloc((void**)&dev_arr, array_size); // Copy search array values hipMemcpy(dev_arr, search_array, array_size, hipMemcpyHostToDevice); // return values here and on device int *ret_val = (int*)malloc(sizeof(int) * 2); ret_val[0] = -1; // return value ret_val[1] = 0; // offset array_length = array_length % 2 == 0 ? array_length : array_length - 1; // array size int *dev_ret_val; hipMalloc((void**)&dev_ret_val, sizeof(int) * 2); // Send in some intialized values hipMemcpy(dev_ret_val, ret_val, sizeof(int) * 2, hipMemcpyHostToDevice); // Launch kernel // This seems to be the best combo for p-ary search // Optimized around 10-15 registers per thread hipLaunchKernelGGL(( p_ary_search), dim3(16), dim3(64), 0, 0, search, array_length, dev_arr, dev_ret_val); // Get results hipMemcpy(ret_val, dev_ret_val, 2 * sizeof(int), hipMemcpyDeviceToHost); int ret = ret_val[0]; printf("Ret Val %i Offset %i\n", ret, ret_val[1]); // Free memory on device hipFree(dev_arr); hipFree(dev_ret_val); free(ret_val); return ret; } // Test region static int * build_array(int length) { int *ret_val = (int*)malloc(length * sizeof(int)); for (int i = 0; i < length; i++) { ret_val[i] = i * 2 - 1; } return ret_val; } static void test_array(int length, int search, int index) { printf("Length %i Search %i Index %i\n", length, search, index); // assert(index == chop_position(search, build_array(length), length) && "test_small_array()"); } static void test_arrays() { test_array(200, 200, -1); test_array(200, -1, 0); test_array(200, 1, 1); test_array(200, 29, 15); test_array(200, 129, 65); test_array(200, 395, 198); test_array(20000, 395, 198); test_array(2000000, 394, -1); test_array(20000000, 394, -1); } int main(){ test_arrays(); }
a560d4f1c653bf55c32587b5215d7a357eb2a179.cu
// Program for Parallel Binary Search in CUDA // For Hadoop-CUDA Lab #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <assert.h> __device__ int get_index_to_check(int thread, int num_threads, int set_size, int offset) { // Integer division trick to round up return (((set_size + num_threads) / num_threads) * thread) + offset; } __global__ void p_ary_search(int search, int array_length, int *arr, int *ret_val ) { const int num_threads = blockDim.x * gridDim.x; const int thread = blockIdx.x * blockDim.x + threadIdx.x; //ret_val[0] = -1; //ret_val[1] = offset; int set_size = array_length; while(set_size != 0){ // Get the offset of the array, initially set to 0 int offset = ret_val[1]; // I think this is necessary in case a thread gets ahead, and resets offset before it's read // This isn't necessary for the unit tests to pass, but I still like it here __syncthreads(); // Get the next index to check int index_to_check = get_index_to_check(thread, num_threads, set_size, offset); // If the index is outside the bounds of the array then lets not check it if (index_to_check < array_length){ // If the next index is outside the bounds of the array, then set it to maximum array size int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset); if (next_index_to_check >= array_length){ next_index_to_check = array_length - 1; } // If we're at the mid section of the array reset the offset to this index if (search > arr[index_to_check] && (search < arr[next_index_to_check])) { ret_val[1] = index_to_check; } else if (search == arr[index_to_check]) { // Set the return var if we hit it ret_val[0] = index_to_check; } } // Since this is a p-ary search divide by our total threads to get the next set size set_size = set_size / num_threads; // Sync up so no threads jump ahead and get a bad offset __syncthreads(); } } int chop_position(int search, int *search_array, int array_length) { // Get the size of the array for 
future use int array_size = array_length * sizeof(int); // Don't bother with small arrays if (array_size == 0) return -1; // Setup array to use on device int *dev_arr; cudaMalloc((void**)&dev_arr, array_size); // Copy search array values cudaMemcpy(dev_arr, search_array, array_size, cudaMemcpyHostToDevice); // return values here and on device int *ret_val = (int*)malloc(sizeof(int) * 2); ret_val[0] = -1; // return value ret_val[1] = 0; // offset array_length = array_length % 2 == 0 ? array_length : array_length - 1; // array size int *dev_ret_val; cudaMalloc((void**)&dev_ret_val, sizeof(int) * 2); // Send in some intialized values cudaMemcpy(dev_ret_val, ret_val, sizeof(int) * 2, cudaMemcpyHostToDevice); // Launch kernel // This seems to be the best combo for p-ary search // Optimized around 10-15 registers per thread p_ary_search<<<16, 64>>>(search, array_length, dev_arr, dev_ret_val); // Get results cudaMemcpy(ret_val, dev_ret_val, 2 * sizeof(int), cudaMemcpyDeviceToHost); int ret = ret_val[0]; printf("Ret Val %i Offset %i\n", ret, ret_val[1]); // Free memory on device cudaFree(dev_arr); cudaFree(dev_ret_val); free(ret_val); return ret; } // Test region static int * build_array(int length) { int *ret_val = (int*)malloc(length * sizeof(int)); for (int i = 0; i < length; i++) { ret_val[i] = i * 2 - 1; } return ret_val; } static void test_array(int length, int search, int index) { printf("Length %i Search %i Index %i\n", length, search, index); // assert(index == chop_position(search, build_array(length), length) && "test_small_array()"); } static void test_arrays() { test_array(200, 200, -1); test_array(200, -1, 0); test_array(200, 1, 1); test_array(200, 29, 15); test_array(200, 129, 65); test_array(200, 395, 198); test_array(20000, 395, 198); test_array(2000000, 394, -1); test_array(20000000, 394, -1); } int main(){ test_arrays(); }
a193412f6ba77e53703efa2660468e1f954fa10f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <IL/il.h> using img = std::vector<unsigned char>; __global__ void grayscaleKernel(unsigned char* r, unsigned char* g, unsigned char* b, unsigned char* out, int width, int height){ // 2D dimension grid int tidX = blockIdx.x * blockDim.x + threadIdx.x; int tidY = blockIdx.y * blockDim.y + threadIdx.y; if(tidX < width && tidY < height){ int matrixIndex = width*tidY + tidX; out[matrixIndex] = (307 * r[matrixIndex] + 604 * g[matrixIndex] + 113 * b[matrixIndex]) >> 10; } } int main( int argc, char * argv[] ) { std::string filename; if(argc <= 1){ std::cerr << "Error: Missing path to image file." << std::endl; std::exit(1); } filename = argv[ 1 ]; unsigned int images[2]; ilInit(); ilGenImages(2, images); ilBindImage(images[0]); ilLoadImage(filename.c_str()); auto const width = ilGetInteger(IL_IMAGE_WIDTH); auto const height = ilGetInteger(IL_IMAGE_HEIGHT); auto const size = width*height; auto const nbBytesImage = size * sizeof(unsigned char); // Image is stored as: rgbrgbrgb.... 
unsigned char * data = ilGetData(); unsigned char* redMatrix_h = nullptr; unsigned char* greenMatrix_h = nullptr; unsigned char* blueMatrix_h = nullptr; unsigned char* out_h = nullptr; unsigned char* redMatrix_d = nullptr; unsigned char* greenMatrix_d = nullptr; unsigned char* blueMatrix_d = nullptr; unsigned char* out_d = nullptr; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int const nbStreams = 10; hipStream_t streams[nbStreams]; for(int i=0; i<nbStreams; ++i){ hipStreamCreate(&streams[i]); } // Divide the image vertically by the number of streams int const chunkVectorHeight = height/nbStreams; int const chunkVectorHeightBytes = width * chunkVectorHeight * sizeof(unsigned char); // Allocate host matrices (R, G, B, and out) hipHostMalloc(&redMatrix_h, nbBytesImage); hipHostMalloc(&greenMatrix_h, nbBytesImage); hipHostMalloc(&blueMatrix_h, nbBytesImage); hipHostMalloc(&out_h, nbBytesImage); // Fill host matrices for(int i=0; i<size; ++i){ redMatrix_h[i] = data[i*3]; greenMatrix_h[i] = data[(i+1)*3]; blueMatrix_h[i] = data[(i+2)*3]; } // Allocate device matrices (R, G, B, and out) hipMalloc(&redMatrix_d, nbBytesImage); hipMalloc(&greenMatrix_d, nbBytesImage); hipMalloc(&blueMatrix_d, nbBytesImage); hipMalloc(&out_d, nbBytesImage); // Launch kernek within streams for(int i=0; i<nbStreams; ++i){ // We split the image vertically // the chunk is the width x chunk vertical // so we need to offset the matrix adress by currentStream // Then we can copy by chunk int offset = width * chunkVectorHeight * i; hipMemcpyAsync(redMatrix_d+offset, redMatrix_h+offset, chunkVectorHeightBytes, hipMemcpyHostToDevice, streams[i]); hipMemcpyAsync(greenMatrix_d+offset, greenMatrix_h+offset, chunkVectorHeightBytes, hipMemcpyHostToDevice, streams[i]); hipMemcpyAsync(blueMatrix_d+offset, blueMatrix_h+offset, chunkVectorHeightBytes, hipMemcpyHostToDevice, streams[i]); } // block/grid dimension setup dim3 block(32, 32); // grid size is automatically adjusted 
according the block size and the image size dim3 grid((width-1)/block.x + 1, (height-1)/(block.y*nbStreams) + 1); // Launch kernels for(int i=0; i<nbStreams; ++i){ hipEventRecord(start, streams[i]); int offset = width * chunkVectorHeight * i; hipLaunchKernelGGL(( grayscaleKernel), dim3(grid), dim3(block), 0, streams[i] , redMatrix_d+offset, greenMatrix_d+offset, blueMatrix_d+offset, out_d+offset, width, height/nbStreams); } // Put back GPU result to CPU by chunk (stream) for(int i=0; i<nbStreams; ++i){ int offset = width*chunkVectorHeight*i; hipMemcpyAsync(out_h+offset, out_d+offset, chunkVectorHeightBytes, hipMemcpyDeviceToHost, streams[i]); } // Synchronize streams since memcpy is async for(int i=0; i<nbStreams; ++i){ hipStreamSynchronize(streams[i]); hipEventRecord(stop, streams[i]); } // Show elapsed time hipEventSynchronize(stop); float ms; hipEventElapsedTime(&ms, start, stop); std::cout << ms << "ms"; // Write the image on disk ilBindImage( images[ 1 ] ); ilTexImage( width, height, 1, 1, IL_LUMINANCE, IL_UNSIGNED_BYTE, out_h); ilEnable(IL_FILE_OVERWRITE); ilSaveImage("out.jpg"); ilDeleteImages( 2, images ); // Finally free the host and devices ressources hipHostFree(redMatrix_h); hipHostFree(greenMatrix_h); hipHostFree(blueMatrix_h); hipHostFree(out_h); hipFree(redMatrix_d); hipFree(greenMatrix_d); hipFree(blueMatrix_d); hipFree(out_d); return 0; }
a193412f6ba77e53703efa2660468e1f954fa10f.cu
#include <iostream> #include <vector> #include <IL/il.h> using img = std::vector<unsigned char>; __global__ void grayscaleKernel(unsigned char* r, unsigned char* g, unsigned char* b, unsigned char* out, int width, int height){ // 2D dimension grid int tidX = blockIdx.x * blockDim.x + threadIdx.x; int tidY = blockIdx.y * blockDim.y + threadIdx.y; if(tidX < width && tidY < height){ int matrixIndex = width*tidY + tidX; out[matrixIndex] = (307 * r[matrixIndex] + 604 * g[matrixIndex] + 113 * b[matrixIndex]) >> 10; } } int main( int argc, char * argv[] ) { std::string filename; if(argc <= 1){ std::cerr << "Error: Missing path to image file." << std::endl; std::exit(1); } filename = argv[ 1 ]; unsigned int images[2]; ilInit(); ilGenImages(2, images); ilBindImage(images[0]); ilLoadImage(filename.c_str()); auto const width = ilGetInteger(IL_IMAGE_WIDTH); auto const height = ilGetInteger(IL_IMAGE_HEIGHT); auto const size = width*height; auto const nbBytesImage = size * sizeof(unsigned char); // Image is stored as: rgbrgbrgb.... 
unsigned char * data = ilGetData(); unsigned char* redMatrix_h = nullptr; unsigned char* greenMatrix_h = nullptr; unsigned char* blueMatrix_h = nullptr; unsigned char* out_h = nullptr; unsigned char* redMatrix_d = nullptr; unsigned char* greenMatrix_d = nullptr; unsigned char* blueMatrix_d = nullptr; unsigned char* out_d = nullptr; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int const nbStreams = 10; cudaStream_t streams[nbStreams]; for(int i=0; i<nbStreams; ++i){ cudaStreamCreate(&streams[i]); } // Divide the image vertically by the number of streams int const chunkVectorHeight = height/nbStreams; int const chunkVectorHeightBytes = width * chunkVectorHeight * sizeof(unsigned char); // Allocate host matrices (R, G, B, and out) cudaMallocHost(&redMatrix_h, nbBytesImage); cudaMallocHost(&greenMatrix_h, nbBytesImage); cudaMallocHost(&blueMatrix_h, nbBytesImage); cudaMallocHost(&out_h, nbBytesImage); // Fill host matrices for(int i=0; i<size; ++i){ redMatrix_h[i] = data[i*3]; greenMatrix_h[i] = data[(i+1)*3]; blueMatrix_h[i] = data[(i+2)*3]; } // Allocate device matrices (R, G, B, and out) cudaMalloc(&redMatrix_d, nbBytesImage); cudaMalloc(&greenMatrix_d, nbBytesImage); cudaMalloc(&blueMatrix_d, nbBytesImage); cudaMalloc(&out_d, nbBytesImage); // Launch kernek within streams for(int i=0; i<nbStreams; ++i){ // We split the image vertically // the chunk is the width x chunk vertical // so we need to offset the matrix adress by currentStream // Then we can copy by chunk int offset = width * chunkVectorHeight * i; cudaMemcpyAsync(redMatrix_d+offset, redMatrix_h+offset, chunkVectorHeightBytes, cudaMemcpyHostToDevice, streams[i]); cudaMemcpyAsync(greenMatrix_d+offset, greenMatrix_h+offset, chunkVectorHeightBytes, cudaMemcpyHostToDevice, streams[i]); cudaMemcpyAsync(blueMatrix_d+offset, blueMatrix_h+offset, chunkVectorHeightBytes, cudaMemcpyHostToDevice, streams[i]); } // block/grid dimension setup dim3 block(32, 32); // grid size is 
automatically adjusted according the block size and the image size dim3 grid((width-1)/block.x + 1, (height-1)/(block.y*nbStreams) + 1); // Launch kernels for(int i=0; i<nbStreams; ++i){ cudaEventRecord(start, streams[i]); int offset = width * chunkVectorHeight * i; grayscaleKernel<<<grid, block, 0, streams[i] >>>(redMatrix_d+offset, greenMatrix_d+offset, blueMatrix_d+offset, out_d+offset, width, height/nbStreams); } // Put back GPU result to CPU by chunk (stream) for(int i=0; i<nbStreams; ++i){ int offset = width*chunkVectorHeight*i; cudaMemcpyAsync(out_h+offset, out_d+offset, chunkVectorHeightBytes, cudaMemcpyDeviceToHost, streams[i]); } // Synchronize streams since memcpy is async for(int i=0; i<nbStreams; ++i){ cudaStreamSynchronize(streams[i]); cudaEventRecord(stop, streams[i]); } // Show elapsed time cudaEventSynchronize(stop); float ms; cudaEventElapsedTime(&ms, start, stop); std::cout << ms << "ms"; // Write the image on disk ilBindImage( images[ 1 ] ); ilTexImage( width, height, 1, 1, IL_LUMINANCE, IL_UNSIGNED_BYTE, out_h); ilEnable(IL_FILE_OVERWRITE); ilSaveImage("out.jpg"); ilDeleteImages( 2, images ); // Finally free the host and devices ressources cudaFreeHost(redMatrix_h); cudaFreeHost(greenMatrix_h); cudaFreeHost(blueMatrix_h); cudaFreeHost(out_h); cudaFree(redMatrix_d); cudaFree(greenMatrix_d); cudaFree(blueMatrix_d); cudaFree(out_d); return 0; }
d42c6a2c152efe1a928109305071c3e5b3e28990.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "decode_rotate.h" #include "utils.h" #include <algorithm> #include <cstdint> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include <thrust/gather.h> #include <thrust/tabulate.h> #include <thrust/count.h> #include <thrust/find.h> #include <thrust/system/hip/detail/hipcub/hipcub.hpp> #include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh> namespace retinanet { namespace cuda { typedef __host__ __device__ struct float6 { float x, y, z, u, v, w; }; int decode_rotate(int batch_size, const void *const *inputs, void **outputs, size_t height, size_t width, size_t scale, size_t num_anchors, size_t num_classes, const std::vector<float> &anchors, float score_thresh, int top_n, void *workspace, size_t workspace_size, hipStream_t stream) { int scores_size = num_anchors * num_classes * height * width; if (!workspace || !workspace_size) { // Return required scratch space size cub style workspace_size = get_size_aligned<float>(anchors.size()); // anchors workspace_size += get_size_aligned<bool>(scores_size); // flags workspace_size += get_size_aligned<int>(scores_size); // indices workspace_size += get_size_aligned<int>(scores_size); // indices_sorted workspace_size += get_size_aligned<float>(scores_size); // scores workspace_size += get_size_aligned<float>(scores_size); // scores_sorted size_t temp_size_flag = 0; thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag, thrust::cuda_cub::hipcub::CountingInputIterator<int>(scores_size), (bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size); size_t temp_size_sort = 0; thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort, (float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size); workspace_size += ::max(temp_size_flag, temp_size_sort); return workspace_size; } auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size); 
hipMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, hipMemcpyHostToDevice, stream); auto on_stream = thrust::hip::par.on(stream); auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size); auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size); auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size); auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size); auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size); for (int batch = 0; batch < batch_size; batch++) { auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size; auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 6; //From 4 auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n; auto out_boxes = static_cast<float6 *>(outputs[1]) + batch * top_n; // From float4 auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n; // Discard scores below threshold thrust::transform(on_stream, in_scores, in_scores + scores_size, flags, thrust::placeholders::_1 > score_thresh); int *num_selected = reinterpret_cast<int *>(indices_sorted); thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size, thrust::cuda_cub::hipcub::CountingInputIterator<int>(0), flags, indices, num_selected, scores_size, stream); hipStreamSynchronize(stream); int num_detections = *thrust::device_pointer_cast(num_selected); // Only keep top n scores auto indices_filtered = indices; if (num_detections > top_n) { thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores); thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream); indices_filtered = indices_sorted; num_detections = top_n; } // Gather boxes bool has_anchors = !anchors.empty(); thrust::transform(on_stream, indices_filtered, 
indices_filtered + num_detections, thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)), [=] __device__ (int i) { int x = i % width; int y = (i / width) % height; int a = (i / num_classes / height / width) % num_anchors; int cls = (i / height / width) % num_classes; float6 box = float6{ in_boxes[((a * 6 + 0) * height + y) * width + x], in_boxes[((a * 6 + 1) * height + y) * width + x], in_boxes[((a * 6 + 2) * height + y) * width + x], in_boxes[((a * 6 + 3) * height + y) * width + x], in_boxes[((a * 6 + 4) * height + y) * width + x], in_boxes[((a * 6 + 5) * height + y) * width + x] }; if (has_anchors) { // Add anchors offsets to deltas float x = (i % width) * scale; float y = ((i / width) % height) * scale; float *d = anchors_d + 4*a; float x1 = x + d[0]; float y1 = y + d[1]; float x2 = x + d[2]; float y2 = y + d[3]; float w = x2 - x1 + 1.0f; float h = y2 - y1 + 1.0f; float pred_ctr_x = box.x * w + x1 + 0.5f * w; float pred_ctr_y = box.y * h + y1 + 0.5f * h; float pred_w = exp(box.z) * w; float pred_h = exp(box.u) * h; float pred_sin = box.v; float pred_cos = box.w; box = float6{ max(0.0f, pred_ctr_x - 0.5f * pred_w), max(0.0f, pred_ctr_y - 0.5f * pred_h), min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f), min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f), pred_sin, pred_cos }; } return thrust::make_tuple(in_scores[i], box, cls); }); // Zero-out unused scores if (num_detections < top_n) { thrust::fill(on_stream, out_scores + num_detections, out_scores + top_n, 0.0f); thrust::fill(on_stream, out_classes + num_detections, out_classes + top_n, 0.0f); } } return 0; } } }
d42c6a2c152efe1a928109305071c3e5b3e28990.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "decode_rotate.h" #include "utils.h" #include <algorithm> #include <cstdint> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include <thrust/gather.h> #include <thrust/tabulate.h> #include <thrust/count.h> #include <thrust/find.h> #include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh> #include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh> namespace retinanet { namespace cuda { typedef __host__ __device__ struct float6 { float x, y, z, u, v, w; }; int decode_rotate(int batch_size, const void *const *inputs, void **outputs, size_t height, size_t width, size_t scale, size_t num_anchors, size_t num_classes, const std::vector<float> &anchors, float score_thresh, int top_n, void *workspace, size_t workspace_size, cudaStream_t stream) { int scores_size = num_anchors * num_classes * height * width; if (!workspace || !workspace_size) { // Return required scratch space size cub style workspace_size = get_size_aligned<float>(anchors.size()); // anchors workspace_size += get_size_aligned<bool>(scores_size); // flags workspace_size += get_size_aligned<int>(scores_size); // indices workspace_size += get_size_aligned<int>(scores_size); // indices_sorted workspace_size += get_size_aligned<float>(scores_size); // scores workspace_size += get_size_aligned<float>(scores_size); // scores_sorted size_t temp_size_flag = 0; thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag, thrust::cuda_cub::cub::CountingInputIterator<int>(scores_size), (bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size); size_t temp_size_sort = 0; thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort, (float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size); workspace_size += std::max(temp_size_flag, temp_size_sort); return workspace_size; } auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size); 
cudaMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, cudaMemcpyHostToDevice, stream); auto on_stream = thrust::cuda::par.on(stream); auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size); auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size); auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size); auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size); auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size); for (int batch = 0; batch < batch_size; batch++) { auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size; auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 6; //From 4 auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n; auto out_boxes = static_cast<float6 *>(outputs[1]) + batch * top_n; // From float4 auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n; // Discard scores below threshold thrust::transform(on_stream, in_scores, in_scores + scores_size, flags, thrust::placeholders::_1 > score_thresh); int *num_selected = reinterpret_cast<int *>(indices_sorted); thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size, thrust::cuda_cub::cub::CountingInputIterator<int>(0), flags, indices, num_selected, scores_size, stream); cudaStreamSynchronize(stream); int num_detections = *thrust::device_pointer_cast(num_selected); // Only keep top n scores auto indices_filtered = indices; if (num_detections > top_n) { thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores); thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream); indices_filtered = indices_sorted; num_detections = top_n; } // Gather boxes bool has_anchors = !anchors.empty(); thrust::transform(on_stream, indices_filtered, 
indices_filtered + num_detections, thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)), [=] __device__ (int i) { int x = i % width; int y = (i / width) % height; int a = (i / num_classes / height / width) % num_anchors; int cls = (i / height / width) % num_classes; float6 box = float6{ in_boxes[((a * 6 + 0) * height + y) * width + x], in_boxes[((a * 6 + 1) * height + y) * width + x], in_boxes[((a * 6 + 2) * height + y) * width + x], in_boxes[((a * 6 + 3) * height + y) * width + x], in_boxes[((a * 6 + 4) * height + y) * width + x], in_boxes[((a * 6 + 5) * height + y) * width + x] }; if (has_anchors) { // Add anchors offsets to deltas float x = (i % width) * scale; float y = ((i / width) % height) * scale; float *d = anchors_d + 4*a; float x1 = x + d[0]; float y1 = y + d[1]; float x2 = x + d[2]; float y2 = y + d[3]; float w = x2 - x1 + 1.0f; float h = y2 - y1 + 1.0f; float pred_ctr_x = box.x * w + x1 + 0.5f * w; float pred_ctr_y = box.y * h + y1 + 0.5f * h; float pred_w = exp(box.z) * w; float pred_h = exp(box.u) * h; float pred_sin = box.v; float pred_cos = box.w; box = float6{ max(0.0f, pred_ctr_x - 0.5f * pred_w), max(0.0f, pred_ctr_y - 0.5f * pred_h), min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f), min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f), pred_sin, pred_cos }; } return thrust::make_tuple(in_scores[i], box, cls); }); // Zero-out unused scores if (num_detections < top_n) { thrust::fill(on_stream, out_scores + num_detections, out_scores + top_n, 0.0f); thrust::fill(on_stream, out_classes + num_detections, out_classes + top_n, 0.0f); } } return 0; } } }
93bfe6eb6c87a0db1ade63941a2be57e93799181.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "common.h" #include <pthread.h> #include <cstdio> #include <getopt.h> #include <libgen.h> #include "hip/hip_runtime.h" #if NCCL_MAJOR >= 2 ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble}; const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"}; #else ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64}; const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"}; #endif ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin}; const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"}; thread_local int is_main_thread = 0; // Command line parameter defaults static int nThreads = 1; static int nGpus = 1; static size_t minBytes = 32*1024*1024; static size_t maxBytes = 32*1024*1024; static size_t stepBytes = 1*1024*1024; static size_t stepFactor = 1; static int datacheck = 1; static int warmup_iters = 5; static int iters = 20; static int agg_iters = 1; static int ncclop = ncclSum; static int nccltype = ncclFloat; static int ncclroot = 0; static int parallel_init = 0; static int blocking_coll = 0; double parsesize(char *value) { long long int units; double size; if (strchr(value, 'G') != NULL) { units=1024*1024*1024; } else if (strchr(value, 'M') != NULL) { units=1024*1024; } else if (strchr(value, 'K') != NULL) { units=1024; } else { units=1; } size = atof(value)*units; return size; } double DeltaMaxValue(ncclDataType_t type) { switch(type) { case 
ncclHalf: return 1e-2; case ncclFloat: return 1e-5; case ncclDouble: return 1e-12; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint8: //case ncclInt32: case ncclUint32: #endif case ncclInt64: case ncclUint64: return 1e-200; } return 1e-200; } template<typename T> __device__ double absDiff(T a, T b) { return fabs((double)(b - a)); } template<> __device__ double absDiff<half>(half a, half b) { float x = __half2float(a); float y = __half2float(b); return fabs((double)(y-x)); } template<typename T> __device__ float toFloat(T a) { return (float)a; } template<> __device__ float toFloat(half a) { return __half2float(a); } template<typename T, int BSIZE> __global__ void deltaKern(void* A_, void* B_, size_t count, double* max) { const T* A = (const T*)A_; const T* B = (const T*)B_; __shared__ double temp[BSIZE]; int tid = threadIdx.x; double locmax = 0.0; for(int i=tid; i<count; i+=blockDim.x) { double delta = absDiff(A[i], B[i]); if( delta > locmax ) { locmax = delta; #ifdef DEBUG_PRINT if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i])); #endif } } temp[tid] = locmax; for(int stride = BSIZE/2; stride > 1; stride>>=1) { __syncthreads(); if( tid < stride ) temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride]; } __syncthreads(); if( threadIdx.x == 0) *max = temp[0] > temp[1] ? 
temp[0] : temp[1]; } testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) { switch (type) { case ncclHalf: hipLaunchKernelGGL(( deltaKern<half, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclFloat: hipLaunchKernelGGL(( deltaKern<float, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclDouble: hipLaunchKernelGGL(( deltaKern<double, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclChar: #if NCCL_MAJOR >= 2 case ncclUint8: #endif hipLaunchKernelGGL(( deltaKern<uint8_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint32: #endif hipLaunchKernelGGL(( deltaKern<uint32_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; case ncclInt64: case ncclUint64: hipLaunchKernelGGL(( deltaKern<uint64_t, 512>), dim3(1), dim3(512), 0, 0, results, expected, count, devmax); break; } CUDACHECK(hipDeviceSynchronize()); return testSuccess; } // For integer values, we use values between 0 and 255 template<typename T> __device__ T testValue(const size_t offset, const int rep, const int rank) { uint8_t v = (rep+rank+offset) % 256; return (T)v; } // For floating point datatype, we use values between 0 and 1 otherwise the // Product operation will produce NaNs. 
template<> __device__ double testValue<double>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(double)testValue<int>(offset, rep, rank)); } template<> __device__ float testValue<float>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(float)testValue<int>(offset, rep, rank)); } template<> __device__ half testValue<half>(const size_t offset, const int rep, const int rank) { return __float2half(testValue<float>(offset, rep, rank)); } // Operations template<typename T> __device__ T ncclOpSum(T a, T b) { return a+b; } template<typename T> __device__ T ncclOpProd(T a, T b) { return a*b; } template<typename T> __device__ T ncclOpMax(T a, T b) { return a>b ? a : b; } template<typename T> __device__ T ncclOpMin(T a, T b) { return a<b ? a : b; } // Definitions for half template<> __device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); } template<> __device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); } template<> __device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; } template<> __device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? 
a : b; } template<typename T, T (*Op)(T, T)> __global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) { T val = testValue<T>(o+offset, rep, 0); for (int i=1; i<nranks; i++) { val = Op(val, testValue<T>(o+offset, rep, i)); } data[o] = val; } } #define KERN(type, op) (void*)InitDataReduceKernel<type, op<type>> #define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin) static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = { OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double) }; testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks }; CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, hipStreamDefault)); return testSuccess; } template<typename T> __global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) data[o] = testValue<T>(o, rep, rank); } static void* const initDataKerns[ncclNumTypes] = { (void*)InitDataKernel< int8_t>, (void*)InitDataKernel< uint8_t>, (void*)InitDataKernel< int32_t>, (void*)InitDataKernel<uint32_t>, (void*)InitDataKernel< int64_t>, (void*)InitDataKernel<uint64_t>, (void*)InitDataKernel< half>, (void*)InitDataKernel< float>, (void*)InitDataKernel< double> }; template<typename T> testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) { T* ptr = (T*)dest; hipLaunchKernelGGL(( InitDataKernel), dim3(16), dim3(512), 0, 0, ptr, N, rep, rank); return testSuccess; } testResult_t 
InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank }; CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, hipStreamDefault)); return testSuccess; } void Barrier(struct threadArgs* args) { while (args->barrier[args->barrier_idx] != args->thread) pthread_yield(); args->barrier[args->barrier_idx] = args->thread + 1; if (args->thread+1 == args->nThreads) { #ifdef MPI_SUPPORT MPI_Barrier(MPI_COMM_WORLD); #endif args->barrier[args->barrier_idx] = 0; } else { while (args->barrier[args->barrier_idx]) pthread_yield(); } args->barrier_idx=!args->barrier_idx; } testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) { size_t count = args->expectedBytes/wordSize(type); double maxDelta = 0.0; for (int i=0; i<args->nGpus; i++) { int device; int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); NCCLCHECK(ncclCommCuDevice(args->comms[i], &device)); CUDACHECK(hipSetDevice(device)); void *data = in_place ? 
((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i]; TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta)); maxDelta = ::max(*(args->deltaHost), maxDelta); #ifdef DEBUG_PRINT if (rank == 0) { int *expectedHost = (int *)malloc(args->expectedBytes); int *dataHost = (int *)malloc(args->expectedBytes); hipMemcpy(expectedHost, args->expected[0], args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Expected: "); for(int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, expectedHost[j]); } printf("\n"); hipMemcpy(dataHost, data, args->expectedBytes, hipMemcpyDeviceToHost); printf("\n Actual: "); for (int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, dataHost[j]); } printf("\n"); free(expectedHost); free(dataHost); } #endif } double nranks = args->nProcs*args->nThreads*args->nGpus; if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++; *delta = maxDelta; return testSuccess; } testResult_t testStreamSynchronize(int ngpus, hipStream_t* streams, ncclComm_t* comms) { hipError_t cudaErr; int remaining = ngpus; int* done = (int*)malloc(sizeof(int)*ngpus); memset(done, 0, sizeof(int)*ngpus); while (remaining) { int idle = 1; for (int i=0; i<ngpus; i++) { if (done[i]) continue; cudaErr = hipStreamQuery(streams[i]); if (cudaErr == hipSuccess) { done[i] = 1; remaining--; idle = 0; continue; } if (cudaErr != hipErrorNotReady) CUDACHECK(cudaErr); #if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0) if (comms) { ncclResult_t ncclAsyncErr; NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr)); if (ncclAsyncErr != ncclSuccess) { // An asynchronous error happened. Stop the operation and destroy // the communicator for (int i=0; i<ngpus; i++) NCCLCHECK(ncclCommAbort(comms[i])); // Abort the perf test NCCLCHECK(ncclAsyncErr); } } #endif } // We might want to let other threads (including NCCL threads) use the CPU. 
if (idle) pthread_yield(); } free(done); return testSuccess; } testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) { size_t count = args->nbytes / wordSize(type); // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange size_t totalnbytes = max(args->sendBytes, args->expectedBytes); size_t shift = (totalnbytes * iter) % args->maxbytes; if (shift + totalnbytes > args->maxbytes) shift = 0; if (args->nGpus > 1) NCCLCHECK(ncclGroupStart()); for (int i = 0; i < args->nGpus; i++) { #ifndef NCCL_MAJOR int cudaDev; NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev)); CUDACHECK(hipSetDevice(cudaDev)); #endif int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); char* recvBuff = ((char*)args->recvbuffs[i]) + shift; char* sendBuff = ((char*)args->sendbuffs[i]) + shift; TESTCHECK(args->collTest->runColl( (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff), (void*)(in_place ? 
recvBuff + args->recvInplaceOffset*rank : recvBuff), count, type, op, root, args->comms[i], args->streams[i])); } if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd()); if (blocking_coll) { // Complete op before returning TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); } if (blocking_coll) Barrier(args); return testSuccess; } testResult_t completeColl(struct threadArgs* args) { if (blocking_coll) return testSuccess; TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); return testSuccess; } testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) { size_t count = args->nbytes / wordSize(type); // Sync TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); Barrier(args); // Performance Benchmark auto start = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < iters; iter++) { if (agg_iters>1) NCCLCHECK(ncclGroupStart()); for (int aiter = 0; aiter < agg_iters; aiter++) { TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter)); } if (agg_iters>1) NCCLCHECK(ncclGroupEnd()); } TESTCHECK(completeColl(args)); auto delta = std::chrono::high_resolution_clock::now() - start; double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count(); deltaSec = deltaSec/(iters*agg_iters); double algBw, busBw; args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus); Barrier(args); double maxDelta = 0; static __thread int rep = 0; rep++; if (datacheck) { // Initialize sendbuffs, recvbuffs and expected TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place)); //test validation in single itertion, should ideally be included into the multi-iteration run TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta)); //aggregate delta from all 
threads and procs Barrier(args); if (args->thread == 0) { for (int i=1; i<args->nThreads; i++) { maxDelta += args->deltaThreads[i]; } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); #endif } Barrier(args); } double timeUsec = deltaSec*1.0E6; char timeStr[10]; if (timeUsec > 10000.0) { sprintf(timeStr, "%7.0f", timeUsec); } else if (timeUsec > 100.0) { sprintf(timeStr, "%7.1f", timeUsec); } else { sprintf(timeStr, "%7.2f", timeUsec); } if (datacheck) { PRINT(" %7s %6.2f %6.2f %5.0le", timeStr, algBw, busBw, maxDelta); } else { PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A"); } args->bw[0] += busBw; args->bw_count[0]++; return testSuccess; } void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) { int nranks = args->nProcs*args->nGpus*args->nThreads; size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset; count = size / wordSize(type); args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks); args->nbytes = paramCount * wordSize(type); args->sendBytes = sendCount * wordSize(type); args->expectedBytes = recvCount * wordSize(type); args->sendInplaceOffset = sendInplaceOffset * wordSize(type); args->recvInplaceOffset = recvInplaceOffset * wordSize(type); } testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) { // Warm-up for large size setupArgs(args->maxbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Warm-up for small size setupArgs(args->minbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Benchmark for (size_t size = args->minbytes; size<=args->maxbytes; size = 
((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) { setupArgs(size, type, args); print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root); TESTCHECK(BenchTime(args, type, op, root, 0)); TESTCHECK(BenchTime(args, type, op, root, 1)); PRINT("\n"); } return testSuccess; } testResult_t threadRunTests(struct threadArgs* args) { // Set device to the first of our GPUs. If we don't do that, some operations // will be done on the current GPU (by default : 0) and if the GPUs are in // exclusive mode those operations will fail. int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus; CUDACHECK(hipSetDevice(gpuid)); TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop])); return testSuccess; } testResult_t threadInit(struct threadArgs* args) { char hostname[1024]; getHostName(hostname, 1024); int nranks = args->nProcs*args->nThreads*args->nGpus; //set main thread again is_main_thread = (args->proc == 0 && args->thread == 0) ? 
1 : 0; NCCLCHECK(ncclGroupStart()); for (int i=0; i<args->nGpus; i++) { int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i; int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; CUDACHECK(hipSetDevice(gpuid)); NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank)); } NCCLCHECK(ncclGroupEnd()); TESTCHECK(threadRunTests(args)); for (int i=0; i<args->nGpus; i++) { NCCLCHECK(ncclCommDestroy(args->comms[i])); } return testSuccess; } void* threadLauncher(void* thread_) { struct testThread* thread = (struct testThread*)thread_; thread->ret = thread->func(&thread->args); return NULL; } testResult_t threadLaunch(struct testThread* thread) { pthread_create(&thread->thread, NULL, threadLauncher, thread); return testSuccess; } testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) { CUDACHECK(hipMalloc(sendbuff, nbytes)); CUDACHECK(hipMalloc(recvbuff, nbytes)); if (datacheck) CUDACHECK(hipMalloc(expected, recvBytes)); return testSuccess; } testResult_t run(); // Main function int main(int argc, char* argv[]) { // Make sure everyline is flushed so that we see the progress of the test setlinebuf(stdout); // Parse args int longindex; static struct option longopts[] = { {"nthreads", required_argument, 0, 't'}, {"ngpus", required_argument, 0, 'g'}, {"minbytes", required_argument, 0, 'b'}, {"maxbytes", required_argument, 0, 'e'}, {"stepbytes", required_argument, 0, 'i'}, {"stepfactor", required_argument, 0, 'f'}, {"iters", required_argument, 0, 'n'}, {"agg_iters", required_argument, 0, 'm'}, {"warmup_iters", required_argument, 0, 'w'}, {"parallel_init", required_argument, 0, 'p'}, {"check", required_argument, 0, 'c'}, {"op", required_argument, 0, 'o'}, {"datatype", required_argument, 0, 'd'}, {"root", required_argument, 0, 'r'}, {"blocking", required_argument, 0, 'z'}, {"help", no_argument, 0, 'h'} }; while(1) { int c; c 
= getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:h", longopts, &longindex); if (c == -1) break; switch(c) { case 't': nThreads = strtol(optarg, NULL, 0); break; case 'g': nGpus = strtol(optarg, NULL, 0); break; case 'b': minBytes = (size_t)parsesize(optarg); break; case 'e': maxBytes = (size_t)parsesize(optarg); break; case 'i': stepBytes = strtol(optarg, NULL, 0); break; case 'f': stepFactor = strtol(optarg, NULL, 0); break; case 'n': iters = (int)strtol(optarg, NULL, 0); break; case 'm': #if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2 agg_iters = (int)strtol(optarg, NULL, 0); #else printf("Option -m not supported before NCCL 2.2. Ignoring\n"); #endif break; case 'w': warmup_iters = (int)strtol(optarg, NULL, 0); break; case 'c': datacheck = (int)strtol(optarg, NULL, 0); break; case 'p': parallel_init = (int)strtol(optarg, NULL, 0); break; case 'o': ncclop = ncclstringtoop(optarg); break; case 'd': nccltype = ncclstringtotype(optarg); break; case 'r': ncclroot = strtol(optarg, NULL, 0); break; case 'z': blocking_coll = strtol(optarg, NULL, 0); break; case 'h': printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; default: printf("invalid option \n"); printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" 
"[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; } } #ifdef MPI_SUPPORT MPI_Init(&argc, &argv); #endif return run(); } testResult_t run() { int nProcs = 1, proc = 0; int localRank = 0; char hostname[1024]; getHostName(hostname, 1024); #ifdef MPI_SUPPORT MPI_Comm_size(MPI_COMM_WORLD, &nProcs); MPI_Comm_rank(MPI_COMM_WORLD, &proc); uint64_t hostHashs[nProcs]; hostHashs[proc] = getHostHash(hostname); MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD); for (int p=0; p<nProcs; p++) { if (p == proc) break; if (hostHashs[p] == hostHashs[proc]) localRank++; } #endif is_main_thread = (proc == 0) ? 1 : 0; PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes, (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck); if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n"); if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n"); PRINT("#\n"); PRINT("# Using devices\n"); #define MAX_LINE 2048 char line[MAX_LINE]; int len = 0; for (int i=0; i<nThreads*nGpus; i++) { int cudaDev = localRank*nThreads*nGpus+i; int rank = proc*nThreads*nGpus+i; hipDeviceProp_t prop; CUDACHECK(hipGetDeviceProperties(&prop, cudaDev)); len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n", rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name); } #if MPI_SUPPORT char *lines = (proc == 0) ? 
(char *)malloc(nProcs*MAX_LINE) : NULL; // Gather all output in rank order to root (0) MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD); if (proc == 0) { for (int p = 0; p < nProcs; p++) PRINT("%s", lines+MAX_LINE*p); free(lines); } #else PRINT("%s", line); #endif ncclUniqueId ncclId; if (proc == 0) { NCCLCHECK(ncclGetUniqueId(&ncclId)); } #ifdef MPI_SUPPORT MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD); #endif hipStream_t streams[nGpus*nThreads]; void* sendbuffs[nGpus*nThreads]; void* recvbuffs[nGpus*nThreads]; void* expected[nGpus*nThreads]; size_t sendBytes, recvBytes; ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads); for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i)); TESTCHECK(AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus)); CUDACHECK(hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking)); } //if parallel init is not selected, use main thread to initialize NCCL ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus); if (!parallel_init) { if (nProcs == 1) { int gpuArray[nGpus*nThreads]; for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i; NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray)); } else { NCCLCHECK(ncclGroupStart()); for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(hipSetDevice(localRank*nThreads*nGpus+i)); NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i)); } NCCLCHECK(ncclGroupEnd()); } } int errors[nThreads]; double bw[nThreads]; double* delta; CUDACHECK(hipHostMalloc(&delta, sizeof(double)*nThreads, hipHostMallocPortable | hipHostMallocMapped)); int bw_count[nThreads]; for (int t=0; t<nThreads; t++) { bw[t] = 0.0; errors[t] = bw_count[t] = 0; } PRINT("#\n"); print_header(); int* sync = (int*)calloc(2, sizeof(int)); int* barrier = (int*)calloc(2, sizeof(int)); struct 
testThread threads[nThreads]; memset(threads, 0, sizeof(struct testThread)*nThreads); for (int t=nThreads-1; t>=0; t--) { threads[t].args.minbytes=minBytes; threads[t].args.maxbytes=maxBytes; threads[t].args.stepbytes=stepBytes; threads[t].args.stepfactor=stepFactor; threads[t].args.localRank = localRank; threads[t].args.nProcs=nProcs; threads[t].args.proc=proc; threads[t].args.nThreads=nThreads; threads[t].args.thread=t; threads[t].args.nGpus=nGpus; threads[t].args.sendbuffs = sendbuffs+t*nGpus; threads[t].args.recvbuffs = recvbuffs+t*nGpus; threads[t].args.expected = expected+t*nGpus; threads[t].args.ncclId = ncclId; threads[t].args.comms=comms+t*nGpus; threads[t].args.streams=streams+t*nGpus; threads[t].args.barrier = (volatile int*)barrier; threads[t].args.barrier_idx = 0; threads[t].args.sync = (volatile int*)sync; threads[t].args.sync_idx = 0; threads[t].args.deltaThreads = delta; threads[t].args.deltaHost = (delta + t); threads[t].args.delta = delta; threads[t].args.errors=errors+t; threads[t].args.bw=bw+t; threads[t].args.bw_count=bw_count+t; threads[t].args.reportErrors = 1; threads[t].func = parallel_init ? 
threadInit : threadRunTests; if (t) TESTCHECK(threadLaunch(threads+t)); else TESTCHECK(threads[t].func(&threads[t].args)); } // Wait for other threads and accumulate stats and errors for (int t=nThreads-1; t>=0; t--) { if (t) pthread_join(threads[t].thread, NULL); TESTCHECK(threads[t].ret); if (t) { errors[0] += errors[t]; bw[0] += bw[t]; bw_count[0] += bw_count[t]; } } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); #endif if (!parallel_init) { for(int i=0; i<nGpus*nThreads; ++i) NCCLCHECK(ncclCommDestroy(comms[i])); free(comms); } // Free off CUDA allocated memory for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(hipFree(sendbuffs[i])); CUDACHECK(hipFree(recvbuffs[i])); if (datacheck) CUDACHECK(hipFree(expected[i])); } CUDACHECK(hipHostFree(delta)); char* str = getenv("NCCL_TESTS_MIN_BW"); double check_avg_bw = str ? atof(str) : -1; bw[0] /= bw_count[0]; PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK"); PRINT("# Avg bus bandwidth : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK")); PRINT("#\n"); #ifdef MPI_SUPPORT MPI_Finalize(); #endif // 'cuda-memcheck --leak-check full' requires this hipDeviceReset(); if (errors[0] || bw[0] < check_avg_bw*(0.9)) exit(EXIT_FAILURE); else exit(EXIT_SUCCESS); }
93bfe6eb6c87a0db1ade63941a2be57e93799181.cu
/************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "common.h" #include <pthread.h> #include <cstdio> #include <getopt.h> #include <libgen.h> #include "cuda.h" #if NCCL_MAJOR >= 2 ncclDataType_t test_types[ncclNumTypes] = {ncclInt8, ncclUint8, ncclInt32, ncclUint32, ncclInt64, ncclUint64, ncclHalf, ncclFloat, ncclDouble}; const char *test_typenames[ncclNumTypes] = {"int8", "uint8", "int32", "uint32", "int64", "uint64", "half", "float", "double"}; #else ncclDataType_t test_types[ncclNumTypes] = {ncclChar, ncclInt, ncclHalf, ncclFloat, ncclDouble, ncclInt64, ncclUint64}; const char *test_typenames[ncclNumTypes] = {"char", "int", "half", "float", "double", "int64", "uint64"}; #endif ncclRedOp_t test_ops[ncclNumOps] = {ncclSum, ncclProd, ncclMax, ncclMin}; const char *test_opnames[ncclNumOps] = {"sum", "prod", "max", "min"}; thread_local int is_main_thread = 0; // Command line parameter defaults static int nThreads = 1; static int nGpus = 1; static size_t minBytes = 32*1024*1024; static size_t maxBytes = 32*1024*1024; static size_t stepBytes = 1*1024*1024; static size_t stepFactor = 1; static int datacheck = 1; static int warmup_iters = 5; static int iters = 20; static int agg_iters = 1; static int ncclop = ncclSum; static int nccltype = ncclFloat; static int ncclroot = 0; static int parallel_init = 0; static int blocking_coll = 0; double parsesize(char *value) { long long int units; double size; if (strchr(value, 'G') != NULL) { units=1024*1024*1024; } else if (strchr(value, 'M') != NULL) { units=1024*1024; } else if (strchr(value, 'K') != NULL) { units=1024; } else { units=1; } size = atof(value)*units; return size; } double DeltaMaxValue(ncclDataType_t type) { switch(type) { case ncclHalf: return 1e-2; case ncclFloat: return 1e-5; case ncclDouble: 
return 1e-12; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint8: //case ncclInt32: case ncclUint32: #endif case ncclInt64: case ncclUint64: return 1e-200; } return 1e-200; } template<typename T> __device__ double absDiff(T a, T b) { return fabs((double)(b - a)); } template<> __device__ double absDiff<half>(half a, half b) { float x = __half2float(a); float y = __half2float(b); return fabs((double)(y-x)); } template<typename T> __device__ float toFloat(T a) { return (float)a; } template<> __device__ float toFloat(half a) { return __half2float(a); } template<typename T, int BSIZE> __global__ void deltaKern(void* A_, void* B_, size_t count, double* max) { const T* A = (const T*)A_; const T* B = (const T*)B_; __shared__ double temp[BSIZE]; int tid = threadIdx.x; double locmax = 0.0; for(int i=tid; i<count; i+=blockDim.x) { double delta = absDiff(A[i], B[i]); if( delta > locmax ) { locmax = delta; #ifdef DEBUG_PRINT if (delta > .1) printf("Error at %d/%ld : %f != %f\n", i, count, toFloat(A[i]), toFloat(B[i])); #endif } } temp[tid] = locmax; for(int stride = BSIZE/2; stride > 1; stride>>=1) { __syncthreads(); if( tid < stride ) temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride]; } __syncthreads(); if( threadIdx.x == 0) *max = temp[0] > temp[1] ? 
temp[0] : temp[1]; } testResult_t CheckDelta(void* expected, void* results, size_t count, ncclDataType_t type, double* devmax) { switch (type) { case ncclHalf: deltaKern<half, 512><<<1, 512>>>(results, expected, count, devmax); break; case ncclFloat: deltaKern<float, 512><<<1, 512>>>(results, expected, count, devmax); break; case ncclDouble: deltaKern<double, 512><<<1, 512>>>(results, expected, count, devmax); break; case ncclChar: #if NCCL_MAJOR >= 2 case ncclUint8: #endif deltaKern<uint8_t, 512><<<1, 512>>>(results, expected, count, devmax); break; case ncclInt: #if NCCL_MAJOR >= 2 case ncclUint32: #endif deltaKern<uint32_t, 512><<<1, 512>>>(results, expected, count, devmax); break; case ncclInt64: case ncclUint64: deltaKern<uint64_t, 512><<<1, 512>>>(results, expected, count, devmax); break; } CUDACHECK(cudaDeviceSynchronize()); return testSuccess; } // For integer values, we use values between 0 and 255 template<typename T> __device__ T testValue(const size_t offset, const int rep, const int rank) { uint8_t v = (rep+rank+offset) % 256; return (T)v; } // For floating point datatype, we use values between 0 and 1 otherwise the // Product operation will produce NaNs. template<> __device__ double testValue<double>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(double)testValue<int>(offset, rep, rank)); } template<> __device__ float testValue<float>(const size_t offset, const int rep, const int rank) { return 1.0/(1.0+(float)testValue<int>(offset, rep, rank)); } template<> __device__ half testValue<half>(const size_t offset, const int rep, const int rank) { return __float2half(testValue<float>(offset, rep, rank)); } // Operations template<typename T> __device__ T ncclOpSum(T a, T b) { return a+b; } template<typename T> __device__ T ncclOpProd(T a, T b) { return a*b; } template<typename T> __device__ T ncclOpMax(T a, T b) { return a>b ? a : b; } template<typename T> __device__ T ncclOpMin(T a, T b) { return a<b ? 
a : b; } // Definitions for half template<> __device__ half ncclOpSum(half a, half b) { return __float2half(__half2float(a)+__half2float(b)); } template<> __device__ half ncclOpProd(half a, half b) { return __float2half(__half2float(a)*__half2float(b)); } template<> __device__ half ncclOpMax(half a, half b) { return __half2float(a)>__half2float(b) ? a : b; } template<> __device__ half ncclOpMin(half a, half b) { return __half2float(a)<__half2float(b) ? a : b; } template<typename T, T (*Op)(T, T)> __global__ void InitDataReduceKernel(T* data, const size_t N, const size_t offset, const int rep, const int nranks) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) { T val = testValue<T>(o+offset, rep, 0); for (int i=1; i<nranks; i++) { val = Op(val, testValue<T>(o+offset, rep, i)); } data[o] = val; } } #define KERN(type, op) (void*)InitDataReduceKernel<type, op<type>> #define OPS(type) KERN(type, ncclOpSum), KERN(type, ncclOpProd), KERN(type, ncclOpMax), KERN(type, ncclOpMin) static void* const redInitDataKerns[ncclNumOps*ncclNumTypes] = { OPS(int8_t), OPS(uint8_t), OPS(int32_t), OPS(uint32_t), OPS(int64_t), OPS(uint64_t), OPS(half), OPS(float), OPS(double) }; testResult_t InitDataReduce(void* data, const size_t count, const size_t offset, ncclDataType_t type, ncclRedOp_t op, const int rep, const int nranks) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; void* args[5] = { (void*)&data, (void*)&count, (void*)&offset, (void*)&rep, (void*)&nranks }; CUDACHECK(cudaLaunchKernel(redInitDataKerns[type*ncclNumOps+op], grid, block, args, 0, cudaStreamDefault)); return testSuccess; } template<typename T> __global__ void InitDataKernel(T* data, const size_t N, const int rep, const int rank) { for (size_t o=blockIdx.x*blockDim.x+threadIdx.x; o<N; o+=gridDim.x*blockDim.x) data[o] = testValue<T>(o, rep, rank); } static void* const initDataKerns[ncclNumTypes] = { (void*)InitDataKernel< int8_t>, (void*)InitDataKernel< uint8_t>, 
(void*)InitDataKernel< int32_t>, (void*)InitDataKernel<uint32_t>, (void*)InitDataKernel< int64_t>, (void*)InitDataKernel<uint64_t>, (void*)InitDataKernel< half>, (void*)InitDataKernel< float>, (void*)InitDataKernel< double> }; template<typename T> testResult_t InitDataType(void* dest, const size_t N, const int rep, const int rank) { T* ptr = (T*)dest; InitDataKernel<<<16, 512>>>(ptr, N, rep, rank); return testSuccess; } testResult_t InitData(void* data, const size_t count, ncclDataType_t type, const int rep, const int rank) { dim3 grid = { 32, 1, 1 }; dim3 block = { 256, 1, 1 }; void* args[4] = { (void*)&data, (void*)&count, (void*)&rep, (void*)&rank }; CUDACHECK(cudaLaunchKernel(initDataKerns[type], grid, block, args, 0, cudaStreamDefault)); return testSuccess; } void Barrier(struct threadArgs* args) { while (args->barrier[args->barrier_idx] != args->thread) pthread_yield(); args->barrier[args->barrier_idx] = args->thread + 1; if (args->thread+1 == args->nThreads) { #ifdef MPI_SUPPORT MPI_Barrier(MPI_COMM_WORLD); #endif args->barrier[args->barrier_idx] = 0; } else { while (args->barrier[args->barrier_idx]) pthread_yield(); } args->barrier_idx=!args->barrier_idx; } testResult_t CheckData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, double *delta) { size_t count = args->expectedBytes/wordSize(type); double maxDelta = 0.0; for (int i=0; i<args->nGpus; i++) { int device; int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); NCCLCHECK(ncclCommCuDevice(args->comms[i], &device)); CUDACHECK(cudaSetDevice(device)); void *data = in_place ? 
((void *)((uintptr_t)args->recvbuffs[i] + args->recvInplaceOffset*rank)) : args->recvbuffs[i]; TESTCHECK(CheckDelta(data , args->expected[i], count, type, args->delta)); maxDelta = std::max(*(args->deltaHost), maxDelta); #ifdef DEBUG_PRINT if (rank == 0) { int *expectedHost = (int *)malloc(args->expectedBytes); int *dataHost = (int *)malloc(args->expectedBytes); cudaMemcpy(expectedHost, args->expected[0], args->expectedBytes, cudaMemcpyDeviceToHost); printf("\n Expected: "); for(int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, expectedHost[j]); } printf("\n"); cudaMemcpy(dataHost, data, args->expectedBytes, cudaMemcpyDeviceToHost); printf("\n Actual: "); for (int j=0; j<args->expectedBytes/sizeof(int); j++) { printf("%d:%d ", j, dataHost[j]); } printf("\n"); free(expectedHost); free(dataHost); } #endif } double nranks = args->nProcs*args->nThreads*args->nGpus; if (args->reportErrors && maxDelta > DeltaMaxValue(type)*(nranks - 1)) args->errors[0]++; *delta = maxDelta; return testSuccess; } testResult_t testStreamSynchronize(int ngpus, cudaStream_t* streams, ncclComm_t* comms) { cudaError_t cudaErr; int remaining = ngpus; int* done = (int*)malloc(sizeof(int)*ngpus); memset(done, 0, sizeof(int)*ngpus); while (remaining) { int idle = 1; for (int i=0; i<ngpus; i++) { if (done[i]) continue; cudaErr = cudaStreamQuery(streams[i]); if (cudaErr == cudaSuccess) { done[i] = 1; remaining--; idle = 0; continue; } if (cudaErr != cudaErrorNotReady) CUDACHECK(cudaErr); #if NCCL_VERSION_CODE >= NCCL_VERSION(2,4,0) if (comms) { ncclResult_t ncclAsyncErr; NCCLCHECK(ncclCommGetAsyncError(comms[i], &ncclAsyncErr)); if (ncclAsyncErr != ncclSuccess) { // An asynchronous error happened. Stop the operation and destroy // the communicator for (int i=0; i<ngpus; i++) NCCLCHECK(ncclCommAbort(comms[i])); // Abort the perf test NCCLCHECK(ncclAsyncErr); } } #endif } // We might want to let other threads (including NCCL threads) use the CPU. 
if (idle) pthread_yield(); } free(done); return testSuccess; } testResult_t startColl(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place, int iter) { size_t count = args->nbytes / wordSize(type); // Try to change offset for each iteration so that we avoid cache effects and catch race conditions in ptrExchange size_t totalnbytes = max(args->sendBytes, args->expectedBytes); size_t shift = (totalnbytes * iter) % args->maxbytes; if (shift + totalnbytes > args->maxbytes) shift = 0; if (args->nGpus > 1) NCCLCHECK(ncclGroupStart()); for (int i = 0; i < args->nGpus; i++) { #ifndef NCCL_MAJOR int cudaDev; NCCLCHECK(ncclCommCuDevice(args->comms[i], &cudaDev)); CUDACHECK(cudaSetDevice(cudaDev)); #endif int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); char* recvBuff = ((char*)args->recvbuffs[i]) + shift; char* sendBuff = ((char*)args->sendbuffs[i]) + shift; TESTCHECK(args->collTest->runColl( (void*)(in_place ? recvBuff + args->sendInplaceOffset*rank : sendBuff), (void*)(in_place ? 
recvBuff + args->recvInplaceOffset*rank : recvBuff), count, type, op, root, args->comms[i], args->streams[i])); } if (args->nGpus > 1) NCCLCHECK(ncclGroupEnd()); if (blocking_coll) { // Complete op before returning TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); } if (blocking_coll) Barrier(args); return testSuccess; } testResult_t completeColl(struct threadArgs* args) { if (blocking_coll) return testSuccess; TESTCHECK(testStreamSynchronize(args->nGpus, args->streams, args->comms)); return testSuccess; } testResult_t BenchTime(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int in_place) { size_t count = args->nbytes / wordSize(type); // Sync TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); Barrier(args); // Performance Benchmark auto start = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < iters; iter++) { if (agg_iters>1) NCCLCHECK(ncclGroupStart()); for (int aiter = 0; aiter < agg_iters; aiter++) { TESTCHECK(startColl(args, type, op, root, in_place, iter*agg_iters+aiter)); } if (agg_iters>1) NCCLCHECK(ncclGroupEnd()); } TESTCHECK(completeColl(args)); auto delta = std::chrono::high_resolution_clock::now() - start; double deltaSec = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count(); deltaSec = deltaSec/(iters*agg_iters); double algBw, busBw; args->collTest->getBw(count, wordSize(type), deltaSec, &algBw, &busBw, args->nProcs*args->nThreads*args->nGpus); Barrier(args); double maxDelta = 0; static __thread int rep = 0; rep++; if (datacheck) { // Initialize sendbuffs, recvbuffs and expected TESTCHECK(args->collTest->initData(args, type, op, root, rep, in_place)); //test validation in single itertion, should ideally be included into the multi-iteration run TESTCHECK(startColl(args, type, op, root, in_place, 0)); TESTCHECK(completeColl(args)); TESTCHECK(CheckData(args, type, op, root, in_place, &maxDelta)); //aggregate delta from all 
threads and procs Barrier(args); if (args->thread == 0) { for (int i=1; i<args->nThreads; i++) { maxDelta += args->deltaThreads[i]; } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &maxDelta, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); #endif } Barrier(args); } double timeUsec = deltaSec*1.0E6; char timeStr[10]; if (timeUsec > 10000.0) { sprintf(timeStr, "%7.0f", timeUsec); } else if (timeUsec > 100.0) { sprintf(timeStr, "%7.1f", timeUsec); } else { sprintf(timeStr, "%7.2f", timeUsec); } if (datacheck) { PRINT(" %7s %6.2f %6.2f %5.0le", timeStr, algBw, busBw, maxDelta); } else { PRINT(" %7s %6.2f %6.2f %5s", timeStr, algBw, busBw, "N/A"); } args->bw[0] += busBw; args->bw_count[0]++; return testSuccess; } void setupArgs(size_t size, ncclDataType_t type, struct threadArgs* args) { int nranks = args->nProcs*args->nGpus*args->nThreads; size_t count, sendCount, recvCount, paramCount, sendInplaceOffset, recvInplaceOffset; count = size / wordSize(type); args->collTest->getCollByteCount(&sendCount, &recvCount, &paramCount, &sendInplaceOffset, &recvInplaceOffset, (size_t)count, (size_t)nranks); args->nbytes = paramCount * wordSize(type); args->sendBytes = sendCount * wordSize(type); args->expectedBytes = recvCount * wordSize(type); args->sendInplaceOffset = sendInplaceOffset * wordSize(type); args->recvInplaceOffset = recvInplaceOffset * wordSize(type); } testResult_t TimeTest(struct threadArgs* args, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName, int root) { // Warm-up for large size setupArgs(args->maxbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Warm-up for small size setupArgs(args->minbytes, type, args); for (int iter = 0; iter < warmup_iters; iter++) { TESTCHECK(startColl(args, type, op, root, 0, iter)); } TESTCHECK(completeColl(args)); // Benchmark for (size_t size = args->minbytes; size<=args->maxbytes; size = 
((args->stepfactor > 1) ? size*args->stepfactor : size+args->stepbytes)) { setupArgs(size, type, args); print_line_header(max(args->sendBytes, args->expectedBytes), args->nbytes / wordSize(type), typeName, opName, root); TESTCHECK(BenchTime(args, type, op, root, 0)); TESTCHECK(BenchTime(args, type, op, root, 1)); PRINT("\n"); } return testSuccess; } testResult_t threadRunTests(struct threadArgs* args) { // Set device to the first of our GPUs. If we don't do that, some operations // will be done on the current GPU (by default : 0) and if the GPUs are in // exclusive mode those operations will fail. int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus; CUDACHECK(cudaSetDevice(gpuid)); TESTCHECK(ncclTestEngine.runTest(args, ncclroot, (ncclDataType_t)nccltype, test_typenames[nccltype], (ncclRedOp_t)ncclop, test_opnames[ncclop])); return testSuccess; } testResult_t threadInit(struct threadArgs* args) { char hostname[1024]; getHostName(hostname, 1024); int nranks = args->nProcs*args->nThreads*args->nGpus; //set main thread again is_main_thread = (args->proc == 0 && args->thread == 0) ? 
1 : 0; NCCLCHECK(ncclGroupStart()); for (int i=0; i<args->nGpus; i++) { int rank = args->proc*args->nThreads*args->nGpus + args->thread*args->nGpus + i; int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; CUDACHECK(cudaSetDevice(gpuid)); NCCLCHECK(ncclCommInitRank(args->comms+i, nranks, args->ncclId, rank)); } NCCLCHECK(ncclGroupEnd()); TESTCHECK(threadRunTests(args)); for (int i=0; i<args->nGpus; i++) { NCCLCHECK(ncclCommDestroy(args->comms[i])); } return testSuccess; } void* threadLauncher(void* thread_) { struct testThread* thread = (struct testThread*)thread_; thread->ret = thread->func(&thread->args); return NULL; } testResult_t threadLaunch(struct testThread* thread) { pthread_create(&thread->thread, NULL, threadLauncher, thread); return testSuccess; } testResult_t AllocateBuffs(void **sendbuff, size_t sendBytes, void **recvbuff, size_t recvBytes, void **expected, size_t nbytes, int nranks) { CUDACHECK(cudaMalloc(sendbuff, nbytes)); CUDACHECK(cudaMalloc(recvbuff, nbytes)); if (datacheck) CUDACHECK(cudaMalloc(expected, recvBytes)); return testSuccess; } testResult_t run(); // Main function int main(int argc, char* argv[]) { // Make sure everyline is flushed so that we see the progress of the test setlinebuf(stdout); // Parse args int longindex; static struct option longopts[] = { {"nthreads", required_argument, 0, 't'}, {"ngpus", required_argument, 0, 'g'}, {"minbytes", required_argument, 0, 'b'}, {"maxbytes", required_argument, 0, 'e'}, {"stepbytes", required_argument, 0, 'i'}, {"stepfactor", required_argument, 0, 'f'}, {"iters", required_argument, 0, 'n'}, {"agg_iters", required_argument, 0, 'm'}, {"warmup_iters", required_argument, 0, 'w'}, {"parallel_init", required_argument, 0, 'p'}, {"check", required_argument, 0, 'c'}, {"op", required_argument, 0, 'o'}, {"datatype", required_argument, 0, 'd'}, {"root", required_argument, 0, 'r'}, {"blocking", required_argument, 0, 'z'}, {"help", no_argument, 0, 'h'} }; while(1) { int 
c; c = getopt_long(argc, argv, "t:g:b:e:i:f:n:m:w:p:c:o:d:r:z:h", longopts, &longindex); if (c == -1) break; switch(c) { case 't': nThreads = strtol(optarg, NULL, 0); break; case 'g': nGpus = strtol(optarg, NULL, 0); break; case 'b': minBytes = (size_t)parsesize(optarg); break; case 'e': maxBytes = (size_t)parsesize(optarg); break; case 'i': stepBytes = strtol(optarg, NULL, 0); break; case 'f': stepFactor = strtol(optarg, NULL, 0); break; case 'n': iters = (int)strtol(optarg, NULL, 0); break; case 'm': #if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2 agg_iters = (int)strtol(optarg, NULL, 0); #else printf("Option -m not supported before NCCL 2.2. Ignoring\n"); #endif break; case 'w': warmup_iters = (int)strtol(optarg, NULL, 0); break; case 'c': datacheck = (int)strtol(optarg, NULL, 0); break; case 'p': parallel_init = (int)strtol(optarg, NULL, 0); break; case 'o': ncclop = ncclstringtoop(optarg); break; case 'd': nccltype = ncclstringtotype(optarg); break; case 'r': ncclroot = strtol(optarg, NULL, 0); break; case 'z': blocking_coll = strtol(optarg, NULL, 0); break; case 'h': printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" "[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; default: printf("invalid option \n"); printf("USAGE: %s \n\t" "[-t,--nthreads <num threads>] \n\t" "[-g,--ngpus <gpus per thread>] \n\t" "[-b,--minbytes <min size in bytes>] \n\t" "[-e,--maxbytes <max size in bytes>] \n\t" "[-i,--stepbytes <increment size>] \n\t" 
"[-f,--stepfactor <increment factor>] \n\t" "[-n,--iters <iteration count>] \n\t" "[-m,--agg_iters <aggregated iteration count>] \n\t" "[-w,--warmup_iters <warmup iteration count>] \n\t" "[-p,--parallel_init <0/1>] \n\t" "[-c,--check <0/1>] \n\t" "[-o,--op <sum/prod/min/max/all>] \n\t" "[-d,--datatype <nccltype/all>] \n\t" "[-r,--root <root>] \n\t" "[-z,--blocking <0/1>] \n\t" "[-h,--help]\n", basename(argv[0])); return 0; } } #ifdef MPI_SUPPORT MPI_Init(&argc, &argv); #endif return run(); } testResult_t run() { int nProcs = 1, proc = 0; int localRank = 0; char hostname[1024]; getHostName(hostname, 1024); #ifdef MPI_SUPPORT MPI_Comm_size(MPI_COMM_WORLD, &nProcs); MPI_Comm_rank(MPI_COMM_WORLD, &proc); uint64_t hostHashs[nProcs]; hostHashs[proc] = getHostHash(hostname); MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD); for (int p=0; p<nProcs; p++) { if (p == proc) break; if (hostHashs[p] == hostHashs[proc]) localRank++; } #endif is_main_thread = (proc == 0) ? 1 : 0; PRINT("# nThread %d nGpus %d minBytes %ld maxBytes %ld step: %ld(%s) warmup iters: %d iters: %d validation: %d \n", nThreads, nGpus, minBytes, maxBytes, (stepFactor > 1)?stepFactor:stepBytes, (stepFactor > 1)?"factor":"bytes", warmup_iters, iters, datacheck); if (blocking_coll) PRINT("# Blocking Enabled: wait for completion and barrier after each collective \n"); if (parallel_init) PRINT("# Parallel Init Enabled: threads call into NcclInitRank concurrently \n"); PRINT("#\n"); PRINT("# Using devices\n"); #define MAX_LINE 2048 char line[MAX_LINE]; int len = 0; for (int i=0; i<nThreads*nGpus; i++) { int cudaDev = localRank*nThreads*nGpus+i; int rank = proc*nThreads*nGpus+i; cudaDeviceProp prop; CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev)); len += snprintf(line+len, MAX_LINE-len, "# Rank %2d Pid %6d on %10s device %2d [0x%02x] %s\n", rank, getpid(), hostname, cudaDev, prop.pciBusID, prop.name); } #if MPI_SUPPORT char *lines = (proc == 0) ? 
(char *)malloc(nProcs*MAX_LINE) : NULL; // Gather all output in rank order to root (0) MPI_Gather(line, MAX_LINE, MPI_BYTE, lines, MAX_LINE, MPI_BYTE, 0, MPI_COMM_WORLD); if (proc == 0) { for (int p = 0; p < nProcs; p++) PRINT("%s", lines+MAX_LINE*p); free(lines); } #else PRINT("%s", line); #endif ncclUniqueId ncclId; if (proc == 0) { NCCLCHECK(ncclGetUniqueId(&ncclId)); } #ifdef MPI_SUPPORT MPI_Bcast(&ncclId, sizeof(ncclId), MPI_BYTE, 0, MPI_COMM_WORLD); #endif cudaStream_t streams[nGpus*nThreads]; void* sendbuffs[nGpus*nThreads]; void* recvbuffs[nGpus*nThreads]; void* expected[nGpus*nThreads]; size_t sendBytes, recvBytes; ncclTestEngine.getBuffSize(&sendBytes, &recvBytes, (size_t)maxBytes, (size_t)nProcs*nGpus*nThreads); for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i)); TESTCHECK(AllocateBuffs(sendbuffs+i, sendBytes, recvbuffs+i, recvBytes, expected+i, (size_t)maxBytes, nProcs*nThreads*nGpus)); CUDACHECK(cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking)); } //if parallel init is not selected, use main thread to initialize NCCL ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nThreads*nGpus); if (!parallel_init) { if (nProcs == 1) { int gpuArray[nGpus*nThreads]; for (int i=0; i<nGpus*nThreads; i++) gpuArray[i] = i; NCCLCHECK(ncclCommInitAll(comms, nGpus*nThreads, gpuArray)); } else { NCCLCHECK(ncclGroupStart()); for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(cudaSetDevice(localRank*nThreads*nGpus+i)); NCCLCHECK(ncclCommInitRank(comms+i, nProcs*nThreads*nGpus, ncclId, proc*nThreads*nGpus+i)); } NCCLCHECK(ncclGroupEnd()); } } int errors[nThreads]; double bw[nThreads]; double* delta; CUDACHECK(cudaHostAlloc(&delta, sizeof(double)*nThreads, cudaHostAllocPortable | cudaHostAllocMapped)); int bw_count[nThreads]; for (int t=0; t<nThreads; t++) { bw[t] = 0.0; errors[t] = bw_count[t] = 0; } PRINT("#\n"); print_header(); int* sync = (int*)calloc(2, sizeof(int)); int* barrier = (int*)calloc(2, sizeof(int)); 
struct testThread threads[nThreads]; memset(threads, 0, sizeof(struct testThread)*nThreads); for (int t=nThreads-1; t>=0; t--) { threads[t].args.minbytes=minBytes; threads[t].args.maxbytes=maxBytes; threads[t].args.stepbytes=stepBytes; threads[t].args.stepfactor=stepFactor; threads[t].args.localRank = localRank; threads[t].args.nProcs=nProcs; threads[t].args.proc=proc; threads[t].args.nThreads=nThreads; threads[t].args.thread=t; threads[t].args.nGpus=nGpus; threads[t].args.sendbuffs = sendbuffs+t*nGpus; threads[t].args.recvbuffs = recvbuffs+t*nGpus; threads[t].args.expected = expected+t*nGpus; threads[t].args.ncclId = ncclId; threads[t].args.comms=comms+t*nGpus; threads[t].args.streams=streams+t*nGpus; threads[t].args.barrier = (volatile int*)barrier; threads[t].args.barrier_idx = 0; threads[t].args.sync = (volatile int*)sync; threads[t].args.sync_idx = 0; threads[t].args.deltaThreads = delta; threads[t].args.deltaHost = (delta + t); threads[t].args.delta = delta; threads[t].args.errors=errors+t; threads[t].args.bw=bw+t; threads[t].args.bw_count=bw_count+t; threads[t].args.reportErrors = 1; threads[t].func = parallel_init ? 
threadInit : threadRunTests; if (t) TESTCHECK(threadLaunch(threads+t)); else TESTCHECK(threads[t].func(&threads[t].args)); } // Wait for other threads and accumulate stats and errors for (int t=nThreads-1; t>=0; t--) { if (t) pthread_join(threads[t].thread, NULL); TESTCHECK(threads[t].ret); if (t) { errors[0] += errors[t]; bw[0] += bw[t]; bw_count[0] += bw_count[t]; } } #ifdef MPI_SUPPORT MPI_Allreduce(MPI_IN_PLACE, &errors[0], 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); #endif if (!parallel_init) { for(int i=0; i<nGpus*nThreads; ++i) NCCLCHECK(ncclCommDestroy(comms[i])); free(comms); } // Free off CUDA allocated memory for (int i=0; i<nGpus*nThreads; i++) { CUDACHECK(cudaFree(sendbuffs[i])); CUDACHECK(cudaFree(recvbuffs[i])); if (datacheck) CUDACHECK(cudaFree(expected[i])); } CUDACHECK(cudaFreeHost(delta)); char* str = getenv("NCCL_TESTS_MIN_BW"); double check_avg_bw = str ? atof(str) : -1; bw[0] /= bw_count[0]; PRINT("# Out of bounds values : %d %s\n", errors[0], errors[0] ? "FAILED" : "OK"); PRINT("# Avg bus bandwidth : %g %s\n", bw[0], check_avg_bw == -1 ? "" : (bw[0] < check_avg_bw*(0.9) ? "FAILED" : "OK")); PRINT("#\n"); #ifdef MPI_SUPPORT MPI_Finalize(); #endif // 'cuda-memcheck --leak-check full' requires this cudaDeviceReset(); if (errors[0] || bw[0] < check_avg_bw*(0.9)) exit(EXIT_FAILURE); else exit(EXIT_SUCCESS); }
a722d44e381e06fae2bfa636336ce8fa648df4e5.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <fstream> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <algorithm> #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <rocblas.h> #include "CycleTimer.h" #define BLOCKSIZE 1024 #define TILE_WIDTH 32 #define SCAN_BLOCK_DIM BLOCKSIZE #include "exclusiveScan.cu_inl" #include "neuralNetwork.h" using namespace std; __global__ void forward_prop_kernel(float *device_output, float *input, float *weights, int num_first, int num_second) { int Row = blockIdx.y*TILE_WIDTH + threadIdx.y; int Col = blockIdx.x*TILE_WIDTH + threadIdx.x; float Pvalue = 0; __shared__ float prefixSumOutput[BLOCKSIZE]; __shared__ float prefixSumScratch[2 * BLOCKSIZE]; for (int k = 0; k < num_second; ++k) Pvalue += input[Row*num_second+k] * weights[k*num_second+Col]; device_output[Row*num_second+Col] = Pvalue; __syncthreads(); sharedMemExclusiveScan(threadIdx.x, device_output, prefixSumOutput, prefixSumScratch, BLOCKSIZE); __syncthreads(); if (threadIdx.x == 0 && blockIdx.x < num_second) { device_output[blockIdx.x] = 1/(1+exp(-1*prefixSumOutput[num_first])); } } __global__ void forward_prop_kernel_batch(float *device_output, float *input, float *weights, int num_first, int num_second, int batchSize) { int linearThreadIndex = threadIdx.x; int unit = blockIdx.x%num_second; int batch = blockIdx.x/num_second; __shared__ float prefixSumInput[BLOCKSIZE]; __shared__ float prefixSumOutput[BLOCKSIZE]; __shared__ float prefixSumScratch[2 * BLOCKSIZE]; if (linearThreadIndex < num_first) { prefixSumInput[linearThreadIndex] = input[batch*linearThreadIndex] * weights[linearThreadIndex*num_second + unit]; } __syncthreads(); sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput, prefixSumScratch, BLOCKSIZE); __syncthreads(); if 
(linearThreadIndex == 0 && unit < num_second) { device_output[batch*unit] = 1/(1+exp(-1*prefixSumOutput[num_first])); } } neuralNetwork::neuralNetwork(int nI, int nH, int nO, int bS) : nInput(nI), nHidden(nH), nOutput(nO), batchSize(bS) { inputNeurons = new( float[batchSize*(nInput + 1)] ); for (int b= 0; b<batchSize; b++) { for (int i=0; i<nInput+1; i++) { if (i==nInput) { inputNeurons[(b+1)*(nInput)] = -1; } else { inputNeurons[b*(nInput+1) + i] = 0; } } } hiddenNeurons = new( float[batchSize*(nHidden + 1)] ); for (int b=0; b<batchSize; b++) { for (int i=0; i<nHidden+1; i++) { if (i==nHidden) { hiddenNeurons[(b+1)*(nHidden)] = -1; } else { hiddenNeurons[b*(nHidden+1) + i] = 0; } } } outputNeurons = new( float[batchSize*(nOutput + 1)] ); for ( int i=0; i < batchSize*(nOutput+1); i++ ) { outputNeurons[i] = 0; } wInputHidden = new( float*[nInput + 1] ); wInputHidden[0] = new (float[(nInput + 1)*nHidden]); for ( int i=1; i <= nInput; i++ ) { wInputHidden[i] = wInputHidden[i-1] + nHidden; } for ( int i=0; i <= nInput; i++ ) { for ( int j=0; j < nHidden; j++ ) wInputHidden[i][j] = 0; } wHiddenOutput = new( float*[nHidden + 1] ); wHiddenOutput[0] = new (float[(nHidden + 1)*nOutput]); for ( int i=1; i <= nHidden; i++ ) { wHiddenOutput[i] = wHiddenOutput[i-1] + nOutput; } for ( int i=0; i <= nHidden; i++ ) { for ( int j=0; j < nOutput; j++ ) wHiddenOutput[i][j] = 0; } initializeWeights(); } neuralNetwork::~neuralNetwork() { delete[] inputNeurons; delete[] hiddenNeurons; delete[] outputNeurons; for (int i=0; i <= nInput; i++) delete[] wInputHidden[i]; delete[] wInputHidden; for (int j=0; j <= nHidden; j++) delete[] wHiddenOutput[j]; delete[] wHiddenOutput; hipFree(device_output1); hipFree(input); hipFree(w1); hipFree(device_output2); hipFree(hidden); hipFree(w2); } bool neuralNetwork::saveWeights(char* filename) { fstream outputFile; outputFile.open(filename, ios::out); if ( outputFile.is_open() ) { outputFile.precision(50); for ( int i=0; i <= nInput; i++ ) { for ( int 
j=0; j < nHidden; j++ ) { outputFile << wInputHidden[i][j] << ","; } } for ( int i=0; i <= nHidden; i++ ) { for ( int j=0; j < nOutput; j++ ) { outputFile << wHiddenOutput[i][j]; if ( i * nOutput + j + 1 != (nHidden + 1) * nOutput ) outputFile << ","; } } cout << endl << "Neuron weights saved to '" << filename << "'" << endl; outputFile.close(); return true; } else { cout << endl << "Error - Weight output file '" << filename << "' could not be created: " << endl; return false; } } double neuralNetwork::getSetAccuracy( std::vector<dataEntry*>& set ) { double incorrectResults = 0; for ( int tp = 0; tp < (int) set.size(); tp++) { feedForward( set[tp]->pattern ); int predicted = distance(outputNeurons, max_element(outputNeurons, outputNeurons + nOutput)); i int expected = distance(set[tp]->target, max_element(set[tp]->target, set[tp]->target + nOutput)); if (predicted != expected) incorrectResults++; } return 100 - (incorrectResults/set.size() * 100); } void neuralNetwork::initializeWeights() { double startTime = CycleTimer::currentSeconds(); hipMalloc(&device_output1, sizeof(float) * batchSize*nHidden); hipMalloc(&input, sizeof(float) * batchSize*(nInput+1)); hipMalloc(&w1, sizeof(float) * (nInput+1)*nHidden); hipMalloc(&device_output2, sizeof(float) * batchSize*nOutput); hipMalloc(&hidden, sizeof(float) * batchSize*(nHidden+1)); hipMalloc(&w2, sizeof(float) * (nHidden+1)*nOutput); for(int i = 0; i <= nInput; i++) { for(int j = 0; j < nHidden; j++) { wInputHidden[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05); } } for(int i = 0; i <= nHidden; i++) { for(int j = 0; j < nOutput; j++) { wHiddenOutput[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05); } } double endTime = CycleTimer::currentSeconds(); double overallDuration = endTime - startTime; printf("Time Taken Seq:%f\n", overallDuration); } inline float neuralNetwork::activationFunction( float x ) { return 1/(1+exp(-x)); } void neuralNetwork::feedForwardBatch(vector<float*> patternVector) { for (int b = 0; 
b<batchSize; b++) { for(int i = 0; i < nInput+1; i++) { if (i!=nInput) { inputNeurons[b*(nInput+1) + i] = patternVector[b][i]; } } } dim3 blockDim(1024,1); dim3 gridDim(nHidden*batchSize); hipMemcpy(input, inputNeurons, sizeof(float) * batchSize*(nInput+1), hipMemcpyHostToDevice); hipMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( forward_prop_kernel_batch), dim3(gridDim), dim3(blockDim), 0, 0, device_output1, input, w1, nInput+1, nHidden, batchSize); hipDeviceSynchronize(); hipMemcpy(hiddenNeurons, device_output1, batchSize*nHidden*sizeof(float), hipMemcpyDeviceToHost); dim3 gridDim2(nOutput*batchSize); hipMemcpy(hidden, hiddenNeurons, sizeof(float) * batchSize*(nHidden+1), hipMemcpyHostToDevice); hipMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( forward_prop_kernel_batch), dim3(gridDim2), dim3(blockDim), 0, 0, device_output2, hidden, w2, nHidden+1, nOutput,batchSize); hipDeviceSynchronize(); hipMemcpy(outputNeurons, device_output2, batchSize*nOutput*sizeof(float), hipMemcpyDeviceToHost); } void neuralNetwork::feedForward(float* pattern) { for(int i = 0; i < nInput; i++) { inputNeurons[i] = pattern[i]; } double startTime = CycleTimer::currentSeconds(); dim3 blockDim(1024, 1); dim3 gridDim(128); hipMemcpy(input, inputNeurons, sizeof(float) * (nInput+1), hipMemcpyHostToDevice); hipMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( forward_prop_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_output1, input, w1, nInput+1, nHidden); hipDeviceSynchronize(); hipMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), hipMemcpyDeviceToHost); dim3 gridDim2(10); hipMemcpy(hidden, hiddenNeurons, sizeof(float) * (nHidden+1), hipMemcpyHostToDevice); hipMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( forward_prop_kernel), 
dim3(gridDim2), dim3(blockDim), 0, 0, device_output2, hidden, w2, nHidden+1, nOutput); hipDeviceSynchronize(); hipMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), hipMemcpyDeviceToHost); double endTime4 = CycleTimer::currentSeconds(); double time = endTime4 - startTime; } void neuralNetwork::printCudaInfo() { int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
a722d44e381e06fae2bfa636336ce8fa648df4e5.cu
#include <iostream> #include <vector> #include <fstream> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <algorithm> #include <omp.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <curand.h> #include <curand_kernel.h> #include <cublas_v2.h> #include "CycleTimer.h" #define BLOCKSIZE 1024 #define TILE_WIDTH 32 #define SCAN_BLOCK_DIM BLOCKSIZE #include "exclusiveScan.cu_inl" #include "neuralNetwork.h" using namespace std; __global__ void forward_prop_kernel(float *device_output, float *input, float *weights, int num_first, int num_second) { int Row = blockIdx.y*TILE_WIDTH + threadIdx.y; int Col = blockIdx.x*TILE_WIDTH + threadIdx.x; float Pvalue = 0; __shared__ float prefixSumOutput[BLOCKSIZE]; __shared__ float prefixSumScratch[2 * BLOCKSIZE]; for (int k = 0; k < num_second; ++k) Pvalue += input[Row*num_second+k] * weights[k*num_second+Col]; device_output[Row*num_second+Col] = Pvalue; __syncthreads(); sharedMemExclusiveScan(threadIdx.x, device_output, prefixSumOutput, prefixSumScratch, BLOCKSIZE); __syncthreads(); if (threadIdx.x == 0 && blockIdx.x < num_second) { device_output[blockIdx.x] = 1/(1+exp(-1*prefixSumOutput[num_first])); } } __global__ void forward_prop_kernel_batch(float *device_output, float *input, float *weights, int num_first, int num_second, int batchSize) { int linearThreadIndex = threadIdx.x; int unit = blockIdx.x%num_second; int batch = blockIdx.x/num_second; __shared__ float prefixSumInput[BLOCKSIZE]; __shared__ float prefixSumOutput[BLOCKSIZE]; __shared__ float prefixSumScratch[2 * BLOCKSIZE]; if (linearThreadIndex < num_first) { prefixSumInput[linearThreadIndex] = input[batch*linearThreadIndex] * weights[linearThreadIndex*num_second + unit]; } __syncthreads(); sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput, prefixSumScratch, BLOCKSIZE); __syncthreads(); if (linearThreadIndex == 0 && unit < num_second) { device_output[batch*unit] = 
1/(1+exp(-1*prefixSumOutput[num_first])); } } neuralNetwork::neuralNetwork(int nI, int nH, int nO, int bS) : nInput(nI), nHidden(nH), nOutput(nO), batchSize(bS) { inputNeurons = new( float[batchSize*(nInput + 1)] ); for (int b= 0; b<batchSize; b++) { for (int i=0; i<nInput+1; i++) { if (i==nInput) { inputNeurons[(b+1)*(nInput)] = -1; } else { inputNeurons[b*(nInput+1) + i] = 0; } } } hiddenNeurons = new( float[batchSize*(nHidden + 1)] ); for (int b=0; b<batchSize; b++) { for (int i=0; i<nHidden+1; i++) { if (i==nHidden) { hiddenNeurons[(b+1)*(nHidden)] = -1; } else { hiddenNeurons[b*(nHidden+1) + i] = 0; } } } outputNeurons = new( float[batchSize*(nOutput + 1)] ); for ( int i=0; i < batchSize*(nOutput+1); i++ ) { outputNeurons[i] = 0; } wInputHidden = new( float*[nInput + 1] ); wInputHidden[0] = new (float[(nInput + 1)*nHidden]); for ( int i=1; i <= nInput; i++ ) { wInputHidden[i] = wInputHidden[i-1] + nHidden; } for ( int i=0; i <= nInput; i++ ) { for ( int j=0; j < nHidden; j++ ) wInputHidden[i][j] = 0; } wHiddenOutput = new( float*[nHidden + 1] ); wHiddenOutput[0] = new (float[(nHidden + 1)*nOutput]); for ( int i=1; i <= nHidden; i++ ) { wHiddenOutput[i] = wHiddenOutput[i-1] + nOutput; } for ( int i=0; i <= nHidden; i++ ) { for ( int j=0; j < nOutput; j++ ) wHiddenOutput[i][j] = 0; } initializeWeights(); } neuralNetwork::~neuralNetwork() { delete[] inputNeurons; delete[] hiddenNeurons; delete[] outputNeurons; for (int i=0; i <= nInput; i++) delete[] wInputHidden[i]; delete[] wInputHidden; for (int j=0; j <= nHidden; j++) delete[] wHiddenOutput[j]; delete[] wHiddenOutput; cudaFree(device_output1); cudaFree(input); cudaFree(w1); cudaFree(device_output2); cudaFree(hidden); cudaFree(w2); } bool neuralNetwork::saveWeights(char* filename) { fstream outputFile; outputFile.open(filename, ios::out); if ( outputFile.is_open() ) { outputFile.precision(50); for ( int i=0; i <= nInput; i++ ) { for ( int j=0; j < nHidden; j++ ) { outputFile << wInputHidden[i][j] << ","; } } 
for ( int i=0; i <= nHidden; i++ ) { for ( int j=0; j < nOutput; j++ ) { outputFile << wHiddenOutput[i][j]; if ( i * nOutput + j + 1 != (nHidden + 1) * nOutput ) outputFile << ","; } } cout << endl << "Neuron weights saved to '" << filename << "'" << endl; outputFile.close(); return true; } else { cout << endl << "Error - Weight output file '" << filename << "' could not be created: " << endl; return false; } } double neuralNetwork::getSetAccuracy( std::vector<dataEntry*>& set ) { double incorrectResults = 0; for ( int tp = 0; tp < (int) set.size(); tp++) { feedForward( set[tp]->pattern ); int predicted = distance(outputNeurons, max_element(outputNeurons, outputNeurons + nOutput)); i int expected = distance(set[tp]->target, max_element(set[tp]->target, set[tp]->target + nOutput)); if (predicted != expected) incorrectResults++; } return 100 - (incorrectResults/set.size() * 100); } void neuralNetwork::initializeWeights() { double startTime = CycleTimer::currentSeconds(); cudaMalloc(&device_output1, sizeof(float) * batchSize*nHidden); cudaMalloc(&input, sizeof(float) * batchSize*(nInput+1)); cudaMalloc(&w1, sizeof(float) * (nInput+1)*nHidden); cudaMalloc(&device_output2, sizeof(float) * batchSize*nOutput); cudaMalloc(&hidden, sizeof(float) * batchSize*(nHidden+1)); cudaMalloc(&w2, sizeof(float) * (nHidden+1)*nOutput); for(int i = 0; i <= nInput; i++) { for(int j = 0; j < nHidden; j++) { wInputHidden[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05); } } for(int i = 0; i <= nHidden; i++) { for(int j = 0; j < nOutput; j++) { wHiddenOutput[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05); } } double endTime = CycleTimer::currentSeconds(); double overallDuration = endTime - startTime; printf("Time Taken Seq:%f\n", overallDuration); } inline float neuralNetwork::activationFunction( float x ) { return 1/(1+exp(-x)); } void neuralNetwork::feedForwardBatch(vector<float*> patternVector) { for (int b = 0; b<batchSize; b++) { for(int i = 0; i < nInput+1; i++) { if 
(i!=nInput) { inputNeurons[b*(nInput+1) + i] = patternVector[b][i]; } } } dim3 blockDim(1024,1); dim3 gridDim(nHidden*batchSize); cudaMemcpy(input, inputNeurons, sizeof(float) * batchSize*(nInput+1), cudaMemcpyHostToDevice); cudaMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), cudaMemcpyHostToDevice); forward_prop_kernel_batch<<<gridDim, blockDim>>>(device_output1, input, w1, nInput+1, nHidden, batchSize); cudaThreadSynchronize(); cudaMemcpy(hiddenNeurons, device_output1, batchSize*nHidden*sizeof(float), cudaMemcpyDeviceToHost); dim3 gridDim2(nOutput*batchSize); cudaMemcpy(hidden, hiddenNeurons, sizeof(float) * batchSize*(nHidden+1), cudaMemcpyHostToDevice); cudaMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), cudaMemcpyHostToDevice); forward_prop_kernel_batch<<<gridDim2, blockDim>>>(device_output2, hidden, w2, nHidden+1, nOutput,batchSize); cudaThreadSynchronize(); cudaMemcpy(outputNeurons, device_output2, batchSize*nOutput*sizeof(float), cudaMemcpyDeviceToHost); } void neuralNetwork::feedForward(float* pattern) { for(int i = 0; i < nInput; i++) { inputNeurons[i] = pattern[i]; } double startTime = CycleTimer::currentSeconds(); dim3 blockDim(1024, 1); dim3 gridDim(128); cudaMemcpy(input, inputNeurons, sizeof(float) * (nInput+1), cudaMemcpyHostToDevice); cudaMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), cudaMemcpyHostToDevice); forward_prop_kernel<<<gridDim, blockDim>>>(device_output1, input, w1, nInput+1, nHidden); cudaThreadSynchronize(); cudaMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), cudaMemcpyDeviceToHost); dim3 gridDim2(10); cudaMemcpy(hidden, hiddenNeurons, sizeof(float) * (nHidden+1), cudaMemcpyHostToDevice); cudaMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), cudaMemcpyHostToDevice); forward_prop_kernel<<<gridDim2, blockDim>>>(device_output2, hidden, w2, nHidden+1, nOutput); cudaThreadSynchronize(); cudaMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), 
cudaMemcpyDeviceToHost); cudaMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), cudaMemcpyDeviceToHost); double endTime4 = CycleTimer::currentSeconds(); double time = endTime4 - startTime; } void neuralNetwork::printCudaInfo() { int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
46103e9d32dbc25541f3d097a350edc265d7929e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BrokenLineFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv, uint32_t hitsInFit, uint32_t maxNumberOfTuples, hipStream_t stream) { assert(tuples_); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto tkidGPU = cms::cuda::make_device_unique<caConstants::tindex_type[]>(maxNumberOfConcurrentFits_, stream); auto hitsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<6>) / sizeof(double), stream); auto hits_geGPU = cms::cuda::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6xNf<6>) / sizeof(float), stream); auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // fit triplets hipLaunchKernelGGL(( kernel_BLFastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 3, 3, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_BLFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(hipGetLastError()); if (fitNas4_) { // fit all as 4 hipLaunchKernelGGL(( kernel_BLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 4, 8, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_BLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, 
tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); } else { // fit quads hipLaunchKernelGGL(( kernel_BLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 4, 4, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_BLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); // fit penta (all 5) hipLaunchKernelGGL(( kernel_BLFastFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 5, 5, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_BLFit<5>), dim3(8), dim3(blockSize), 0, stream, tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(hipGetLastError()); // fit sexta and above (as 6) hipLaunchKernelGGL(( kernel_BLFastFit<6>), dim3(4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 6, 8, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_BLFit<6>), dim3(4), dim3(blockSize), 0, stream, tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(hipGetLastError()); } } // loop on concurrent fits }
46103e9d32dbc25541f3d097a350edc265d7929e.cu
#include "BrokenLineFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv, uint32_t hitsInFit, uint32_t maxNumberOfTuples, cudaStream_t stream) { assert(tuples_); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto tkidGPU = cms::cuda::make_device_unique<caConstants::tindex_type[]>(maxNumberOfConcurrentFits_, stream); auto hitsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<6>) / sizeof(double), stream); auto hits_geGPU = cms::cuda::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6xNf<6>) / sizeof(float), stream); auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // fit triplets kernel_BLFastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 3, 3, offset); cudaCheck(cudaGetLastError()); kernel_BLFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(cudaGetLastError()); if (fitNas4_) { // fit all as 4 kernel_BLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 4, 8, offset); cudaCheck(cudaGetLastError()); kernel_BLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); } else { // fit quads kernel_BLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_, 
tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 4, 4, offset); cudaCheck(cudaGetLastError()); kernel_BLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); // fit penta (all 5) kernel_BLFastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 5, 5, offset); cudaCheck(cudaGetLastError()); kernel_BLFit<5><<<8, blockSize, 0, stream>>>(tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(cudaGetLastError()); // fit sexta and above (as 6) kernel_BLFastFit<6><<<4, blockSize, 0, stream>>>(tuples_, tupleMultiplicity_, hv, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), 6, 8, offset); cudaCheck(cudaGetLastError()); kernel_BLFit<6><<<4, blockSize, 0, stream>>>(tupleMultiplicity_, bField_, outputSoa_, tkidGPU.get(), hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get()); cudaCheck(cudaGetLastError()); } } // loop on concurrent fits }
1ce321a0bc26f8754e3a7f8a64d46c2b640fe57e.hip
// !!! This is a file automatically generated by hipify!!! /* * turbopix.cu * * Created on: Aug 20, 2011 * Author: unknown * Modified by: Alvaro Collet (acollet@cs.cmu.edu) * * Copyright (c) 2011, Carnegie Mellon University. * All rights reserved. * * Software License Agreement (BSD License) * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <turbopix.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <cutil.h> #include <assert.h> // #include "bmp_io.h" extern void gaussianFilter(float *d_src, float *d_dest, float *d_temp, int width, int height, float sigma, int order); extern int do_reduction_count(int* d_idata, int* d_odata, int n); #define STD_CONFIG dim3((img_width+15)>>4,(img_height+15)>>4),dim3(16,16) // Device global memory uchar4* d_uchar4_temp1; uchar1* d_uchar1_temp1; float* d_float_temp1; float* d_float_temp2; int* d_int_temp1; float* d_speed; float* d_speed_dx; float* d_speed_dy; hipArray* d_float_array; hipArray* d_int2_array; hipArray* d_int_array; float2* d_seed_locations; int2* d_indirection_map; int* d_assignment_map; // Tex references texture<uchar4, 1, hipReadModeNormalizedFloat> texref_uchar4_1d; texture<uchar1, 1, hipReadModeNormalizedFloat> texref_uchar_1d; texture<int2, 2, hipReadModeElementType> texref_int2_2d; texture<float, 2, hipReadModeElementType> texref_float_2d; texture<int, 2, hipReadModeElementType> texref_int_2d; // Device constant memory __device__ __constant__ float2 d_seed_coords_const[_MAX_SUPERPIXELS]; // Host globals //unsigned long* h_input_image; unsigned int N_SUPERPIXELS; uint64_t* h_input_image; unsigned long img_width; long img_height; long img_pitch; int img_pixels; int img_pixels_pow2; int img_realpixels; // // Device Code // // Converts a 32-bit RGB image to normalized float grayscale // texref_uchar4_1d : input image __global__ void k_rgb2grayf(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockDim.x*blockIdx.x; int my_y = threadIdx.y + blockDim.y*blockIdx.y; if (my_x >= width || my_y >= height) return; const float4 transf = make_float4(0.2989, 0.5870, 0.1140, 0); float4 pixel = tex1Dfetch(texref_uchar4_1d, my_y*width+my_x); out[my_y*pitch+my_x] = transf.x*pixel.x + transf.y*pixel.y + transf.z*pixel.z; } // Converts an unsigned char grayscale image to normalized 
float grayscale // texref_uchar_1d : input image __global__ void k_gray2grayf(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockDim.x*blockIdx.x; int my_y = threadIdx.y + blockDim.y*blockIdx.y; if (my_x >= width || my_y >= height) return; float1 pixel = tex1Dfetch(texref_uchar_1d, my_y*width+my_x); out[my_y*pitch+my_x] = pixel.x; } // Computes gradient and stores its magnitude // texref_float_2d : input __global__ void k_gradient_mag(float* out, int width, int height, int pitch) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if (x >= width || y >= height) return; int out_idx = y*pitch+x; float gx = 0.5f * (tex2D(texref_float_2d, x+1, y) - tex2D(texref_float_2d, x-1, y)); float gy = 0.5f * (tex2D(texref_float_2d, x, y+1) - tex2D(texref_float_2d, x, y-1)); out[out_idx] = sqrtf(gx*gx + gy*gy); } // Computes gradient and stores its x/y components // texref_float_2d : input __global__ void k_gradient_xy(float* out_x, float* out_y, int width, int height, int pitch) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if (x >= width || y >= height) return; int out_idx = y*pitch+x; float gx = 0.5f * (tex2D(texref_float_2d, x+1, y) - tex2D(texref_float_2d, x-1, y)); float gy = 0.5f * (tex2D(texref_float_2d, x, y+1) - tex2D(texref_float_2d, x, y-1)); out_x[out_idx] = gx; out_y[out_idx] = gy; } // Given a grid of rows*cols cells, each cell of size x_dist*y_dist, place a seed in each one. // The seed is moved within a 2rad*2rad box around the center point of each cell such that // it lies at a point of maximum speed. 
// texref_float_2d : input speed map __global__ void k_place_seeds(float2* out, int x_dist, int y_dist, int rad, int rows, int cols) { int2 coord; float max = 0.0f; int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= cols || my_y >= rows) return; int cen_x = my_x * x_dist + (x_dist>>1); int cen_y = my_y * y_dist + (y_dist>>1); for (int y = -rad; y < rad; y++) { for (int x = -rad; x < rad; x++) { float val = tex2D(texref_float_2d, cen_x + x, cen_y + y); if (val > max) { max = val; coord = make_int2(cen_x+x, cen_y+y); } } } out[my_x + my_y*cols] = make_float2((float)coord.x, (float)coord.y); } // Initialize psi using the seed locations, by computing a distance transform. Each // output pixel will contain the distance to the closest seed. // d_seed_coords_const : seed locations stored in constant memory __global__ void k_init_psi(float* out_psi, int* out_id, int n_seeds, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; float my_xf = (float)my_x; float my_yf = (float)my_y; float min_dist2 = 1e9; int id = 0; for (int i = 0; i < n_seeds; i++) { float dx = my_xf - d_seed_coords_const[i].x; float dy = my_yf - d_seed_coords_const[i].y; float dist2 = dx*dx + dy*dy; min_dist2 = fminf(dist2, min_dist2); if (dist2 <= 1.0f) { id = i+1; } } int idx = my_y*pitch + my_x; out_psi[idx] = sqrtf(min_dist2) - 1.0f; out_id[idx] = id; } // Given an image gradient magnitude map and a smoothed version of it, compute // the speed map. 
__global__ void k_calc_speed_based_on_gradient (float* grad, float* smooth_grad, float rsigma, float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch + my_x; float mag = grad[idx]; float smooth_mag = smooth_grad[idx] * rsigma; float normGradMag = (mag / (0.1f + smooth_mag)); out[idx] = expf(-normGradMag); } // Perform initial smoothing of the image. This kernel smoothes by one timestep. __global__ void k_smooth_image (float* out, float timestep, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch+my_x; // Read surrounding values float left = tex2D(texref_float_2d, my_x-1, my_y); float right = tex2D(texref_float_2d, my_x+1, my_y); float up = tex2D(texref_float_2d, my_x, my_y+1); float down = tex2D(texref_float_2d, my_x, my_y-1); float cen = tex2D(texref_float_2d, my_x, my_y); float downleft = tex2D(texref_float_2d, my_x-1, my_y-1); float downright = tex2D(texref_float_2d, my_x+1, my_y-1); float upleft = tex2D(texref_float_2d, my_x-1, my_y+1); float upright = tex2D(texref_float_2d, my_x+1, my_y+1); // Central differences float dx = (right - left) * 0.5f; float dy = (up - down) * 0.5f; float dxx = right - 2.0f*cen + left; float dyy = up - 2.0f*cen + down; float dxy = (upright + downleft - upleft - downright) * 0.25f; // Curvature calculation float dx_2 = dx*dx; float dy_2 = dy*dy; float curv = -(dxx*(dy_2) - 2.0f*dx*dy*dxy + dyy*(dx_2)) / (1e-16f + dx_2 + dy_2); // Update out[idx] = cen - timestep*curv; } // Evolve psi one timestep. 
__global__ void k_evolve_psi (float* out, int* ids, float* speed, float* speed_dx, float* speed_dy, float timestep, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch+my_x; int2 closest_point = tex2D(texref_int2_2d, my_x, my_y); int speed_idx = closest_point.y*pitch + closest_point.x; // Read surrounding psi values float left = tex2D(texref_float_2d, my_x-1, my_y); float right = tex2D(texref_float_2d, my_x+1, my_y); float up = tex2D(texref_float_2d, my_x, my_y+1); float down = tex2D(texref_float_2d, my_x, my_y-1); float cen = tex2D(texref_float_2d, my_x, my_y); float downleft = tex2D(texref_float_2d, my_x-1, my_y-1); float downright = tex2D(texref_float_2d, my_x+1, my_y-1); float upleft = tex2D(texref_float_2d, my_x-1, my_y+1); float upright = tex2D(texref_float_2d, my_x+1, my_y+1); int id_cen = tex2D(texref_int_2d, my_x, my_y); int id_right = tex2D(texref_int_2d, my_x+1, my_y); int id_up = tex2D(texref_int_2d, my_x, my_y+1); int id_across = tex2D(texref_int_2d, my_x+1, my_y+1); bool on_skel = (id_cen && (id_right || id_up || id_across)); // Upwind derivatives float dx_plus = right - cen; float dx_minus = cen - left; float dy_plus = up - cen; float dy_minus = cen - down; float dx_p_max = fmaxf(0.0f, dx_plus); float dx_p_min = fminf(0.0f, dx_plus); float dx_m_max = fmaxf(0.0f, dx_minus); float dx_m_min = fminf(0.0f, dx_minus); float dy_p_max = fmaxf(0.0f, dy_plus); float dy_p_min = fminf(0.0f, dy_plus); float dy_m_max = fmaxf(0.0f, dy_minus); float dy_m_min = fminf(0.0f, dy_minus); float grad_plus = dx_m_max*dx_m_max + dx_p_min*dx_p_min + dy_m_max*dy_m_max + dy_p_min*dy_p_min; float grad_minus = dx_p_max*dx_p_max + dx_m_min*dx_m_min + dy_p_max*dy_p_max + dy_m_min*dy_m_min; grad_minus = sqrtf(grad_minus); grad_plus = sqrtf(grad_plus); /* float dx_1 = cen - fminf(right, left); float dy_1 = cen - fminf(up, down); float 
dx_plus = fmaxf(dx_1, 0.0f); float dx_minus = fminf(dx_1, 0.0f); float dy_plus = fmaxf(dy_1, 0.0f); float dy_minus = fminf(dy_1, 0.0f); float grad_plus = sqrtf(dx_plus*dx_plus + dy_plus*dy_plus); float grad_minus = sqrtf(dx_minus*dx_minus + dy_minus*dy_minus);*/ // Central differences float dx = (right - left) * 0.5f; float dy = (up - down) * 0.5f; float dxx = right - 2.0f*cen + left; float dyy = up - 2.0f*cen + down; float dxy = (upright + downleft - upleft - downright) * 0.25f; // Curvature calculation float dx_2 = dx*dx; float dy_2 = dy*dy; float mag = sqrtf(dx_2+dy_2); float curv = (dxx*dy_2 - 2.0f*dx*dy*dxy + dyy*dx_2) / (mag*(dx_2+dy_2)+1e-16f); curv = fmaxf(-1.0f, fminf(1.0f, curv)); // Doublet term float doublet = (dx*speed_dx[speed_idx] + dy*speed_dy[speed_idx]) / (mag+1e-16f); doublet = fmaxf(0.0f, doublet); // Calculate speed at this pixel float final_speed = speed[speed_idx]*(1.0f - 0.3f*curv) - doublet; final_speed = fminf(1.0f, fmaxf(-1.0f, final_speed)); // Determine final delta_psi based on sign of speed float final_grad = fmaxf(0.0f, final_speed)*grad_plus + fminf(0.0f, final_speed)*grad_minus; // Update psi float new_psi = cen - timestep*final_grad; out[idx] = on_skel? cen : new_psi; // Update id int new_id = (cen > 0.0f && new_psi <= 0.0f) ? ids[speed_idx] : id_cen; ids[idx] = new_id; } // Initialize the jump flood algorithm for computing feature distance transform. // Feature pixels are defined to be those that have psi<=0 (pixels inside the expanding boundary). // Equivalently, these pixels have nonzero SEED IDs. __global__ void k_init_jumpflood (int2* out, int* ids, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch + my_x; int2 outval = make_int2(-width, -height); if (ids[idx] > 0) outval = make_int2(my_x, my_y); out[idx] = outval; } // Perform one pass of the jump flood algorithm. 
Updates indirection map for this value of stride. // Optionally computes a distance transform essentially for free, if dist_out != NULL. // texref_shirt_2d : current state of the indirection map __global__ void k_jumpflood_pass (int2* out, int stride, int width, int height, int pitch) { bool valid[8]; int2 point0; int2 point1; int2 point2; int2 point3; int2 point4; int2 point5; int2 point6; int2 point7; int2 point8; int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int min_dist2 = INT_MAX; int2 min_point; bool xmin = my_x-stride >= 0; bool xmax = my_x+stride < width; bool ymin = my_y-stride >= 0; bool ymax = my_y+stride < height; valid[0] = xmin && ymin; valid[1] = ymin; valid[2] = xmax && ymin; valid[3] = xmin; valid[4] = xmax; valid[5] = xmin && ymax; valid[6] = ymax; valid[7] = xmax && ymax; if (valid[0]) point0 = tex2D(texref_int2_2d, my_x-stride, my_y-stride); if (valid[1]) point1 = tex2D(texref_int2_2d, my_x, my_y-stride); if (valid[2]) point2 = tex2D(texref_int2_2d, my_x+stride, my_y-stride); if (valid[3]) point3 = tex2D(texref_int2_2d, my_x-stride, my_y); if (valid[4]) point4 = tex2D(texref_int2_2d, my_x+stride, my_y); if (valid[5]) point5 = tex2D(texref_int2_2d, my_x-stride, my_y+stride); if (valid[6]) point6 = tex2D(texref_int2_2d, my_x, my_y+stride); if (valid[7]) point7 = tex2D(texref_int2_2d, my_x+stride, my_y+stride); point8 = tex2D(texref_int2_2d, my_x, my_y); { int dx = point0.x - my_x; int dy = point0.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[0] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point0; } } { int dx = point1.x - my_x; int dy = point1.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[1] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point1; } } { int dx = point2.x - my_x; int dy = point2.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[2] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point2; } } { int dx = point3.x - my_x; 
int dy = point3.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[3] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point3; } } { int dx = point4.x - my_x; int dy = point4.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[4] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point4; } } { int dx = point5.x - my_x; int dy = point5.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[5] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point5; } } { int dx = point6.x - my_x; int dy = point6.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[6] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point6; } } { int dx = point7.x - my_x; int dy = point7.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[7] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point7; } } { int dx = point8.x - my_x; int dy = point8.y - my_y; int dist2 = dx*dx + dy*dy; if (dist2 < min_dist2) { min_dist2 = dist2; min_point = point8; } } int idx = my_y*pitch + my_x; out[idx] = min_point; } // Exracts the zero level set, the boundary between superpixels. It looks for // discontinuities in the Superpixel ID assignment map. // texref_int_2d : superpixel ID map __global__ void k_extract_level_set(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int me = tex2D(texref_int_2d, my_x, my_y); int up = tex2D(texref_int_2d, my_x, my_y+1); int right = tex2D(texref_int_2d, my_x+1, my_y); int idx = my_y*pitch + my_x; float outval = 0.0f; if ( me != up || me != right) { outval = 1.0f; } out[idx] = outval; } // Fills any unassigned holes in the image not yet claimed by a superpixel. // It does this by using the indirection map to calculate a generalized Voronoi // diagram (each pixel is assigned to its nearest superpixel). 
// texref_int_2d : superpixel ID map // texref_int2_2d : indirection map __global__ void k_fill_holes(int* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int2 nearest_point = tex2D(texref_int2_2d, my_x, my_y); int nearest_id = tex2D(texref_int_2d, nearest_point.x, nearest_point.y); out[my_y*pitch+my_x] = nearest_id; } // // Host code // int get_next_pow2(int n) { return 1 << (int)ceilf(log2f((float)n)); } // Initialize GPU Device (equivalent to CUT_DEVICE_INIT) void InitDevice(int dev = 0, bool verb = false) { int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "cutil error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (dev < 0) dev = 0; if (dev > deviceCount-1) dev = deviceCount - 1; hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major < 1) { fprintf(stderr, "cutil error: device does not support CUDA.\n"); exit(EXIT_FAILURE); } if (verb) fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name); CUDA_SAFE_CALL(hipSetDevice(dev)); } /* // Writes a floating-point array in device memory to a BMP file as a grayscale image. 
void write_float_bmp (const char* file, float* d_ptr) { float* h_out = new float [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; hipMemcpy(h_out, d_ptr, img_pixels*sizeof(float), hipMemcpyDeviceToHost); unsigned char* p = (unsigned char*)tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { float val = h_out[y*img_pitch+x]; val=::max(0.0f, ::min(1.0f, val)); *(p++) = (unsigned char)(val * 255.0f); *(p++) = (unsigned char)(val * 255.0f); *(p++) = (unsigned char)(val * 255.0f); *p++ = 255; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } // Writes an array in device memory of 16-bit integers to a BMP file. // Each possible integer value is mapped to one of 8 colors, with 0 uniquely mapped to black. void write_int_bmp (const char* file, int* d_ptr) { static unsigned long colors[] = {0xFF8000, 0xFF0000, 0x00FF00, 0x0000FF, 0x00FFFF, 0xFF00FF, 0xFFFF00, 0xFFFFFF}; int* h_out = new int [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; hipMemcpy(h_out, d_ptr, img_pixels*sizeof(int), hipMemcpyDeviceToHost); unsigned long* p = tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { int i = y*img_pitch+x; *p++ = (h_out[i] == 0) ? 0x0 : colors[h_out[i] & 0x7]; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } // Writes an array in device memory of 16-bit integers to a BMP file. // Each possible integer value is mapped to one of 8 colors, with 0 uniquely mapped to black. void write_int_bmp2 (const char* file, int* d_ptr) { int* h_out = new int [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; uint16_t out16; hipMemcpy(h_out, d_ptr, img_pixels*sizeof(int), hipMemcpyDeviceToHost); unsigned long* p = tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { int i = y*img_pitch+x; out16 = 5 * h_out[i] & 0xFFFF; *p++ = (h_out[i] == 0) ? 
0x0 : out16 + out16<<16; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } */ // Copies an image in linear device memory to a Cuda Array texture. template <class T> void copy_lin_to_tex(hipArray* dest, T* src) { CUDA_SAFE_CALL(hipMemcpy2DToArray(dest, 0, 0, src, img_pitch*sizeof(T), img_width*sizeof(T), img_height, hipMemcpyDeviceToDevice)); } void calc_speed_based_on_gradient() { // Get gradient magnitude copy_lin_to_tex(d_float_array, d_float_temp1); k_gradient_mag<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Filter the gradient magnitude float sigma = floorf(sqrtf((float)img_width * (float)img_height / (float)N_SUPERPIXELS) / 2.5f); gaussianFilter(d_float_temp1, d_speed, d_float_temp2, img_pitch, img_height, sigma, 0); // Calculate speed k_calc_speed_based_on_gradient<<<STD_CONFIG>>> (d_float_temp1, d_speed, 1.0f / (sigma*2.5f), d_speed, img_width, img_height, img_pitch); // Calculate derivatives of speed copy_lin_to_tex(d_float_array, d_speed); k_gradient_xy<<<STD_CONFIG>>> (d_speed_dx, d_speed_dy, img_width, img_height, img_pitch); //if (DEBUG_IMG) write_float_bmp("speed.bmp", d_speed); } void place_seeds_and_init_psi() { // Do initial seed placement float size_grid_r = 1.0f / sqrtf((float)img_pixels / (float)N_SUPERPIXELS); float rows_f = img_height * size_grid_r; float cols_f = img_width * size_grid_r; int x_dist = (int)ceilf(1.0f/size_grid_r); int y_dist = (int)ceilf(1.0f/size_grid_r); int rows = (int)ceilf(rows_f - 1); int cols = (int)ceilf(cols_f - 1); unsigned int max_shift=(unsigned int)ceilf( min((float)x_dist,(float)y_dist)*0.25f - 1.0f ); // our seed location array is only of size N_SUPERPIXELS. actual number of seeds should be less, in theory. 
assert(rows*cols <= N_SUPERPIXELS); // Place the seeds hipLaunchKernelGGL(( k_place_seeds), dim3(dim3((cols+15)>>4, (rows+15)>>4)), dim3(dim3(16, 16)) , 0, 0, d_seed_locations, x_dist, y_dist, max_shift, rows, cols); CUT_CHECK_ERROR ("place seeds kernel failed"); // Initialize psi (distance func) CUDA_SAFE_CALL(hipMemcpyToSymbol(d_seed_coords_const, d_seed_locations, rows*cols*sizeof(float2), 0, hipMemcpyDeviceToDevice)); k_init_psi<<<STD_CONFIG>>> (d_float_temp1, d_assignment_map, rows*cols, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("init psi kernel failed"); //if (DEBUG_IMG) write_float_bmp("psi_start.bmp", d_float_temp1); } void feature_distance_transform() { int n_passes = (int)ceilf(log2f((float)max((int)img_width, (int)img_height))) - 1; // Initialize the indirection map copy_lin_to_tex(d_float_array, d_float_temp1); k_init_jumpflood<<<STD_CONFIG>>> (d_indirection_map, d_assignment_map, img_width, img_height, img_pitch); // Perform logn+1 passes, halving the stride each time for (int i = n_passes; i >= 0; i--) { copy_lin_to_tex(d_int2_array, d_indirection_map); k_jumpflood_pass<<<STD_CONFIG>>> (d_indirection_map, 1 << i, img_width, img_height, img_pitch); } /* // Another logn passes at stride=1 cleans up any remaining errors for (int i = 0; i < n_passes; i++) { copy_lin_to_tex(d_int2_array, d_indirection_map); k_jumpflood_pass<<<STD_CONFIG>>> (d_indirection_map, 1, img_width, img_height, img_pitch); }*/ copy_lin_to_tex(d_int2_array, d_indirection_map); } void _initialize(int dev, long width, long height, unsigned int nSuperpixels) { // Prep the device InitDevice(dev); // Verify that number of superpixels is not too large if (nSuperpixels > _MAX_SUPERPIXELS){ fprintf(stderr, "Error: nSuperpixels > MAX_SUPERPIXELS.\n"); exit(EXIT_FAILURE); }else N_SUPERPIXELS = nSuperpixels; img_width = width; img_height = height; img_realpixels = img_width*img_height; img_pitch = (img_width + 31) & (~31); img_pixels = img_pitch*img_height; img_pixels_pow2 = 
get_next_pow2(img_pixels); // Channel formats for textures hipChannelFormatDesc d_float_channel_desc = hipCreateChannelDesc<float>(); texref_float_2d.addressMode[0] = hipAddressModeClamp; texref_float_2d.addressMode[1] = hipAddressModeClamp; texref_float_2d.filterMode = hipFilterModePoint; texref_float_2d.normalized = false; hipChannelFormatDesc d_int2_channel_desc = hipCreateChannelDesc<int2>(); texref_int2_2d.addressMode[0] = hipAddressModeClamp; texref_int2_2d.addressMode[1] = hipAddressModeClamp; texref_int2_2d.filterMode = hipFilterModePoint; texref_int2_2d.normalized = false; hipChannelFormatDesc d_int_channel_desc = hipCreateChannelDesc<int>(); texref_int_2d.addressMode[0] = hipAddressModeClamp; texref_int_2d.addressMode[1] = hipAddressModeClamp; texref_int_2d.filterMode = hipFilterModePoint; texref_int_2d.normalized = false; // Allocate buffers CUDA_SAFE_CALL(hipMalloc((void**)&d_uchar4_temp1, img_realpixels*sizeof(uchar4))); CUDA_SAFE_CALL(hipMalloc((void**)&d_uchar1_temp1, img_realpixels*sizeof(uchar1))); CUDA_SAFE_CALL(hipMalloc((void**)&d_float_temp1, img_pixels*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_float_temp2, img_pixels*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_int_temp1, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void**)&d_indirection_map, img_pixels*sizeof(int2))); CUDA_SAFE_CALL(hipMalloc((void**)&d_assignment_map, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void**)&d_speed, img_pixels*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_speed_dx, img_pixels*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_speed_dy, img_pixels*sizeof(float))); CUDA_SAFE_CALL(hipMallocArray(&d_float_array, &d_float_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(hipMallocArray(&d_int2_array, &d_int2_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(hipMallocArray(&d_int_array, &d_int_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(hipMalloc((void**)&d_seed_locations, 
N_SUPERPIXELS*sizeof(float2))); // Bind arrays to texture references CUDA_SAFE_CALL(hipBindTextureToArray(texref_float_2d, d_float_array, d_float_channel_desc)); CUDA_SAFE_CALL(hipBindTextureToArray(texref_int2_2d, d_int2_array, d_int2_channel_desc)); CUDA_SAFE_CALL(hipBindTextureToArray(texref_int_2d, d_int_array, d_int_channel_desc)); } // Image in RGB encoded in an unsigned long void prepare_image(uint64_t* input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. CUDA_SAFE_CALL(hipMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(hipMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Convert image to greyscale CUDA_SAFE_CALL(hipMemcpy(d_uchar4_temp1, input_img, img_realpixels*sizeof(uchar4), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipBindTexture(0, texref_uchar4_1d, d_uchar4_temp1, img_realpixels*sizeof(uchar4))); k_rgb2grayf<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Prepare image if input is already float and normalized void prepare_image(float *input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. 
CUDA_SAFE_CALL(hipMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(hipMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Copy data to device CUDA_SAFE_CALL(hipMemcpy(d_float_temp1, input_img, img_pixels*sizeof(float), hipMemcpyHostToDevice)); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Prepare image if input is uchars void prepare_image(unsigned char* input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. CUDA_SAFE_CALL(hipMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(hipMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Convert image to internal format CUDA_SAFE_CALL(hipMemcpy(d_uchar1_temp1, input_img, img_realpixels*sizeof(uchar1), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipBindTexture(0, texref_uchar_1d, d_uchar1_temp1, img_realpixels*sizeof(uchar1))); k_gray2grayf<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Assumption: Someone must have already called "_initialize" // If input is a float, we assume somebody else has already converted the // image to grayscale and normalized template <typename T> int extractSuperpixels(unsigned int* output, T* input_img) { /* unsigned int timer = 0; cutCreateTimer(&timer); cutStartTimer(timer); */ int iter; prepare_image(input_img); calc_speed_based_on_gradient(); place_seeds_and_init_psi(); // Evolve psi until little 
forward progress is made int old_covered_pixels = do_reduction_count(d_assignment_map, d_int_temp1, img_pixels_pow2); for (iter = 0; iter < MAX_ITERATIONS; iter++) { feature_distance_transform(); copy_lin_to_tex(d_int_array, d_assignment_map); k_evolve_psi<<<STD_CONFIG>>> (d_float_temp1, d_assignment_map, d_speed, d_speed_dx, d_speed_dy, 0.5f, img_width, img_height, img_pitch); int new_covered_pixels = do_reduction_count(d_assignment_map, d_int_temp1, img_pixels_pow2); float relative_inc = (float)(new_covered_pixels - old_covered_pixels) / (float)img_realpixels; old_covered_pixels = new_covered_pixels; if (relative_inc < 1e-4 && new_covered_pixels >= img_realpixels/2) break; } // Assign any remaining unassigned areas and generate the final superpixel boundaries k_fill_holes<<<STD_CONFIG>>> (d_assignment_map, img_width, img_height, img_pitch); hipDeviceSynchronize(); /* cutStopTimer(timer); printf("time: %f ms\n", cutGetTimerValue(timer)); */ // Copy data from device memory to output hipMemcpy(output, d_assignment_map, img_pixels*sizeof(int), hipMemcpyDeviceToHost); return iter; } void _cleanup() { hipFree(d_uchar4_temp1); hipFree(d_uchar1_temp1); hipFree(d_float_temp1); hipFree(d_float_temp2); hipFree(d_int_temp1); hipFree(d_indirection_map); hipFree(d_assignment_map); hipFree(d_speed); hipFree(d_speed_dx); hipFree(d_speed_dy); hipFree(d_seed_locations); hipFreeArray(d_float_array); hipFreeArray(d_int2_array); hipFreeArray(d_int_array); } // Template explicit instantiation template int extractSuperpixels(unsigned int* output, unsigned char* input_img); template int extractSuperpixels(unsigned int* output, float* input_img); template int extractSuperpixels(unsigned int* output, uint64_t* input_img);
1ce321a0bc26f8754e3a7f8a64d46c2b640fe57e.cu
/* * turbopix.cu * * Created on: Aug 20, 2011 * Author: unknown * Modified by: Alvaro Collet (acollet@cs.cmu.edu) * * Copyright (c) 2011, Carnegie Mellon University. * All rights reserved. * * Software License Agreement (BSD License) * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <turbopix.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <cutil.h> #include <assert.h> // #include "bmp_io.h" extern void gaussianFilter(float *d_src, float *d_dest, float *d_temp, int width, int height, float sigma, int order); extern int do_reduction_count(int* d_idata, int* d_odata, int n); #define STD_CONFIG dim3((img_width+15)>>4,(img_height+15)>>4),dim3(16,16) // Device global memory uchar4* d_uchar4_temp1; uchar1* d_uchar1_temp1; float* d_float_temp1; float* d_float_temp2; int* d_int_temp1; float* d_speed; float* d_speed_dx; float* d_speed_dy; cudaArray* d_float_array; cudaArray* d_int2_array; cudaArray* d_int_array; float2* d_seed_locations; int2* d_indirection_map; int* d_assignment_map; // Tex references texture<uchar4, 1, cudaReadModeNormalizedFloat> texref_uchar4_1d; texture<uchar1, 1, cudaReadModeNormalizedFloat> texref_uchar_1d; texture<int2, 2, cudaReadModeElementType> texref_int2_2d; texture<float, 2, cudaReadModeElementType> texref_float_2d; texture<int, 2, cudaReadModeElementType> texref_int_2d; // Device constant memory __device__ __constant__ float2 d_seed_coords_const[_MAX_SUPERPIXELS]; // Host globals //unsigned long* h_input_image; unsigned int N_SUPERPIXELS; uint64_t* h_input_image; unsigned long img_width; long img_height; long img_pitch; int img_pixels; int img_pixels_pow2; int img_realpixels; // // Device Code // // Converts a 32-bit RGB image to normalized float grayscale // texref_uchar4_1d : input image __global__ void k_rgb2grayf(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockDim.x*blockIdx.x; int my_y = threadIdx.y + blockDim.y*blockIdx.y; if (my_x >= width || my_y >= height) return; const float4 transf = make_float4(0.2989, 0.5870, 0.1140, 0); float4 pixel = tex1Dfetch(texref_uchar4_1d, my_y*width+my_x); out[my_y*pitch+my_x] = transf.x*pixel.x + transf.y*pixel.y + transf.z*pixel.z; } // Converts an unsigned char grayscale image to normalized 
float grayscale // texref_uchar_1d : input image __global__ void k_gray2grayf(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockDim.x*blockIdx.x; int my_y = threadIdx.y + blockDim.y*blockIdx.y; if (my_x >= width || my_y >= height) return; float1 pixel = tex1Dfetch(texref_uchar_1d, my_y*width+my_x); out[my_y*pitch+my_x] = pixel.x; } // Computes gradient and stores its magnitude // texref_float_2d : input __global__ void k_gradient_mag(float* out, int width, int height, int pitch) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if (x >= width || y >= height) return; int out_idx = y*pitch+x; float gx = 0.5f * (tex2D(texref_float_2d, x+1, y) - tex2D(texref_float_2d, x-1, y)); float gy = 0.5f * (tex2D(texref_float_2d, x, y+1) - tex2D(texref_float_2d, x, y-1)); out[out_idx] = sqrtf(gx*gx + gy*gy); } // Computes gradient and stores its x/y components // texref_float_2d : input __global__ void k_gradient_xy(float* out_x, float* out_y, int width, int height, int pitch) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if (x >= width || y >= height) return; int out_idx = y*pitch+x; float gx = 0.5f * (tex2D(texref_float_2d, x+1, y) - tex2D(texref_float_2d, x-1, y)); float gy = 0.5f * (tex2D(texref_float_2d, x, y+1) - tex2D(texref_float_2d, x, y-1)); out_x[out_idx] = gx; out_y[out_idx] = gy; } // Given a grid of rows*cols cells, each cell of size x_dist*y_dist, place a seed in each one. // The seed is moved within a 2rad*2rad box around the center point of each cell such that // it lies at a point of maximum speed. 
// texref_float_2d : input speed map __global__ void k_place_seeds(float2* out, int x_dist, int y_dist, int rad, int rows, int cols) { int2 coord; float max = 0.0f; int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= cols || my_y >= rows) return; int cen_x = my_x * x_dist + (x_dist>>1); int cen_y = my_y * y_dist + (y_dist>>1); for (int y = -rad; y < rad; y++) { for (int x = -rad; x < rad; x++) { float val = tex2D(texref_float_2d, cen_x + x, cen_y + y); if (val > max) { max = val; coord = make_int2(cen_x+x, cen_y+y); } } } out[my_x + my_y*cols] = make_float2((float)coord.x, (float)coord.y); } // Initialize psi using the seed locations, by computing a distance transform. Each // output pixel will contain the distance to the closest seed. // d_seed_coords_const : seed locations stored in constant memory __global__ void k_init_psi(float* out_psi, int* out_id, int n_seeds, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; float my_xf = (float)my_x; float my_yf = (float)my_y; float min_dist2 = 1e9; int id = 0; for (int i = 0; i < n_seeds; i++) { float dx = my_xf - d_seed_coords_const[i].x; float dy = my_yf - d_seed_coords_const[i].y; float dist2 = dx*dx + dy*dy; min_dist2 = fminf(dist2, min_dist2); if (dist2 <= 1.0f) { id = i+1; } } int idx = my_y*pitch + my_x; out_psi[idx] = sqrtf(min_dist2) - 1.0f; out_id[idx] = id; } // Given an image gradient magnitude map and a smoothed version of it, compute // the speed map. 
__global__ void k_calc_speed_based_on_gradient (float* grad, float* smooth_grad, float rsigma, float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch + my_x; float mag = grad[idx]; float smooth_mag = smooth_grad[idx] * rsigma; float normGradMag = (mag / (0.1f + smooth_mag)); out[idx] = expf(-normGradMag); } // Perform initial smoothing of the image. This kernel smoothes by one timestep. __global__ void k_smooth_image (float* out, float timestep, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch+my_x; // Read surrounding values float left = tex2D(texref_float_2d, my_x-1, my_y); float right = tex2D(texref_float_2d, my_x+1, my_y); float up = tex2D(texref_float_2d, my_x, my_y+1); float down = tex2D(texref_float_2d, my_x, my_y-1); float cen = tex2D(texref_float_2d, my_x, my_y); float downleft = tex2D(texref_float_2d, my_x-1, my_y-1); float downright = tex2D(texref_float_2d, my_x+1, my_y-1); float upleft = tex2D(texref_float_2d, my_x-1, my_y+1); float upright = tex2D(texref_float_2d, my_x+1, my_y+1); // Central differences float dx = (right - left) * 0.5f; float dy = (up - down) * 0.5f; float dxx = right - 2.0f*cen + left; float dyy = up - 2.0f*cen + down; float dxy = (upright + downleft - upleft - downright) * 0.25f; // Curvature calculation float dx_2 = dx*dx; float dy_2 = dy*dy; float curv = -(dxx*(dy_2) - 2.0f*dx*dy*dxy + dyy*(dx_2)) / (1e-16f + dx_2 + dy_2); // Update out[idx] = cen - timestep*curv; } // Evolve psi one timestep. 
__global__ void k_evolve_psi (float* out, int* ids, float* speed, float* speed_dx, float* speed_dy, float timestep, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch+my_x; int2 closest_point = tex2D(texref_int2_2d, my_x, my_y); int speed_idx = closest_point.y*pitch + closest_point.x; // Read surrounding psi values float left = tex2D(texref_float_2d, my_x-1, my_y); float right = tex2D(texref_float_2d, my_x+1, my_y); float up = tex2D(texref_float_2d, my_x, my_y+1); float down = tex2D(texref_float_2d, my_x, my_y-1); float cen = tex2D(texref_float_2d, my_x, my_y); float downleft = tex2D(texref_float_2d, my_x-1, my_y-1); float downright = tex2D(texref_float_2d, my_x+1, my_y-1); float upleft = tex2D(texref_float_2d, my_x-1, my_y+1); float upright = tex2D(texref_float_2d, my_x+1, my_y+1); int id_cen = tex2D(texref_int_2d, my_x, my_y); int id_right = tex2D(texref_int_2d, my_x+1, my_y); int id_up = tex2D(texref_int_2d, my_x, my_y+1); int id_across = tex2D(texref_int_2d, my_x+1, my_y+1); bool on_skel = (id_cen && (id_right || id_up || id_across)); // Upwind derivatives float dx_plus = right - cen; float dx_minus = cen - left; float dy_plus = up - cen; float dy_minus = cen - down; float dx_p_max = fmaxf(0.0f, dx_plus); float dx_p_min = fminf(0.0f, dx_plus); float dx_m_max = fmaxf(0.0f, dx_minus); float dx_m_min = fminf(0.0f, dx_minus); float dy_p_max = fmaxf(0.0f, dy_plus); float dy_p_min = fminf(0.0f, dy_plus); float dy_m_max = fmaxf(0.0f, dy_minus); float dy_m_min = fminf(0.0f, dy_minus); float grad_plus = dx_m_max*dx_m_max + dx_p_min*dx_p_min + dy_m_max*dy_m_max + dy_p_min*dy_p_min; float grad_minus = dx_p_max*dx_p_max + dx_m_min*dx_m_min + dy_p_max*dy_p_max + dy_m_min*dy_m_min; grad_minus = sqrtf(grad_minus); grad_plus = sqrtf(grad_plus); /* float dx_1 = cen - fminf(right, left); float dy_1 = cen - fminf(up, down); float 
dx_plus = fmaxf(dx_1, 0.0f); float dx_minus = fminf(dx_1, 0.0f); float dy_plus = fmaxf(dy_1, 0.0f); float dy_minus = fminf(dy_1, 0.0f); float grad_plus = sqrtf(dx_plus*dx_plus + dy_plus*dy_plus); float grad_minus = sqrtf(dx_minus*dx_minus + dy_minus*dy_minus);*/ // Central differences float dx = (right - left) * 0.5f; float dy = (up - down) * 0.5f; float dxx = right - 2.0f*cen + left; float dyy = up - 2.0f*cen + down; float dxy = (upright + downleft - upleft - downright) * 0.25f; // Curvature calculation float dx_2 = dx*dx; float dy_2 = dy*dy; float mag = sqrtf(dx_2+dy_2); float curv = (dxx*dy_2 - 2.0f*dx*dy*dxy + dyy*dx_2) / (mag*(dx_2+dy_2)+1e-16f); curv = fmaxf(-1.0f, fminf(1.0f, curv)); // Doublet term float doublet = (dx*speed_dx[speed_idx] + dy*speed_dy[speed_idx]) / (mag+1e-16f); doublet = fmaxf(0.0f, doublet); // Calculate speed at this pixel float final_speed = speed[speed_idx]*(1.0f - 0.3f*curv) - doublet; final_speed = fminf(1.0f, fmaxf(-1.0f, final_speed)); // Determine final delta_psi based on sign of speed float final_grad = fmaxf(0.0f, final_speed)*grad_plus + fminf(0.0f, final_speed)*grad_minus; // Update psi float new_psi = cen - timestep*final_grad; out[idx] = on_skel? cen : new_psi; // Update id int new_id = (cen > 0.0f && new_psi <= 0.0f) ? ids[speed_idx] : id_cen; ids[idx] = new_id; } // Initialize the jump flood algorithm for computing feature distance transform. // Feature pixels are defined to be those that have psi<=0 (pixels inside the expanding boundary). // Equivalently, these pixels have nonzero SEED IDs. __global__ void k_init_jumpflood (int2* out, int* ids, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int idx = my_y*pitch + my_x; int2 outval = make_int2(-width, -height); if (ids[idx] > 0) outval = make_int2(my_x, my_y); out[idx] = outval; } // Perform one pass of the jump flood algorithm. 
Updates indirection map for this value of stride. // Optionally computes a distance transform essentially for free, if dist_out != NULL. // texref_shirt_2d : current state of the indirection map __global__ void k_jumpflood_pass (int2* out, int stride, int width, int height, int pitch) { bool valid[8]; int2 point0; int2 point1; int2 point2; int2 point3; int2 point4; int2 point5; int2 point6; int2 point7; int2 point8; int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int min_dist2 = INT_MAX; int2 min_point; bool xmin = my_x-stride >= 0; bool xmax = my_x+stride < width; bool ymin = my_y-stride >= 0; bool ymax = my_y+stride < height; valid[0] = xmin && ymin; valid[1] = ymin; valid[2] = xmax && ymin; valid[3] = xmin; valid[4] = xmax; valid[5] = xmin && ymax; valid[6] = ymax; valid[7] = xmax && ymax; if (valid[0]) point0 = tex2D(texref_int2_2d, my_x-stride, my_y-stride); if (valid[1]) point1 = tex2D(texref_int2_2d, my_x, my_y-stride); if (valid[2]) point2 = tex2D(texref_int2_2d, my_x+stride, my_y-stride); if (valid[3]) point3 = tex2D(texref_int2_2d, my_x-stride, my_y); if (valid[4]) point4 = tex2D(texref_int2_2d, my_x+stride, my_y); if (valid[5]) point5 = tex2D(texref_int2_2d, my_x-stride, my_y+stride); if (valid[6]) point6 = tex2D(texref_int2_2d, my_x, my_y+stride); if (valid[7]) point7 = tex2D(texref_int2_2d, my_x+stride, my_y+stride); point8 = tex2D(texref_int2_2d, my_x, my_y); { int dx = point0.x - my_x; int dy = point0.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[0] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point0; } } { int dx = point1.x - my_x; int dy = point1.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[1] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point1; } } { int dx = point2.x - my_x; int dy = point2.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[2] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point2; } } { int dx = point3.x - my_x; 
int dy = point3.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[3] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point3; } } { int dx = point4.x - my_x; int dy = point4.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[4] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point4; } } { int dx = point5.x - my_x; int dy = point5.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[5] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point5; } } { int dx = point6.x - my_x; int dy = point6.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[6] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point6; } } { int dx = point7.x - my_x; int dy = point7.y - my_y; int dist2 = dx*dx + dy*dy; if (valid[7] && dist2 < min_dist2) { min_dist2 = dist2; min_point = point7; } } { int dx = point8.x - my_x; int dy = point8.y - my_y; int dist2 = dx*dx + dy*dy; if (dist2 < min_dist2) { min_dist2 = dist2; min_point = point8; } } int idx = my_y*pitch + my_x; out[idx] = min_point; } // Exracts the zero level set, the boundary between superpixels. It looks for // discontinuities in the Superpixel ID assignment map. // texref_int_2d : superpixel ID map __global__ void k_extract_level_set(float* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int me = tex2D(texref_int_2d, my_x, my_y); int up = tex2D(texref_int_2d, my_x, my_y+1); int right = tex2D(texref_int_2d, my_x+1, my_y); int idx = my_y*pitch + my_x; float outval = 0.0f; if ( me != up || me != right) { outval = 1.0f; } out[idx] = outval; } // Fills any unassigned holes in the image not yet claimed by a superpixel. // It does this by using the indirection map to calculate a generalized Voronoi // diagram (each pixel is assigned to its nearest superpixel). 
// texref_int_2d : superpixel ID map // texref_int2_2d : indirection map __global__ void k_fill_holes(int* out, int width, int height, int pitch) { int my_x = threadIdx.x + blockIdx.x*blockDim.x; int my_y = threadIdx.y + blockIdx.y*blockDim.y; if (my_x >= width || my_y >= height) return; int2 nearest_point = tex2D(texref_int2_2d, my_x, my_y); int nearest_id = tex2D(texref_int_2d, nearest_point.x, nearest_point.y); out[my_y*pitch+my_x] = nearest_id; } // // Host code // int get_next_pow2(int n) { return 1 << (int)ceilf(log2f((float)n)); } // Initialize GPU Device (equivalent to CUT_DEVICE_INIT) void InitDevice(int dev = 0, bool verb = false) { int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "cutil error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (dev < 0) dev = 0; if (dev > deviceCount-1) dev = deviceCount - 1; cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major < 1) { fprintf(stderr, "cutil error: device does not support CUDA.\n"); exit(EXIT_FAILURE); } if (verb) fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name); CUDA_SAFE_CALL(cudaSetDevice(dev)); } /* // Writes a floating-point array in device memory to a BMP file as a grayscale image. 
void write_float_bmp (const char* file, float* d_ptr) { float* h_out = new float [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; cudaMemcpy(h_out, d_ptr, img_pixels*sizeof(float), cudaMemcpyDeviceToHost); unsigned char* p = (unsigned char*)tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { float val = h_out[y*img_pitch+x]; val=std::max(0.0f, std::min(1.0f, val)); *(p++) = (unsigned char)(val * 255.0f); *(p++) = (unsigned char)(val * 255.0f); *(p++) = (unsigned char)(val * 255.0f); *p++ = 255; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } // Writes an array in device memory of 16-bit integers to a BMP file. // Each possible integer value is mapped to one of 8 colors, with 0 uniquely mapped to black. void write_int_bmp (const char* file, int* d_ptr) { static unsigned long colors[] = {0xFF8000, 0xFF0000, 0x00FF00, 0x0000FF, 0x00FFFF, 0xFF00FF, 0xFFFF00, 0xFFFFFF}; int* h_out = new int [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; cudaMemcpy(h_out, d_ptr, img_pixels*sizeof(int), cudaMemcpyDeviceToHost); unsigned long* p = tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { int i = y*img_pitch+x; *p++ = (h_out[i] == 0) ? 0x0 : colors[h_out[i] & 0x7]; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } // Writes an array in device memory of 16-bit integers to a BMP file. // Each possible integer value is mapped to one of 8 colors, with 0 uniquely mapped to black. void write_int_bmp2 (const char* file, int* d_ptr) { int* h_out = new int [img_pixels]; unsigned long* tmp = new unsigned long [img_realpixels]; uint16_t out16; cudaMemcpy(h_out, d_ptr, img_pixels*sizeof(int), cudaMemcpyDeviceToHost); unsigned long* p = tmp; for (int y = 0; y < img_height; y++) { for (int x = 0; x < img_width; x++) { int i = y*img_pitch+x; out16 = 5 * h_out[i] & 0xFFFF; *p++ = (h_out[i] == 0) ? 
0x0 : out16 + out16<<16; } } bmp_24_write(file, img_width, img_height, tmp); delete[] tmp; delete[] h_out; } */ // Copies an image in linear device memory to a Cuda Array texture. template <class T> void copy_lin_to_tex(cudaArray* dest, T* src) { CUDA_SAFE_CALL(cudaMemcpy2DToArray(dest, 0, 0, src, img_pitch*sizeof(T), img_width*sizeof(T), img_height, cudaMemcpyDeviceToDevice)); } void calc_speed_based_on_gradient() { // Get gradient magnitude copy_lin_to_tex(d_float_array, d_float_temp1); k_gradient_mag<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Filter the gradient magnitude float sigma = floorf(sqrtf((float)img_width * (float)img_height / (float)N_SUPERPIXELS) / 2.5f); gaussianFilter(d_float_temp1, d_speed, d_float_temp2, img_pitch, img_height, sigma, 0); // Calculate speed k_calc_speed_based_on_gradient<<<STD_CONFIG>>> (d_float_temp1, d_speed, 1.0f / (sigma*2.5f), d_speed, img_width, img_height, img_pitch); // Calculate derivatives of speed copy_lin_to_tex(d_float_array, d_speed); k_gradient_xy<<<STD_CONFIG>>> (d_speed_dx, d_speed_dy, img_width, img_height, img_pitch); //if (DEBUG_IMG) write_float_bmp("speed.bmp", d_speed); } void place_seeds_and_init_psi() { // Do initial seed placement float size_grid_r = 1.0f / sqrtf((float)img_pixels / (float)N_SUPERPIXELS); float rows_f = img_height * size_grid_r; float cols_f = img_width * size_grid_r; int x_dist = (int)ceilf(1.0f/size_grid_r); int y_dist = (int)ceilf(1.0f/size_grid_r); int rows = (int)ceilf(rows_f - 1); int cols = (int)ceilf(cols_f - 1); unsigned int max_shift=(unsigned int)ceilf( min((float)x_dist,(float)y_dist)*0.25f - 1.0f ); // our seed location array is only of size N_SUPERPIXELS. actual number of seeds should be less, in theory. 
assert(rows*cols <= N_SUPERPIXELS); // Place the seeds k_place_seeds<<< dim3((cols+15)>>4, (rows+15)>>4), dim3(16, 16) >>> (d_seed_locations, x_dist, y_dist, max_shift, rows, cols); CUT_CHECK_ERROR ("place seeds kernel failed"); // Initialize psi (distance func) CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_seed_coords_const, d_seed_locations, rows*cols*sizeof(float2), 0, cudaMemcpyDeviceToDevice)); k_init_psi<<<STD_CONFIG>>> (d_float_temp1, d_assignment_map, rows*cols, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("init psi kernel failed"); //if (DEBUG_IMG) write_float_bmp("psi_start.bmp", d_float_temp1); } void feature_distance_transform() { int n_passes = (int)ceilf(log2f((float)max((int)img_width, (int)img_height))) - 1; // Initialize the indirection map copy_lin_to_tex(d_float_array, d_float_temp1); k_init_jumpflood<<<STD_CONFIG>>> (d_indirection_map, d_assignment_map, img_width, img_height, img_pitch); // Perform logn+1 passes, halving the stride each time for (int i = n_passes; i >= 0; i--) { copy_lin_to_tex(d_int2_array, d_indirection_map); k_jumpflood_pass<<<STD_CONFIG>>> (d_indirection_map, 1 << i, img_width, img_height, img_pitch); } /* // Another logn passes at stride=1 cleans up any remaining errors for (int i = 0; i < n_passes; i++) { copy_lin_to_tex(d_int2_array, d_indirection_map); k_jumpflood_pass<<<STD_CONFIG>>> (d_indirection_map, 1, img_width, img_height, img_pitch); }*/ copy_lin_to_tex(d_int2_array, d_indirection_map); } void _initialize(int dev, long width, long height, unsigned int nSuperpixels) { // Prep the device InitDevice(dev); // Verify that number of superpixels is not too large if (nSuperpixels > _MAX_SUPERPIXELS){ fprintf(stderr, "Error: nSuperpixels > MAX_SUPERPIXELS.\n"); exit(EXIT_FAILURE); }else N_SUPERPIXELS = nSuperpixels; img_width = width; img_height = height; img_realpixels = img_width*img_height; img_pitch = (img_width + 31) & (~31); img_pixels = img_pitch*img_height; img_pixels_pow2 = get_next_pow2(img_pixels); // Channel 
formats for textures cudaChannelFormatDesc d_float_channel_desc = cudaCreateChannelDesc<float>(); texref_float_2d.addressMode[0] = cudaAddressModeClamp; texref_float_2d.addressMode[1] = cudaAddressModeClamp; texref_float_2d.filterMode = cudaFilterModePoint; texref_float_2d.normalized = false; cudaChannelFormatDesc d_int2_channel_desc = cudaCreateChannelDesc<int2>(); texref_int2_2d.addressMode[0] = cudaAddressModeClamp; texref_int2_2d.addressMode[1] = cudaAddressModeClamp; texref_int2_2d.filterMode = cudaFilterModePoint; texref_int2_2d.normalized = false; cudaChannelFormatDesc d_int_channel_desc = cudaCreateChannelDesc<int>(); texref_int_2d.addressMode[0] = cudaAddressModeClamp; texref_int_2d.addressMode[1] = cudaAddressModeClamp; texref_int_2d.filterMode = cudaFilterModePoint; texref_int_2d.normalized = false; // Allocate buffers CUDA_SAFE_CALL(cudaMalloc((void**)&d_uchar4_temp1, img_realpixels*sizeof(uchar4))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_uchar1_temp1, img_realpixels*sizeof(uchar1))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_float_temp1, img_pixels*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_float_temp2, img_pixels*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_int_temp1, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_indirection_map, img_pixels*sizeof(int2))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_assignment_map, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_speed, img_pixels*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_speed_dx, img_pixels*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_speed_dy, img_pixels*sizeof(float))); CUDA_SAFE_CALL(cudaMallocArray(&d_float_array, &d_float_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(cudaMallocArray(&d_int2_array, &d_int2_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(cudaMallocArray(&d_int_array, &d_int_channel_desc, img_width, img_height)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_seed_locations, N_SUPERPIXELS*sizeof(float2))); // 
Bind arrays to texture references CUDA_SAFE_CALL(cudaBindTextureToArray(texref_float_2d, d_float_array, d_float_channel_desc)); CUDA_SAFE_CALL(cudaBindTextureToArray(texref_int2_2d, d_int2_array, d_int2_channel_desc)); CUDA_SAFE_CALL(cudaBindTextureToArray(texref_int_2d, d_int_array, d_int_channel_desc)); } // Image in RGB encoded in an unsigned long void prepare_image(uint64_t* input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. CUDA_SAFE_CALL(cudaMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(cudaMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Convert image to greyscale CUDA_SAFE_CALL(cudaMemcpy(d_uchar4_temp1, input_img, img_realpixels*sizeof(uchar4), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaBindTexture(0, texref_uchar4_1d, d_uchar4_temp1, img_realpixels*sizeof(uchar4))); k_rgb2grayf<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Prepare image if input is already float and normalized void prepare_image(float *input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. 
CUDA_SAFE_CALL(cudaMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(cudaMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Copy data to device CUDA_SAFE_CALL(cudaMemcpy(d_float_temp1, input_img, img_pixels*sizeof(float), cudaMemcpyHostToDevice)); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Prepare image if input is uchars void prepare_image(unsigned char* input_img) { // Zero out these two arrays. Their sizes are padded to next-pow2 to simplify the reduction // kernel, but those extra padded entries MUST NOT have garbage in them. CUDA_SAFE_CALL(cudaMemset(d_assignment_map, 0, img_pixels_pow2*sizeof(int))); CUDA_SAFE_CALL(cudaMemset(d_int_temp1, 0, img_pixels_pow2*sizeof(int))); // Convert image to internal format CUDA_SAFE_CALL(cudaMemcpy(d_uchar1_temp1, input_img, img_realpixels*sizeof(uchar1), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaBindTexture(0, texref_uchar_1d, d_uchar1_temp1, img_realpixels*sizeof(uchar1))); k_gray2grayf<<<STD_CONFIG>>> (d_float_temp1, img_width, img_height, img_pitch); CUT_CHECK_ERROR ("kernel failed"); // Smooth the image for (int i = 0; i < 10; i++) { copy_lin_to_tex(d_float_array, d_float_temp1); k_smooth_image<<<STD_CONFIG>>> (d_float_temp1, 0.1f, img_width, img_height, img_pitch); } //if (DEBUG_IMG) write_float_bmp("smoothed.bmp", d_float_temp1); } // Assumption: Someone must have already called "_initialize" // If input is a float, we assume somebody else has already converted the // image to grayscale and normalized template <typename T> int extractSuperpixels(unsigned int* output, T* input_img) { /* unsigned int timer = 0; cutCreateTimer(&timer); cutStartTimer(timer); */ int iter; prepare_image(input_img); calc_speed_based_on_gradient(); place_seeds_and_init_psi(); // Evolve psi until 
little forward progress is made int old_covered_pixels = do_reduction_count(d_assignment_map, d_int_temp1, img_pixels_pow2); for (iter = 0; iter < MAX_ITERATIONS; iter++) { feature_distance_transform(); copy_lin_to_tex(d_int_array, d_assignment_map); k_evolve_psi<<<STD_CONFIG>>> (d_float_temp1, d_assignment_map, d_speed, d_speed_dx, d_speed_dy, 0.5f, img_width, img_height, img_pitch); int new_covered_pixels = do_reduction_count(d_assignment_map, d_int_temp1, img_pixels_pow2); float relative_inc = (float)(new_covered_pixels - old_covered_pixels) / (float)img_realpixels; old_covered_pixels = new_covered_pixels; if (relative_inc < 1e-4 && new_covered_pixels >= img_realpixels/2) break; } // Assign any remaining unassigned areas and generate the final superpixel boundaries k_fill_holes<<<STD_CONFIG>>> (d_assignment_map, img_width, img_height, img_pitch); cudaThreadSynchronize(); /* cutStopTimer(timer); printf("time: %f ms\n", cutGetTimerValue(timer)); */ // Copy data from device memory to output cudaMemcpy(output, d_assignment_map, img_pixels*sizeof(int), cudaMemcpyDeviceToHost); return iter; } void _cleanup() { cudaFree(d_uchar4_temp1); cudaFree(d_uchar1_temp1); cudaFree(d_float_temp1); cudaFree(d_float_temp2); cudaFree(d_int_temp1); cudaFree(d_indirection_map); cudaFree(d_assignment_map); cudaFree(d_speed); cudaFree(d_speed_dx); cudaFree(d_speed_dy); cudaFree(d_seed_locations); cudaFreeArray(d_float_array); cudaFreeArray(d_int2_array); cudaFreeArray(d_int_array); } // Template explicit instantiation template int extractSuperpixels(unsigned int* output, unsigned char* input_img); template int extractSuperpixels(unsigned int* output, float* input_img); template int extractSuperpixels(unsigned int* output, uint64_t* input_img);
efbeb109dcfce07419fa24434ab223202997d772.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <cub/block/block_store.cuh> #include <cub/block/block_radix_sort.cuh> #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(ArgMaxOrMin, LAYER_ARG_MAX_OR_MIN); template <typename K, typename V> using KeyValuePair = hipcub::KeyValuePair<K, V>; template <typename K, typename V> using BlockReduce = hipcub::BlockReduce<KeyValuePair<K, V>, TNN_CUDA_NUM_THREADS>; template <typename T, typename ReductionOpT> __global__ void argmaxmin_kernel( const T* input, const int outer_size, const int inner_size, const int stride, const ReductionOpT reducer, const T init, int* output) { __shared__ typename BlockReduce<int, T>::TempStorage temp_storage; for (int idx = blockIdx.x; idx < outer_size; idx += gridDim.x) { int i = idx / stride; int j = idx % stride; KeyValuePair<int, T> kv = {-1, init}; for (int k = threadIdx.x; k < inner_size; k += blockDim.x) { kv = reducer({k, input[i * inner_size * stride + k * stride + j]}, kv); } kv = BlockReduce<int, T>(temp_storage).Reduce(kv, reducer); if (threadIdx.x == 0) { output[idx] = static_cast<int>(kv.key); } __syncthreads(); } } 
Status CudaArgMaxOrMinLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaArgMaxOrMinLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaArgMaxOrMinLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<ArgMaxOrMinLayerParam *>(param_); CHECK_PARAM_NULL(params); Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; int axis = params->axis; int num = DimsVectorUtils::Count(input_dims, 0, axis); int channels = input_dims[axis]; int stride = DimsVectorUtils::Count(input_dims, axis + 1); if (stride == 0) { stride = 1; } if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { float *input_data = static_cast<float *>(input_blob->GetHandle().base); int *output_data = static_cast<int *>(output_blob->GetHandle().base); if (params->mode == 0) { hipLaunchKernelGGL(( argmaxmin_kernel), dim3(num * stride), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), input_data, num * stride, channels, stride, hipcub::ArgMin(), FLT_MAX, output_data); } else { hipLaunchKernelGGL(( argmaxmin_kernel), dim3(num * stride), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), input_data, num * stride, channels, stride, hipcub::ArgMax(), -FLT_MAX, output_data); } } else if (output_blob->GetBlobDesc().data_type == DATA_TYPE_INT8) { LOGE("Error: layer acc dont support datatype: %d\n", output_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc dont support datatype"); } else { LOGE("Error: layer acc dont support datatype: %d\n", output_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc dont support datatype"); } return TNN_OK; } REGISTER_CUDA_ACC(ArgMaxOrMin, 
LAYER_ARG_MAX_OR_MIN); } // namespace TNN_NS
efbeb109dcfce07419fa24434ab223202997d772.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include <cub/cub.cuh> #include <cub/block/block_load.cuh> #include <cub/block/block_store.cuh> #include <cub/block/block_radix_sort.cuh> #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(ArgMaxOrMin, LAYER_ARG_MAX_OR_MIN); template <typename K, typename V> using KeyValuePair = cub::KeyValuePair<K, V>; template <typename K, typename V> using BlockReduce = cub::BlockReduce<KeyValuePair<K, V>, TNN_CUDA_NUM_THREADS>; template <typename T, typename ReductionOpT> __global__ void argmaxmin_kernel( const T* input, const int outer_size, const int inner_size, const int stride, const ReductionOpT reducer, const T init, int* output) { __shared__ typename BlockReduce<int, T>::TempStorage temp_storage; for (int idx = blockIdx.x; idx < outer_size; idx += gridDim.x) { int i = idx / stride; int j = idx % stride; KeyValuePair<int, T> kv = {-1, init}; for (int k = threadIdx.x; k < inner_size; k += blockDim.x) { kv = reducer({k, input[i * inner_size * stride + k * stride + j]}, kv); } kv = BlockReduce<int, T>(temp_storage).Reduce(kv, reducer); if (threadIdx.x == 0) { output[idx] = static_cast<int>(kv.key); } __syncthreads(); } } Status CudaArgMaxOrMinLayerAcc::Init(Context *context, LayerParam *param, LayerResource 
*resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaArgMaxOrMinLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaArgMaxOrMinLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<ArgMaxOrMinLayerParam *>(param_); CHECK_PARAM_NULL(params); Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; int axis = params->axis; int num = DimsVectorUtils::Count(input_dims, 0, axis); int channels = input_dims[axis]; int stride = DimsVectorUtils::Count(input_dims, axis + 1); if (stride == 0) { stride = 1; } if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { float *input_data = static_cast<float *>(input_blob->GetHandle().base); int *output_data = static_cast<int *>(output_blob->GetHandle().base); if (params->mode == 0) { argmaxmin_kernel<<<num * stride, TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( input_data, num * stride, channels, stride, cub::ArgMin(), FLT_MAX, output_data); } else { argmaxmin_kernel<<<num * stride, TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( input_data, num * stride, channels, stride, cub::ArgMax(), -FLT_MAX, output_data); } } else if (output_blob->GetBlobDesc().data_type == DATA_TYPE_INT8) { LOGE("Error: layer acc dont support datatype: %d\n", output_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc dont support datatype"); } else { LOGE("Error: layer acc dont support datatype: %d\n", output_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc dont support datatype"); } return TNN_OK; } REGISTER_CUDA_ACC(ArgMaxOrMin, LAYER_ARG_MAX_OR_MIN); } // namespace TNN_NS
f41d6dbd0e7517dbfb2e77fa072e2027da45339a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 Foundation for Research and Technology - Hellas * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 [1] [1] * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. * * Links: * ------ * [1] http://www.apache.org/licenses/LICENSE-2.0 [1] */ #include <hip/hip_runtime.h> #include <cupti.h> #include <stdio.h> #include <pthread.h> #include <unistd.h> #include <occupancy.cuh> //#define SYNCHRONIZE_WITH_EVENT CUpti_EventID idWarps; CUpti_EventID idCycles; int cuptiEventSet = 0; cupti_eventData *cuptiEvent; CUpti_SubscriberHandle subscriber; hipEvent_t start_event, stop_event; int testComplete; #define CHECK_CU_ERROR(err, cufunc) \ if (err != hipSuccess) \ { \ printf ("%s:%d: error %d for CUDA Driver API function '%s'\n", \ __FILE__, __LINE__, err, cufunc); \ exit(-1); \ } #define CHECK_CUPTI_ERROR(err, cuptifunc) \ if (err != CUPTI_SUCCESS) \ { \ const char *errstr; \ cuptiGetResultString(err, &errstr); \ printf ("%s:%d:Error %s for CUPTI API function '%s'.\n", \ __FILE__, __LINE__, errstr, cuptifunc); \ exit(-1); \ } void start_event_collection(){ CUptiResult cuptiErr; hipError_t err; hipCtx_t context = 0; hipDevice_t dev = 0; hipCtx_t context_g = 0; int maxDev; int deviceNum = 0; const char eventNameWarps[] = {'a','c','t','i','v','e','_','w','a','r','p','s','\0'}; const char eventNameCycles[] = {'a','c','t','i','v','e','_','c','y','c','l','e','s','\0'}; // err = hipDeviceGet(&dev, deviceNum); // CHECK_CU_ERROR(err, "hipDeviceGet"); hipGetDevice(&dev); 
if(!cuptiEventSet){ hipGetDeviceCount(&maxDev); cuptiEvent= (cupti_eventData*)malloc(maxDev*sizeof(cupti_eventData)); } /* creating context */ err = hipCtxCreate(&context, 0, dev); CHECK_CU_ERROR(err, "hipCtxCreate"); context_g = context; /* Creating event group for profiling */ cuptiErr = cuptiEventGroupCreate(context, &cuptiEvent[dev].eventGroup, 0); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupCreate"); cuptiErr = cuptiEventGetIdFromName(dev, eventNameWarps, &idWarps); if (cuptiErr != CUPTI_SUCCESS) { printf("Invalid eventName: %s\n", eventNameWarps); return; } cuptiErr = cuptiEventGetIdFromName(dev, eventNameCycles, &idCycles); if (cuptiErr != CUPTI_SUCCESS) { printf("Invalid eventName: %s\n", eventNameCycles); return; } /* adding events to the profiling group */ cuptiErr = cuptiEventGroupAddEvent(cuptiEvent[dev].eventGroup, idWarps); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupAddEvent"); cuptiErr = cuptiEventGroupAddEvent(cuptiEvent[dev].eventGroup, idCycles); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupAddEvent"); #ifdef SYNCHRONIZE_WITH_EVENT hipEventRecord(start_event, 0); hipDeviceSynchronize(); #endif cuptiErr = cuptiSetEventCollectionMode(context_g, CUPTI_EVENT_COLLECTION_MODE_KERNEL); CHECK_CUPTI_ERROR(cuptiErr, "cuptiSetEventCollectionMode"); cuptiErr = cuptiEventGroupEnable(cuptiEvent[dev].eventGroup); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupEnable"); printf("Setted device %d\n",dev); } void stop_event_collection(){ CUptiResult cuptiErr; int dev; hipGetDevice(&dev); hipDeviceSynchronize(); cuptiErr = cuptiEventGroupDisable(cuptiEvent[dev].eventGroup); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupDisable"); } void get_occupancy(){ CUptiResult cuptiErr; size_t bytesRead; uint64_t activeWarps; uint64_t activeCycles; int maxWarps; hipDevice_t dev1; hipGetDevice(&dev1); #ifdef SYNCHRONIZE_WITH_EVENT/*uses events to synchronize with the kernel execution*/ hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); #endif bytesRead = sizeof 
(uint64_t); //hipDeviceSynchronize(); cuptiErr = cuptiEventGroupReadEvent(cuptiEvent[dev1].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, idWarps, &bytesRead, &activeWarps); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadEvent"); cuptiErr = cuptiEventGroupReadEvent(cuptiEvent[dev1].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, idCycles, &bytesRead, &activeCycles); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadEvent"); hipDeviceProp_t prop; hipDevice_t device; hipGetDevice(&device); hipGetDeviceProperties(&prop, device); maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; if(!activeCycles){ printf("Active cycles: %f\nActive warps: %f, on device %d\n",(double)activeCycles,(double)activeWarps,dev1); }else{ printf("Achieved occupancy: %f%c, on device %d\n",((double)activeWarps / (double)activeCycles) / (double)maxWarps * 100,37,dev1 ); } } void * sampling_func(void *arg){ uint64_t valueBuffer[2]; size_t eventValueBufferSize = 2*sizeof(uint64_t); size_t eventIdArraySize = 2*sizeof(uint32_t); size_t numEventIdsRead; int maxWarps; CUpti_EventID eventIds[2]; CUptiResult cuptiErr; eventIds[0] = idWarps; eventIds[1] = idCycles; hipDeviceProp_t prop; hipDevice_t device; hipGetDevice(&device); hipGetDeviceProperties(&prop, device); maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; while (!testComplete) { cuptiErr = cuptiEventGroupReadAllEvents(cuptiEvent[device].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, &eventValueBufferSize, valueBuffer, &eventIdArraySize, eventIds, &numEventIdsRead); //cuptiErr = cuptiDeviceGetTimestamp(context, &timeStamp); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadAllEvents"); double occupancy = 0; if(valueBuffer[1]){ occupancy = (double)valueBuffer[0] / (double)valueBuffer[1] / maxWarps*100; } printf("%f\n",occupancy);/*todo cant use printf*/ //timeStampOld = timeStamp; usleep(1000); } return NULL; } void start_sampling(){ int status = 0; pthread_t pThread; testComplete = 0; status = pthread_create(&pThread, NULL, sampling_func, NULL); if 
(status != 0) { perror("pthread_create"); exit(-1); } } void stop_sampling(){ //printf("Stop sampling thread\n"); testComplete = 1; }
f41d6dbd0e7517dbfb2e77fa072e2027da45339a.cu
/* * Copyright 2018 Foundation for Research and Technology - Hellas * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 [1] [1] * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. * * Links: * ------ * [1] http://www.apache.org/licenses/LICENSE-2.0 [1] */ #include <cuda.h> #include <cupti.h> #include <stdio.h> #include <pthread.h> #include <unistd.h> #include <occupancy.cuh> //#define SYNCHRONIZE_WITH_EVENT CUpti_EventID idWarps; CUpti_EventID idCycles; int cuptiEventSet = 0; cupti_eventData *cuptiEvent; CUpti_SubscriberHandle subscriber; cudaEvent_t start_event, stop_event; int testComplete; #define CHECK_CU_ERROR(err, cufunc) \ if (err != CUDA_SUCCESS) \ { \ printf ("%s:%d: error %d for CUDA Driver API function '%s'\n", \ __FILE__, __LINE__, err, cufunc); \ exit(-1); \ } #define CHECK_CUPTI_ERROR(err, cuptifunc) \ if (err != CUPTI_SUCCESS) \ { \ const char *errstr; \ cuptiGetResultString(err, &errstr); \ printf ("%s:%d:Error %s for CUPTI API function '%s'.\n", \ __FILE__, __LINE__, errstr, cuptifunc); \ exit(-1); \ } void start_event_collection(){ CUptiResult cuptiErr; CUresult err; CUcontext context = 0; CUdevice dev = 0; CUcontext context_g = 0; int maxDev; int deviceNum = 0; const char eventNameWarps[] = {'a','c','t','i','v','e','_','w','a','r','p','s','\0'}; const char eventNameCycles[] = {'a','c','t','i','v','e','_','c','y','c','l','e','s','\0'}; // err = cuDeviceGet(&dev, deviceNum); // CHECK_CU_ERROR(err, "cuDeviceGet"); cudaGetDevice(&dev); if(!cuptiEventSet){ cudaGetDeviceCount(&maxDev); cuptiEvent= 
(cupti_eventData*)malloc(maxDev*sizeof(cupti_eventData)); } /* creating context */ err = cuCtxCreate(&context, 0, dev); CHECK_CU_ERROR(err, "cuCtxCreate"); context_g = context; /* Creating event group for profiling */ cuptiErr = cuptiEventGroupCreate(context, &cuptiEvent[dev].eventGroup, 0); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupCreate"); cuptiErr = cuptiEventGetIdFromName(dev, eventNameWarps, &idWarps); if (cuptiErr != CUPTI_SUCCESS) { printf("Invalid eventName: %s\n", eventNameWarps); return; } cuptiErr = cuptiEventGetIdFromName(dev, eventNameCycles, &idCycles); if (cuptiErr != CUPTI_SUCCESS) { printf("Invalid eventName: %s\n", eventNameCycles); return; } /* adding events to the profiling group */ cuptiErr = cuptiEventGroupAddEvent(cuptiEvent[dev].eventGroup, idWarps); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupAddEvent"); cuptiErr = cuptiEventGroupAddEvent(cuptiEvent[dev].eventGroup, idCycles); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupAddEvent"); #ifdef SYNCHRONIZE_WITH_EVENT cudaEventRecord(start_event, 0); cudaDeviceSynchronize(); #endif cuptiErr = cuptiSetEventCollectionMode(context_g, CUPTI_EVENT_COLLECTION_MODE_KERNEL); CHECK_CUPTI_ERROR(cuptiErr, "cuptiSetEventCollectionMode"); cuptiErr = cuptiEventGroupEnable(cuptiEvent[dev].eventGroup); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupEnable"); printf("Setted device %d\n",dev); } void stop_event_collection(){ CUptiResult cuptiErr; int dev; cudaGetDevice(&dev); cudaDeviceSynchronize(); cuptiErr = cuptiEventGroupDisable(cuptiEvent[dev].eventGroup); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupDisable"); } void get_occupancy(){ CUptiResult cuptiErr; size_t bytesRead; uint64_t activeWarps; uint64_t activeCycles; int maxWarps; CUdevice dev1; cudaGetDevice(&dev1); #ifdef SYNCHRONIZE_WITH_EVENT/*uses events to synchronize with the kernel execution*/ cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); #endif bytesRead = sizeof (uint64_t); //cudaDeviceSynchronize(); cuptiErr = 
cuptiEventGroupReadEvent(cuptiEvent[dev1].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, idWarps, &bytesRead, &activeWarps); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadEvent"); cuptiErr = cuptiEventGroupReadEvent(cuptiEvent[dev1].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, idCycles, &bytesRead, &activeCycles); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadEvent"); cudaDeviceProp prop; CUdevice device; cudaGetDevice(&device); cudaGetDeviceProperties(&prop, device); maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; if(!activeCycles){ printf("Active cycles: %f\nActive warps: %f, on device %d\n",(double)activeCycles,(double)activeWarps,dev1); }else{ printf("Achieved occupancy: %f%c, on device %d\n",((double)activeWarps / (double)activeCycles) / (double)maxWarps * 100,37,dev1 ); } } void * sampling_func(void *arg){ uint64_t valueBuffer[2]; size_t eventValueBufferSize = 2*sizeof(uint64_t); size_t eventIdArraySize = 2*sizeof(uint32_t); size_t numEventIdsRead; int maxWarps; CUpti_EventID eventIds[2]; CUptiResult cuptiErr; eventIds[0] = idWarps; eventIds[1] = idCycles; cudaDeviceProp prop; CUdevice device; cudaGetDevice(&device); cudaGetDeviceProperties(&prop, device); maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; while (!testComplete) { cuptiErr = cuptiEventGroupReadAllEvents(cuptiEvent[device].eventGroup, CUPTI_EVENT_READ_FLAG_NONE, &eventValueBufferSize, valueBuffer, &eventIdArraySize, eventIds, &numEventIdsRead); //cuptiErr = cuptiDeviceGetTimestamp(context, &timeStamp); CHECK_CUPTI_ERROR(cuptiErr, "cuptiEventGroupReadAllEvents"); double occupancy = 0; if(valueBuffer[1]){ occupancy = (double)valueBuffer[0] / (double)valueBuffer[1] / maxWarps*100; } printf("%f\n",occupancy);/*todo cant use printf*/ //timeStampOld = timeStamp; usleep(1000); } return NULL; } void start_sampling(){ int status = 0; pthread_t pThread; testComplete = 0; status = pthread_create(&pThread, NULL, sampling_func, NULL); if (status != 0) { perror("pthread_create"); exit(-1); } } 
void stop_sampling(){ //printf("Stop sampling thread\n"); testComplete = 1; }
c76a3a0115c4ceefe7368610ebfba0b3adead1df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void awkward_ByteMaskedArray_getitem_nextcarry_kernel(int64_t* prefixed_mask, int64_t* to_carry, int8_t* mask, int64_t length) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; if(thread_id < length) { if (mask[thread_id] != 0) { to_carry[prefixed_mask[thread_id] - 1] = thread_id; } } }
c76a3a0115c4ceefe7368610ebfba0b3adead1df.cu
#include "includes.h" __global__ void awkward_ByteMaskedArray_getitem_nextcarry_kernel(int64_t* prefixed_mask, int64_t* to_carry, int8_t* mask, int64_t length) { int64_t block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int64_t thread_id = block_id * blockDim.x + threadIdx.x; if(thread_id < length) { if (mask[thread_id] != 0) { to_carry[prefixed_mask[thread_id] - 1] = thread_id; } } }
5f04a375d3fa9be81e7ef3d4220dcee8a66b980e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> void check(const char *file, const int line, hipError_t err) { if (err != hipSuccess) { std::cerr << file << ":" << line << " CUDA call failed with error: " << hipGetErrorString(err) << std::endl; std::terminate(); } } #define CHECK(x) check(__FILE__, __LINE__, (x)) __global__ void sum(const float *a, float *b) { int col = threadIdx.x + blockIdx.x * blockDim.x; int n = blockDim.x * gridDim.x; float sum = 0; int idx = col; for (int row = 0; row < n; row++) { sum += a[idx]; idx += n; } b[col] = sum; } int main() { const int N = 8192; float *ha, *hb; float *da, *db; ha = new float [N * N]; hb = new float [N]; CHECK(hipMalloc(&da, N * N * sizeof(float))); CHECK(hipMalloc(&db, N * sizeof(float))); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) ha[i * N + j] = sin(i * N + j); CHECK(hipMemcpy(da, ha, N * N * sizeof(float), hipMemcpyHostToDevice)); hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start, 0)); dim3 block(1024); dim3 grid(N / block.x); hipLaunchKernelGGL(( sum), dim3(grid), dim3(block), 0, 0, da, db); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); float timems; CHECK(hipEventElapsedTime(&timems, start, stop)); std::cout << "Kernel elapsed time: " << timems << " ms" << std::endl; CHECK(hipMemcpy(hb, db, N * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < 10; i++) std::cout << "b[" << i << "] = " << hb[i] << std::endl; std::cout << "..." << std::endl; for (int i = N - 10; i < N; i++) std::cout << "b[" << i << "] = " << hb[i] << std::endl; CHECK(hipFree(da)); CHECK(hipFree(db)); delete[] ha; delete[] hb; return 0; }
5f04a375d3fa9be81e7ef3d4220dcee8a66b980e.cu
#include <iostream> void check(const char *file, const int line, cudaError_t err) { if (err != cudaSuccess) { std::cerr << file << ":" << line << " CUDA call failed with error: " << cudaGetErrorString(err) << std::endl; std::terminate(); } } #define CHECK(x) check(__FILE__, __LINE__, (x)) __global__ void sum(const float *a, float *b) { int col = threadIdx.x + blockIdx.x * blockDim.x; int n = blockDim.x * gridDim.x; float sum = 0; int idx = col; for (int row = 0; row < n; row++) { sum += a[idx]; idx += n; } b[col] = sum; } int main() { const int N = 8192; float *ha, *hb; float *da, *db; ha = new float [N * N]; hb = new float [N]; CHECK(cudaMalloc(&da, N * N * sizeof(float))); CHECK(cudaMalloc(&db, N * sizeof(float))); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) ha[i * N + j] = sin(i * N + j); CHECK(cudaMemcpy(da, ha, N * N * sizeof(float), cudaMemcpyHostToDevice)); cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start, 0)); dim3 block(1024); dim3 grid(N / block.x); sum<<<grid, block>>>(da, db); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); float timems; CHECK(cudaEventElapsedTime(&timems, start, stop)); std::cout << "Kernel elapsed time: " << timems << " ms" << std::endl; CHECK(cudaMemcpy(hb, db, N * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < 10; i++) std::cout << "b[" << i << "] = " << hb[i] << std::endl; std::cout << "..." << std::endl; for (int i = N - 10; i < N; i++) std::cout << "b[" << i << "] = " << hb[i] << std::endl; CHECK(cudaFree(da)); CHECK(cudaFree(db)); delete[] ha; delete[] hb; return 0; }
fcef167c1b3192c53125df9b968e77b43fef0f77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void zgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaDoubleComplex * A, magmaDoubleComplex * ReA, magmaDoubleComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_z_matrix input matrix A. @param[out] ReA magma_z_matrix* output matrix contaning real contributions. @param[out] ImA magma_z_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgedensereimsplit( magma_z_matrix A, magma_z_matrix *ReA, magma_z_matrix *ImA, magma_queue_t queue ) { magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( zgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
fcef167c1b3192c53125df9b968e77b43fef0f77.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void zgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaDoubleComplex * A, magmaDoubleComplex * ReA, magmaDoubleComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_z_matrix input matrix A. @param[out] ReA magma_z_matrix* output matrix contaning real contributions. @param[out] ImA magma_z_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgedensereimsplit( magma_z_matrix A, magma_z_matrix *ReA, magma_z_matrix *ImA, magma_queue_t queue ) { magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; zgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
1fa229cdbddc4a32776ff4e922e561f46c5be704.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include "kmeans.h" static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number){ if(err!=hipSuccess){ fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) #define STREAMS_CNT 4 static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } /*----< euclid_dist_2() >----------------------------------------------------*/ /* square of Euclid distance between two multi-dimensional points */ __host__ __device__ inline static float euclid_dist_2(int numvariables, int numObjs, int numClusters, float *objects, // [numvariables][numObjs] float *clusters, // [numvariables][numClusters] int objectId, int clusterId) { float ans=0.0; for (int i = 0; i < numvariables; i++) { ans +=(objects[numObjs*i+objectId] - clusters[numClusters * i + clusterId])*(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]); } return(ans); } /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numvariables, int numObjs, int numClusters, float *objects, // [numvariables][numObjs] float *deviceClusters, // [numvariables][numClusters] int *membership, // [numObjs] int *intermediates){ // The type chosen for membershipChanged must be large enough to support reductions! // There are blockDim.x elements, one for each thread in the block. // See numThreadsPerClusterBlock in cuda_kmeans(). 
//unsigned char *membershipChanged = (unsigned char *)sharedMemory; __shared__ unsigned char membershipChanged[128]; float *clusters = deviceClusters; membershipChanged[threadIdx.x] = 0; int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numvariables, numObjs, numClusters, objects, clusters, objectId, 0); for (i=1; i<numClusters; i++) { dist = euclid_dist_2(numvariables, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! // this is a reduction for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to numIntermediates2, the number of threads launched. // It *must* be a power of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! 
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { deviceIntermediates[0] = intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numvariables] // clusters [numClusters][numvariables] // dimObjects [numvariables][numObjs] // dimClusters [numvariables][numClusters] // newClusters [numvariables][numClusters] // deviceObjects [numvariables][numObjs] // deviceClusters [numvariables][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numvariables] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numvariables] */ int numvariables, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numvariables] */ float **dimClusters; float **newClusters; /* [numvariables][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numvariables] layout to new // [numvariables][numObjs] layout malloc2D(dimObjects, numvariables, numObjs, float); for (i = 0; i < numvariables; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numvariables, numClusters, float); for (i = 0; i < numvariables; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*)calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numvariables, numClusters, float); memset(newClusters[0], 0, numvariables * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of two, // and it *must* be no larger than the number of bits that will fit into an unsigned char, // the type used to keep track of membership changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; //const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char); const unsigned int numReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); SAFE_CALL((hipMalloc(&deviceObjects, numObjs*numvariables*sizeof(float))), "CUDA malloc error!"); SAFE_CALL((hipMalloc(&deviceClusters, numClusters*numvariables*sizeof(float))), "CUDA malloc error!"); SAFE_CALL((hipMalloc(&deviceMembership, numObjs*sizeof(int))), "CUDA malloc error!"); SAFE_CALL((hipMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))), "CUDA malloc error!"); SAFE_CALL(hipMemcpy(deviceObjects, dimObjects[0], numObjs*numvariables*sizeof(float), hipMemcpyHostToDevice), "CUDA memory release error!"); SAFE_CALL(hipMemcpy(deviceMembership, membership, numObjs*sizeof(int), hipMemcpyHostToDevice), "CUDA memory release error!"); do{ SAFE_CALL((hipMemcpy(deviceClusters, dimClusters[0], numClusters*numvariables*sizeof(float), hipMemcpyHostToDevice)), "CUDA memory copy from host to device error!"); hipLaunchKernelGGL(( find_nearest_cluster), dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), 0, 0, numvariables, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_delta), dim3(1),dim3(numReductionThreads), reductionBlockSharedDataSize , 0, deviceIntermediates, numClusterBlocks, numReductionThreads); hipDeviceSynchronize(); int d; SAFE_CALL((hipMemcpy(&d, deviceIntermediates, sizeof(int), hipMemcpyDeviceToHost)), "CUDA memory copy from device to host error!"); delta = (float)d; SAFE_CALL((hipMemcpy(membership, deviceMembership, numObjs*sizeof(int), hipMemcpyDeviceToHost)), "CUDA memory copy from device to host error!"); for 
(i=0; i<numObjs; i++) { /* find the array index of nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j=0; j<numvariables; j++) newClusters[j][index] += objects[i][j]; } // TODO: Flip the nesting order // TODO: Change layout of newClusters to [numClusters][numvariables] // average the sum and replace old cluster centers with newClusters for (i=0; i<numClusters; i++) { for (j=0; j<numvariables; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numvariables, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numvariables; j++) { clusters[i][j] = dimClusters[j][i]; } } SAFE_CALL((hipFree(deviceObjects)), "CUDA memory release error!"); SAFE_CALL((hipFree(deviceClusters)), "CUDA memory release error!"); SAFE_CALL((hipFree(deviceMembership)),"CUDA memory release error!"); SAFE_CALL((hipFree(deviceIntermediates)),"CUDA memory release error!"); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
1fa229cdbddc4a32776ff4e922e561f46c5be704.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include "kmeans.h" static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number){ if(err!=cudaSuccess){ fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) #define STREAMS_CNT 4 static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } /*----< euclid_dist_2() >----------------------------------------------------*/ /* square of Euclid distance between two multi-dimensional points */ __host__ __device__ inline static float euclid_dist_2(int numvariables, int numObjs, int numClusters, float *objects, // [numvariables][numObjs] float *clusters, // [numvariables][numClusters] int objectId, int clusterId) { float ans=0.0; for (int i = 0; i < numvariables; i++) { ans +=(objects[numObjs*i+objectId] - clusters[numClusters * i + clusterId])*(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]); } return(ans); } /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numvariables, int numObjs, int numClusters, float *objects, // [numvariables][numObjs] float *deviceClusters, // [numvariables][numClusters] int *membership, // [numObjs] int *intermediates){ // The type chosen for membershipChanged must be large enough to support reductions! // There are blockDim.x elements, one for each thread in the block. // See numThreadsPerClusterBlock in cuda_kmeans(). 
//unsigned char *membershipChanged = (unsigned char *)sharedMemory; __shared__ unsigned char membershipChanged[128]; float *clusters = deviceClusters; membershipChanged[threadIdx.x] = 0; int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numvariables, numObjs, numClusters, objects, clusters, objectId, 0); for (i=1; i<numClusters; i++) { dist = euclid_dist_2(numvariables, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! // this is a reduction for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to numIntermediates2, the number of threads launched. // It *must* be a power of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! 
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { deviceIntermediates[0] = intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numvariables] // clusters [numClusters][numvariables] // dimObjects [numvariables][numObjs] // dimClusters [numvariables][numClusters] // newClusters [numvariables][numClusters] // deviceObjects [numvariables][numObjs] // deviceClusters [numvariables][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numvariables] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numvariables] */ int numvariables, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numvariables] */ float **dimClusters; float **newClusters; /* [numvariables][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numvariables] layout to new // [numvariables][numObjs] layout malloc2D(dimObjects, numvariables, numObjs, float); for (i = 0; i < numvariables; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numvariables, numClusters, float); for (i = 0; i < numvariables; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*)calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numvariables, numClusters, float); memset(newClusters[0], 0, numvariables * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of two, // and it *must* be no larger than the number of bits that will fit into an unsigned char, // the type used to keep track of membership changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; //const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char); const unsigned int numReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); SAFE_CALL((cudaMalloc(&deviceObjects, numObjs*numvariables*sizeof(float))), "CUDA malloc error!"); SAFE_CALL((cudaMalloc(&deviceClusters, numClusters*numvariables*sizeof(float))), "CUDA malloc error!"); SAFE_CALL((cudaMalloc(&deviceMembership, numObjs*sizeof(int))), "CUDA malloc error!"); SAFE_CALL((cudaMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))), "CUDA malloc error!"); SAFE_CALL(cudaMemcpy(deviceObjects, dimObjects[0], numObjs*numvariables*sizeof(float), cudaMemcpyHostToDevice), "CUDA memory release error!"); SAFE_CALL(cudaMemcpy(deviceMembership, membership, numObjs*sizeof(int), cudaMemcpyHostToDevice), "CUDA memory release error!"); do{ SAFE_CALL((cudaMemcpy(deviceClusters, dimClusters[0], numClusters*numvariables*sizeof(float), cudaMemcpyHostToDevice)), "CUDA memory copy from host to device error!"); find_nearest_cluster<<<numClusterBlocks, numThreadsPerClusterBlock>>>(numvariables, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); cudaDeviceSynchronize(); compute_delta<<<1,numReductionThreads, reductionBlockSharedDataSize >>>(deviceIntermediates, numClusterBlocks, numReductionThreads); cudaDeviceSynchronize(); int d; SAFE_CALL((cudaMemcpy(&d, deviceIntermediates, sizeof(int), cudaMemcpyDeviceToHost)), "CUDA memory copy from device to host error!"); delta = (float)d; SAFE_CALL((cudaMemcpy(membership, deviceMembership, numObjs*sizeof(int), cudaMemcpyDeviceToHost)), "CUDA memory copy from device to host error!"); for (i=0; i<numObjs; i++) { /* find the array index of 
nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j=0; j<numvariables; j++) newClusters[j][index] += objects[i][j]; } // TODO: Flip the nesting order // TODO: Change layout of newClusters to [numClusters][numvariables] // average the sum and replace old cluster centers with newClusters for (i=0; i<numClusters; i++) { for (j=0; j<numvariables; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numvariables, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numvariables; j++) { clusters[i][j] = dimClusters[j][i]; } } SAFE_CALL((cudaFree(deviceObjects)), "CUDA memory release error!"); SAFE_CALL((cudaFree(deviceClusters)), "CUDA memory release error!"); SAFE_CALL((cudaFree(deviceMembership)),"CUDA memory release error!"); SAFE_CALL((cudaFree(deviceIntermediates)),"CUDA memory release error!"); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
e54d921f5be66aa0a969109f837d7fd72a78856b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "include/utils.cuh" #include "../include/structs.h" #include "../include/macros.h" __device__ int IDX(int i, int j, int Lx, int* ex, int* ey){ return i + ex[j] + ey[j] * Lx; } __device__ int IDXcm(int i, int j, int Lx, int Ly){ return i + j * Lx * Ly; } void pointerSwap(cudaStruct *deviceOnly){ prec *tempPtr = deviceOnly->f1; deviceOnly->f1 = deviceOnly->f2; deviceOnly->f2 = tempPtr; } void memoryFree(mainStruct host, mainStruct device, cudaStruct deviceOnly){ delete[] host.b; delete[] host.w; hipFree(device.b); hipFree(device.w); hipFree(deviceOnly.h); hipFree(deviceOnly.f1); hipFree(deviceOnly.f2); hipFree(deviceOnly.binary1); hipFree(deviceOnly.binary2); } void memoryInit(configStruct config, cudaStruct *deviceOnly, mainStruct *device, mainStruct host){ uint pBytes = config.Lx * config.Ly * sizeof(prec); //uint iBytes = config.Lx * config.Ly * sizeof(int); uint uBytes = config.Lx * config.Ly * sizeof(unsigned char); hipMalloc((void**)&(device->w), pBytes); hipMalloc((void**)&(device->b), pBytes); hipMemcpy(device->w, host.w, pBytes, hipMemcpyHostToDevice); hipMemcpy(device->b, host.b, pBytes, hipMemcpyHostToDevice); hipMalloc((void**)&(deviceOnly->h), pBytes); hipMalloc((void**)&(deviceOnly->f1), 9 * pBytes); hipMalloc((void**)&(deviceOnly->f2), 9 * pBytes); hipMalloc((void**)&(deviceOnly->binary1), uBytes); hipMalloc((void**)&(deviceOnly->binary2), uBytes); }
e54d921f5be66aa0a969109f837d7fd72a78856b.cu
#include <cuda_runtime.h> #include "include/utils.cuh" #include "../include/structs.h" #include "../include/macros.h" __device__ int IDX(int i, int j, int Lx, int* ex, int* ey){ return i + ex[j] + ey[j] * Lx; } __device__ int IDXcm(int i, int j, int Lx, int Ly){ return i + j * Lx * Ly; } void pointerSwap(cudaStruct *deviceOnly){ prec *tempPtr = deviceOnly->f1; deviceOnly->f1 = deviceOnly->f2; deviceOnly->f2 = tempPtr; } void memoryFree(mainStruct host, mainStruct device, cudaStruct deviceOnly){ delete[] host.b; delete[] host.w; cudaFree(device.b); cudaFree(device.w); cudaFree(deviceOnly.h); cudaFree(deviceOnly.f1); cudaFree(deviceOnly.f2); cudaFree(deviceOnly.binary1); cudaFree(deviceOnly.binary2); } void memoryInit(configStruct config, cudaStruct *deviceOnly, mainStruct *device, mainStruct host){ uint pBytes = config.Lx * config.Ly * sizeof(prec); //uint iBytes = config.Lx * config.Ly * sizeof(int); uint uBytes = config.Lx * config.Ly * sizeof(unsigned char); cudaMalloc((void**)&(device->w), pBytes); cudaMalloc((void**)&(device->b), pBytes); cudaMemcpy(device->w, host.w, pBytes, cudaMemcpyHostToDevice); cudaMemcpy(device->b, host.b, pBytes, cudaMemcpyHostToDevice); cudaMalloc((void**)&(deviceOnly->h), pBytes); cudaMalloc((void**)&(deviceOnly->f1), 9 * pBytes); cudaMalloc((void**)&(deviceOnly->f2), 9 * pBytes); cudaMalloc((void**)&(deviceOnly->binary1), uBytes); cudaMalloc((void**)&(deviceOnly->binary2), uBytes); }
9e67aec971efbcd35fc0000da48030f077facf7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* nvcc -I/usr/local/cuda-8.0/samples/common/inc/ -arch=sm_35 -rdc=true QuickSort.cu -o QuickSort.out -lcudadevrt */ #include <helper_string.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <unistd.h> #include <math.h> #include <time.h> #define MAX_DEPTH 16 __global__ void simple_mergesort(int* data,int *dataAux,int begin,int end, int depth){ int middle = (end+begin)/2; int i0 = begin; int i1 = middle; int index; int n = end-begin; // Used to implement recursions using CUDA parallelism. hipStream_t s,s1; if(n < 2){ return; } //launches recursion on left and right part hipStreamCreateWithFlags(&s,hipStreamNonBlocking); hipLaunchKernelGGL(( simple_mergesort), dim3(1), dim3(1), 0, s , data,dataAux, begin, middle, depth+1); hipStreamDestroy(s); hipStreamCreateWithFlags(&s1,hipStreamNonBlocking); hipLaunchKernelGGL(( simple_mergesort), dim3(1), dim3(1), 0, s1 , data,dataAux, middle, end, depth+1); hipStreamDestroy(s1); hipDeviceSynchronize(); for (index = begin; index < end; index++) { if (i0 < middle && (i1 >= end || data[i0] <= data[i1])){ dataAux[index] = data[i0]; i0++; } else{ dataAux[index] = data[i1]; i1++; } } // copy data from auxilarry memory to main memory for(index = begin; index < end; index++){ data[index] = dataAux[index]; } } // gcc compiled code will call this function to access CUDA Merge Sort. extern "C" void gpumerge_sort(int* a,int n){ int* gpuData; int* gpuAuxData; int left = 0; int right = n; hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH); // Allocate GPU memory. 
hipMalloc((void**)&gpuData,n*sizeof(int)); hipMalloc((void**)&gpuAuxData,n*sizeof(int)); hipMemcpy(gpuData,a, n*sizeof(int), hipMemcpyHostToDevice); // Launch on device hipLaunchKernelGGL(( simple_mergesort), dim3(1), dim3(1) , 0, 0, gpuData,gpuAuxData, left, right, 0); hipDeviceSynchronize(); hipMemcpy(a,gpuData, n*sizeof(int), hipMemcpyDeviceToHost); hipFree(gpuAuxData); hipFree(gpuData); hipDeviceReset(); } int main(int argc, char **argv) { int size=atoi(argv[1]) ; clock_t start, end; int i,printvector =atoi(argv[2]); int* array; array = (int*)malloc(size*sizeof(int)); srand(time(NULL)); int *vet = array; for(i = 0; i < size; i++) { array[i] = rand() % size; } int *vet_aux = (int*)malloc(sizeof(int)*size); // Create a copy of the vector to print it berfore and after it is sorted in case this option is enabled for(i=0; i<size; i++){ vet_aux[i] = vet[i]; } // Sort the array start = clock(); gpumerge_sort(array,size); end = clock(); if(printvector) { printf("Original: "); for(i=0; i<size; i++){ printf("%d ", vet_aux[i]); } printf("\n\nSorted: "); for(i=0; i<size; i++){ printf("%d ", vet[i]); }} printf("\n-- Analysis --\n\n"); printf("Sorting algorithm: MergeSort\n"); printf("Array type:Random"); printf("Array size: %d\n", size); double elapsed_time; elapsed_time = (((double)(end-start))/CLOCKS_PER_SEC); printf("Time elapsed: %f s\n", elapsed_time); free(vet); free(vet_aux); printf("\n\n"); return 0; }
9e67aec971efbcd35fc0000da48030f077facf7b.cu
/* nvcc -I/usr/local/cuda-8.0/samples/common/inc/ -arch=sm_35 -rdc=true QuickSort.cu -o QuickSort.out -lcudadevrt */ #include <helper_string.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <unistd.h> #include <math.h> #include <time.h> #define MAX_DEPTH 16 __global__ void simple_mergesort(int* data,int *dataAux,int begin,int end, int depth){ int middle = (end+begin)/2; int i0 = begin; int i1 = middle; int index; int n = end-begin; // Used to implement recursions using CUDA parallelism. cudaStream_t s,s1; if(n < 2){ return; } //launches recursion on left and right part cudaStreamCreateWithFlags(&s,cudaStreamNonBlocking); simple_mergesort<<< 1, 1, 0, s >>>(data,dataAux, begin, middle, depth+1); cudaStreamDestroy(s); cudaStreamCreateWithFlags(&s1,cudaStreamNonBlocking); simple_mergesort<<< 1, 1, 0, s1 >>>(data,dataAux, middle, end, depth+1); cudaStreamDestroy(s1); cudaDeviceSynchronize(); for (index = begin; index < end; index++) { if (i0 < middle && (i1 >= end || data[i0] <= data[i1])){ dataAux[index] = data[i0]; i0++; } else{ dataAux[index] = data[i1]; i1++; } } // copy data from auxilarry memory to main memory for(index = begin; index < end; index++){ data[index] = dataAux[index]; } } // gcc compiled code will call this function to access CUDA Merge Sort. extern "C" void gpumerge_sort(int* a,int n){ int* gpuData; int* gpuAuxData; int left = 0; int right = n; cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH); // Allocate GPU memory. 
cudaMalloc((void**)&gpuData,n*sizeof(int)); cudaMalloc((void**)&gpuAuxData,n*sizeof(int)); cudaMemcpy(gpuData,a, n*sizeof(int), cudaMemcpyHostToDevice); // Launch on device simple_mergesort<<< 1, 1 >>>(gpuData,gpuAuxData, left, right, 0); cudaDeviceSynchronize(); cudaMemcpy(a,gpuData, n*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(gpuAuxData); cudaFree(gpuData); cudaDeviceReset(); } int main(int argc, char **argv) { int size=atoi(argv[1]) ; clock_t start, end; int i,printvector =atoi(argv[2]); int* array; array = (int*)malloc(size*sizeof(int)); srand(time(NULL)); int *vet = array; for(i = 0; i < size; i++) { array[i] = rand() % size; } int *vet_aux = (int*)malloc(sizeof(int)*size); // Create a copy of the vector to print it berfore and after it is sorted in case this option is enabled for(i=0; i<size; i++){ vet_aux[i] = vet[i]; } // Sort the array start = clock(); gpumerge_sort(array,size); end = clock(); if(printvector) { printf("Original: "); for(i=0; i<size; i++){ printf("%d ", vet_aux[i]); } printf("\n\nSorted: "); for(i=0; i<size; i++){ printf("%d ", vet[i]); }} printf("\n-- Analysis --\n\n"); printf("Sorting algorithm: MergeSort\n"); printf("Array type:Random"); printf("Array size: %d\n", size); double elapsed_time; elapsed_time = (((double)(end-start))/CLOCKS_PER_SEC); printf("Time elapsed: %f s\n", elapsed_time); free(vet); free(vet_aux); printf("\n\n"); return 0; }
0808051e9359a264375342042cece8bd0d2f04ba.hip
// !!! This is a file automatically generated by hipify!!! /* Using cuSPARSE for matrix vector multplication of block_match technique. */ #include <algorithm> #include <hip/hip_runtime.h> #include <hipsparse.h> #include <time.h> #include "utils.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 1; double norm = 0; unsigned bin_width = 10; hipsparseHandle_t handle = 0; hipsparseMatDescr_t descr = 0; hipsparseCreate(&handle); hipsparseCreateMatDescr(&descr); h_vec_t<unsigned> h_distance_1; unsigned num_feat_1 = atoi(argv[2]); ReadMatrix(h_distance_1, argv[1], num_feat_1); #ifdef ACCELERATE std::cout << "CUDA" << std::endl; d_vec_t<unsigned> d_distance_1 = distance_1; #endif h_vec_t<double> h_distance_2; unsigned num_feat_2 = atoi(argv[4]); ReadMatrix(h_distance_2, argv[3], num_feat_2); #ifdef ACCELERATE d_vec_t<double> d_distance_2 = distance_2; #endif unsigned num_iters = 20; if (8 == argc) num_iters = atoi(argv[7]); /************************************************** * find unique values of distance1 and their indices ***************************************************/ #ifdef ACCELERATE d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1); d_uniq_keys.erase( remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), IsLessThan(bin_width)), d_uniq_keys.end()); #else //std::cout << "HOST" << std::endl; h_vec_t<unsigned> h_uniq_keys = FindUniques(h_distance_1); h_uniq_keys.erase( remove_if(h_uniq_keys.begin(), h_uniq_keys.end(), IsLessThan(bin_width)), h_uniq_keys.end()); #endif #ifdef ACCELERATE d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()]; for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { d_keys_idcs[i].resize(d_distance_1.size()); } #else h_vec_t<int> *h_keys_idcs = new h_vec_t<int>[h_uniq_keys.size()]; for (unsigned i = 0; i < h_uniq_keys.size(); ++i) { 
h_keys_idcs[i].resize(h_distance_1.size()); } #endif counting_iterator<unsigned> first_idx(0); counting_iterator<unsigned> last_idx = first_idx + num_feat_1; #ifdef ACCELERATE for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { transform(ZIP2(d_distance_1.begin(), first_idx), ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(), IsEqual(d_uniq_keys[i])); d_keys_idcs[i].erase( remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1), d_keys_idcs[i].end()); } #else for (unsigned i = 0; i < h_uniq_keys.size(); ++i) { transform(ZIP2(h_distance_1.begin(), first_idx), ZIP2(h_distance_1.end(), last_idx), h_keys_idcs[i].begin(), IsEqual(h_uniq_keys[i])); h_keys_idcs[i].erase( remove(h_keys_idcs[i].begin(), h_keys_idcs[i].end(), -1), h_keys_idcs[i].end()); } #endif /*************************************************** * construct CSR sparse affinity blocks * ***************************************************/ unsigned len_affinity_block = num_feat_2 * num_feat_2; //#ifdef ACCELERATE // d_vec_t<double> d_affinity_blocks(d_uniq_keys.size() * // len_affinity_block); //#else h_vec_t<double> h_affinity_blocks(h_uniq_keys.size() * len_affinity_block); //#endif // //#ifdef ACCELERATE // d_vec_t<double> csr_val; // d_vec_t<int> csr_col; // d_vec_t<int> csr_row; // d_vec_t<int> csr_blocked_len; // // for (int i = 0; i < d_uniq_keys.size(); ++i) { // transform(d_distance_2.begin(), d_distance_2.end(), // d_affinity_blocks.begin() + i * len_affinity_block, // Affinity(d_uniq_keys[i])); // // CompressMatrix(csr_val, csr_col, csr_row, // raw_pointer_cast(d_affinity_blocks.begin()) + // i * len_affinity_block, // num_feat_2, num_feat_2); // // csr_blocked_len.push_back(csr_val.size()); // } //#else h_vec_t<double> csr_val; h_vec_t<int> csr_col; h_vec_t<int> csr_row; h_vec_t<int> csr_blocked_len; csr_blocked_len.push_back(0); const clock_t begin_time = clock(); for (int i = 0; i < h_uniq_keys.size(); ++i) { transform(h_distance_2.begin(), h_distance_2.end(), 
h_affinity_blocks.begin() + i * len_affinity_block, Affinity(h_uniq_keys[i])); CompressMatrix(csr_val, csr_col, csr_row, raw_pointer_cast(h_affinity_blocks.data()) + i * len_affinity_block, num_feat_2, num_feat_2); csr_blocked_len.push_back(csr_val.size()); } d_vec_t<double> d_csr_val = csr_val; d_vec_t<int> d_csr_col = csr_col; d_vec_t<int> d_csr_row = csr_row; std::cout << "affinity runtime: " << (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl; //#endif //std::cout << "values" // << " " // << "columns" << std::endl; // for (int i = 0; i < h_uniq_keys.size(); ++i) { // for (int j = csr_blocked_len[i]; j < csr_blocked_len[i + 1]; ++j) { // std::cout << csr_val[j] << " " << csr_col[j] << " " << std::endl; // } // std::cout << std::endl; // } // std::cout << std::endl; /****************************************************** * initialize eigen vectors * ******************************************************/ unsigned len_eigen_vec = num_feat_1 * num_feat_2; d_vec_t<double> eigen_vec_new(len_eigen_vec); d_vec_t<double> eigen_vec_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(eigen_vec_old.begin(), eigen_vec_old.end(), norm); //#if ACCELERATE // int num_keys = d_uniq_keys.size(); //#else int num_keys = h_uniq_keys.size(); //#endif /******************************************************* * compute eigen values * ********************************************************/ const clock_t begin_time2 = clock(); for (int iter = 0; iter < num_iters; ++iter) { // Create a stream for each operation hipStream_t *streams = (hipStream_t *)malloc(num_keys * sizeof(hipStream_t)); for (int i = 0; i < num_keys; i++) hipStreamCreate(&streams[i]); for (int i = 0; i < num_keys; i++) { hipsparseSetStream(handle, streams[i]); int csr_size = csr_blocked_len[i + 1] - csr_blocked_len[i]; //#ifdef ACCELERATE // for (int j = 0; j < d_keys_idcs[i].size(); j++) { // int row = d_keys_idcs[i][j] / num_feat_1; // int col = d_keys_idcs[i][j] % num_feat_1; //#else for (int 
j = 0; j < h_keys_idcs[i].size(); j++) { int row = h_keys_idcs[i][j] / num_feat_1; int col = h_keys_idcs[i][j] % num_feat_1; //#endif hipsparseDcsrmv( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2, csr_size, &alpha, descr, raw_pointer_cast(d_csr_val.data() + csr_blocked_len[i]), raw_pointer_cast(d_csr_row.data() + i * (num_feat_2 + 1)), raw_pointer_cast(d_csr_col.data() + csr_blocked_len[i]), raw_pointer_cast(eigen_vec_old.data()) + col * num_feat_2, &beta, raw_pointer_cast(eigen_vec_new.data()) + row * num_feat_2); } } double init = 0; norm = std::sqrt(transform_reduce(eigen_vec_new.begin(), eigen_vec_new.end(), square(), init, thrust::plus<double>())); transform(eigen_vec_new.begin(), eigen_vec_new.end(), eigen_vec_old.begin(), division(norm)); fill(eigen_vec_new.begin(), eigen_vec_new.end(), 0); } std::cout << "Eigen runtime: " << (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl; // std::cout << "eigen values" << std::endl; // for (int i = 0; i < eigen_vec_old.size(); i++) { // std::cout << "eigen new value = " << eigen_vec_new[i] << ""; // std::cout << "eigen old value = " << eigen_vec_old[i] << std::endl; // } hipsparseDestroy(handle); return 0; }
0808051e9359a264375342042cece8bd0d2f04ba.cu
/* Using cuSPARSE for matrix vector multplication of block_match technique. */ #include <algorithm> #include <cuda_runtime.h> #include <cusparse.h> #include <time.h> #include "utils.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 1; double norm = 0; unsigned bin_width = 10; cusparseHandle_t handle = 0; cusparseMatDescr_t descr = 0; cusparseCreate(&handle); cusparseCreateMatDescr(&descr); h_vec_t<unsigned> h_distance_1; unsigned num_feat_1 = atoi(argv[2]); ReadMatrix(h_distance_1, argv[1], num_feat_1); #ifdef ACCELERATE std::cout << "CUDA" << std::endl; d_vec_t<unsigned> d_distance_1 = distance_1; #endif h_vec_t<double> h_distance_2; unsigned num_feat_2 = atoi(argv[4]); ReadMatrix(h_distance_2, argv[3], num_feat_2); #ifdef ACCELERATE d_vec_t<double> d_distance_2 = distance_2; #endif unsigned num_iters = 20; if (8 == argc) num_iters = atoi(argv[7]); /************************************************** * find unique values of distance1 and their indices ***************************************************/ #ifdef ACCELERATE d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1); d_uniq_keys.erase( remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), IsLessThan(bin_width)), d_uniq_keys.end()); #else //std::cout << "HOST" << std::endl; h_vec_t<unsigned> h_uniq_keys = FindUniques(h_distance_1); h_uniq_keys.erase( remove_if(h_uniq_keys.begin(), h_uniq_keys.end(), IsLessThan(bin_width)), h_uniq_keys.end()); #endif #ifdef ACCELERATE d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()]; for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { d_keys_idcs[i].resize(d_distance_1.size()); } #else h_vec_t<int> *h_keys_idcs = new h_vec_t<int>[h_uniq_keys.size()]; for (unsigned i = 0; i < h_uniq_keys.size(); ++i) { h_keys_idcs[i].resize(h_distance_1.size()); } #endif counting_iterator<unsigned> first_idx(0); 
counting_iterator<unsigned> last_idx = first_idx + num_feat_1; #ifdef ACCELERATE for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { transform(ZIP2(d_distance_1.begin(), first_idx), ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(), IsEqual(d_uniq_keys[i])); d_keys_idcs[i].erase( remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1), d_keys_idcs[i].end()); } #else for (unsigned i = 0; i < h_uniq_keys.size(); ++i) { transform(ZIP2(h_distance_1.begin(), first_idx), ZIP2(h_distance_1.end(), last_idx), h_keys_idcs[i].begin(), IsEqual(h_uniq_keys[i])); h_keys_idcs[i].erase( remove(h_keys_idcs[i].begin(), h_keys_idcs[i].end(), -1), h_keys_idcs[i].end()); } #endif /*************************************************** * construct CSR sparse affinity blocks * ***************************************************/ unsigned len_affinity_block = num_feat_2 * num_feat_2; //#ifdef ACCELERATE // d_vec_t<double> d_affinity_blocks(d_uniq_keys.size() * // len_affinity_block); //#else h_vec_t<double> h_affinity_blocks(h_uniq_keys.size() * len_affinity_block); //#endif // //#ifdef ACCELERATE // d_vec_t<double> csr_val; // d_vec_t<int> csr_col; // d_vec_t<int> csr_row; // d_vec_t<int> csr_blocked_len; // // for (int i = 0; i < d_uniq_keys.size(); ++i) { // transform(d_distance_2.begin(), d_distance_2.end(), // d_affinity_blocks.begin() + i * len_affinity_block, // Affinity(d_uniq_keys[i])); // // CompressMatrix(csr_val, csr_col, csr_row, // raw_pointer_cast(d_affinity_blocks.begin()) + // i * len_affinity_block, // num_feat_2, num_feat_2); // // csr_blocked_len.push_back(csr_val.size()); // } //#else h_vec_t<double> csr_val; h_vec_t<int> csr_col; h_vec_t<int> csr_row; h_vec_t<int> csr_blocked_len; csr_blocked_len.push_back(0); const clock_t begin_time = clock(); for (int i = 0; i < h_uniq_keys.size(); ++i) { transform(h_distance_2.begin(), h_distance_2.end(), h_affinity_blocks.begin() + i * len_affinity_block, Affinity(h_uniq_keys[i])); CompressMatrix(csr_val, csr_col, 
csr_row, raw_pointer_cast(h_affinity_blocks.data()) + i * len_affinity_block, num_feat_2, num_feat_2); csr_blocked_len.push_back(csr_val.size()); } d_vec_t<double> d_csr_val = csr_val; d_vec_t<int> d_csr_col = csr_col; d_vec_t<int> d_csr_row = csr_row; std::cout << "affinity runtime: " << (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl; //#endif //std::cout << "values" // << " " // << "columns" << std::endl; // for (int i = 0; i < h_uniq_keys.size(); ++i) { // for (int j = csr_blocked_len[i]; j < csr_blocked_len[i + 1]; ++j) { // std::cout << csr_val[j] << " " << csr_col[j] << " " << std::endl; // } // std::cout << std::endl; // } // std::cout << std::endl; /****************************************************** * initialize eigen vectors * ******************************************************/ unsigned len_eigen_vec = num_feat_1 * num_feat_2; d_vec_t<double> eigen_vec_new(len_eigen_vec); d_vec_t<double> eigen_vec_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(eigen_vec_old.begin(), eigen_vec_old.end(), norm); //#if ACCELERATE // int num_keys = d_uniq_keys.size(); //#else int num_keys = h_uniq_keys.size(); //#endif /******************************************************* * compute eigen values * ********************************************************/ const clock_t begin_time2 = clock(); for (int iter = 0; iter < num_iters; ++iter) { // Create a stream for each operation cudaStream_t *streams = (cudaStream_t *)malloc(num_keys * sizeof(cudaStream_t)); for (int i = 0; i < num_keys; i++) cudaStreamCreate(&streams[i]); for (int i = 0; i < num_keys; i++) { cusparseSetStream(handle, streams[i]); int csr_size = csr_blocked_len[i + 1] - csr_blocked_len[i]; //#ifdef ACCELERATE // for (int j = 0; j < d_keys_idcs[i].size(); j++) { // int row = d_keys_idcs[i][j] / num_feat_1; // int col = d_keys_idcs[i][j] % num_feat_1; //#else for (int j = 0; j < h_keys_idcs[i].size(); j++) { int row = h_keys_idcs[i][j] / num_feat_1; int col = 
h_keys_idcs[i][j] % num_feat_1; //#endif cusparseDcsrmv( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2, csr_size, &alpha, descr, raw_pointer_cast(d_csr_val.data() + csr_blocked_len[i]), raw_pointer_cast(d_csr_row.data() + i * (num_feat_2 + 1)), raw_pointer_cast(d_csr_col.data() + csr_blocked_len[i]), raw_pointer_cast(eigen_vec_old.data()) + col * num_feat_2, &beta, raw_pointer_cast(eigen_vec_new.data()) + row * num_feat_2); } } double init = 0; norm = std::sqrt(transform_reduce(eigen_vec_new.begin(), eigen_vec_new.end(), square(), init, thrust::plus<double>())); transform(eigen_vec_new.begin(), eigen_vec_new.end(), eigen_vec_old.begin(), division(norm)); fill(eigen_vec_new.begin(), eigen_vec_new.end(), 0); } std::cout << "Eigen runtime: " << (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl; // std::cout << "eigen values" << std::endl; // for (int i = 0; i < eigen_vec_old.size(); i++) { // std::cout << "eigen new value = " << eigen_vec_new[i] << ""; // std::cout << "eigen old value = " << eigen_vec_old[i] << std::endl; // } cusparseDestroy(handle); return 0; }
0ca7d3ebfee98d92f3f3e2c2bbdb0e1162752f0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <c10/util/bit_cast.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/hip/sdp_utils.h> #include <ATen/native/transformers/sdp_utils_cpp.h> #ifdef USE_FLASH_ATTENTION // FlashAttention Specific Imports #include <ATen/native/transformers/hip/flash_attn/fmha_api.h> // MemoryEfficient Attention Specific Imports #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> #include <ATen/native/transformers/hip/mem_eff_attention/kernels/cutlassF.h> #include <ATen/native/transformers/hip/mem_eff_attention/pytorch_utils.h> #endif namespace at { namespace native { namespace { static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. 
// so launch B * NH * T warps. auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. 
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? 
for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here we require DH % VEC == 0 for these vectorized stores. 
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? 
NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = ::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % 
TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_HIP_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not 
supported yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D `query`, got ", query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 1-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_sizes().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. 
We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); // strides from packed projection for nested tensors when seq_len is 1 will be // and will trigger a contiguous call in the kernel, so we prevent this bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true; if (no_seq_len_1_nested && (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); auto y = at::scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP 
if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head); qkv = Tensor(); // Not used any more, allow free auto& q = std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! 
if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor 
cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp, debug_attn_mask, philox_seed, philox_offset; std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal, return_debug_mask, scale); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal, c10::optional<double> scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 
2); sdp::CustomMaskType custom_mask_type = is_causal ? sdp::CustomMaskType::CausalFromTopLeft : sdp::CustomMaskType::NoCustomMask; Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt, 0.0 /*dropout_p*/, static_cast<int64_t>(custom_mask_type), compute_log_sumexp, scale); attention = attention.transpose(1, 2); return std::make_tuple(std::move(attention), std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. 
We will hard code it to 0 for now */ constexpr int num_splits{0}; const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked(); at::Tensor output = at::empty_like(query); Tensor logsumexp, debug_attn_mask, philox_seed, philox_offset; std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = pytorch_fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, return_debug_mask, /*return_softmax (this is used for testing)*/ num_splits); debug_attn_mask = return_debug_mask ? debug_attn_mask : at::empty({0}, query.options()); return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& seqstart_q, // (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& seqstart_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, double dropout_p, // attention matrix dropout probability int64_t custom_mask_type, bool compute_logsumexp, c10::optional<double> scale, const c10::optional<at::Tensor>& causal_diagonal, const c10::optional<at::Tensor>& seqlen_k) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 
and run on a // machine that is >= 5.0. In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); // TODO_DRISS we should return max_seqlen_k; int64_t max_seqlen_q, max_seqlen_k; TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value()); if (seqstart_q.has_value()) { TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k)); TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO; at::PhiloxCudaState rng_engine_inputs; if (use_dropout) { at::CUDAGeneratorImpl* gen = 
at::get_generator_or_default<at::CUDAGeneratorImpl>( c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator()); std::lock_guard<std::mutex> lock(gen->mutex_); // if using dropout, we produce 1 random number for each element of the // attention tensor rng_engine_inputs = gen->philox_cuda_state(B * num_heads * M * N); } hipDeviceProp_t* p = at::cuda::getDeviceProperties(query.device().index()); const int computeCapability = p->major * 10 + p->minor; bool kernel_launched = false; const auto maxShmem = p->sharedMemPerBlockOptin; auto launchKernel = [&](auto _k, auto kernel_fn) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; if (kernel_launched) { return; } // Check if this kernel is compatible if (!Kernel::kSupportsDropout && use_dropout) { return; } if (!Kernel::kSupportsBias && bias.has_value()) { return; } if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) { return; } // Alignment if ((query.stride(2) % Kernel::kAlignmentQ) || (key.stride(2) % Kernel::kAlignmentK) || (value.stride(2) % Kernel::kAlignmentV)) { return; } // Uses too much shmem size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > maxShmem) { return; } kernel_launched = true; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( CutlassToAtenDtype<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B, num_heads, compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? 
(typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( CutlassToAtenDtype< typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (seqstart_q.has_value()) { p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr(); p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr(); } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B; p.custom_mask_type = custom_mask_type; p.causal_diagonal_ptr = nullptr; if (causal_diagonal.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value()); TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int); p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr(); } p.seqlen_k_ptr = nullptr; if (seqlen_k.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value()); TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int); p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr(); } p.scale = sdp::calculate_scale(query, scale).as_float_unchecked(); ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1)); if (bias.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias)); 
TORCH_CHECK( bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(), "invalid dtype for bias - should match query's dtype"); p.attn_bias_ptr = (scalar_t*)bias->data_ptr(); // assign strides for bias, viewed as // (batch_sz, n_heads, n_queries, n_keys) const at::Tensor bias_4d_view = get_bias_4d_view(*bias, B, num_heads, M, N); ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0)); ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1)); ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2)); } p.use_dropout = use_dropout; if (p.use_dropout) { p.rng_engine_inputs = rng_engine_inputs; p.dropout_prob = dropout_p; } if (smem_bytes > 0xc000) { auto err = hipFuncSetAttribute( kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); TORCH_CHECK( err != hipErrorInvalidValue, "This GPU does not have enough shared-memory (kernel requires ", smem_bytes / 1024, " kb)"); AT_CUDA_CHECK(err); } Kernel::check_supported(p); hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p); }; // Dispatch to the right kernel DISPATCH_TYPES(query, ([&]() { dispatch_cutlassF<scalar_t>(launchKernel, computeCapability); })); TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!"); AT_CUDA_CHECK(hipGetLastError()); // !!TODO_DRISS: We are throwing this away for now and need to change how its done // uint64_t -> int64_t bitwise casting as PyTorch don't support uint64_t // so just fake it as a int64_t int64_t seed, offset; if (use_dropout) { std::memcpy(&seed, &rng_engine_inputs.seed_, sizeof(seed)); std::memcpy(&offset, &rng_engine_inputs.offset_.val, sizeof(offset)); } return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in 
python before use"); return at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); // !!This function is deprecated. See FunctionsManual.cpp for the implementation!! bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } } // namespace native } // namespace at
0ca7d3ebfee98d92f3f3e2c2bbdb0e1162752f0c.cu
#include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <c10/util/bit_cast.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/cuda/sdp_utils.h> #include <ATen/native/transformers/sdp_utils_cpp.h> #ifdef USE_FLASH_ATTENTION // FlashAttention Specific Imports #include <ATen/native/transformers/cuda/flash_attn/fmha_api.h> // MemoryEfficient Attention Specific Imports #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> #include <ATen/native/transformers/cuda/mem_eff_attention/kernels/cutlassF.h> #include <ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h> #endif namespace at { namespace native { namespace { static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. 
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? 
for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here we require DH % VEC == 0 for these vectorized stores. 
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_sizes().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? 
NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = std::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % 
TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_sizes()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_CUDA_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not supported yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D 
`query`, got ", query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 1-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_sizes().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. 
We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); // strides from packed projection for nested tensors when seq_len is 1 will be // and will trigger a contiguous call in the kernel, so we prevent this bool no_seq_len_1_nested = query.is_nested() ? check_for_seq_len_1_nested_tensor(kernel_params, false) : true; if (no_seq_len_1_nested && (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention)) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); auto y = at::scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false, c10::nullopt); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP 
if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head); qkv = Tensor(); // Not used any more, allow free auto& q = std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! 
if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor, Tensor, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor 
cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp, debug_attn_mask, philox_seed, philox_offset; std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal, return_debug_mask, scale); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal, c10::optional<double> scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 
2); sdp::CustomMaskType custom_mask_type = is_causal ? sdp::CustomMaskType::CausalFromTopLeft : sdp::CustomMaskType::NoCustomMask; Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt, 0.0 /*dropout_p*/, static_cast<int64_t>(custom_mask_type), compute_log_sumexp, scale); attention = attention.transpose(1, 2); return std::make_tuple(std::move(attention), std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. 
We will hard code it to 0 for now */ constexpr int num_splits{0}; const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked(); at::Tensor output = at::empty_like(query); Tensor logsumexp, debug_attn_mask, philox_seed, philox_offset; std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = pytorch_fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, return_debug_mask, /*return_softmax (this is used for testing)*/ num_splits); debug_attn_mask = return_debug_mask ? debug_attn_mask : at::empty({0}, query.options()); return std::make_tuple(output, logsumexp, philox_seed, philox_offset, debug_attn_mask); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor(), Tensor(), Tensor(), Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] const c10::optional<at::Tensor>& bias, // [b, num_heads, seqlen, seqlen] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& seqstart_q, // (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& seqstart_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, double dropout_p, // attention matrix dropout probability int64_t custom_mask_type, bool compute_logsumexp, c10::optional<double> scale, const c10::optional<at::Tensor>& causal_diagonal, const c10::optional<at::Tensor>& seqlen_k) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 
and run on a // machine that is >= 5.0. In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); // TODO_DRISS we should return max_seqlen_k; int64_t max_seqlen_q, max_seqlen_k; TORCH_CHECK(seqstart_q.has_value() == seqstart_k.has_value()); if (seqstart_q.has_value()) { TORCH_CHECK(seqstart_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(seqstart_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(seqstart_q->dim() == 1 && seqstart_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*seqstart_k)); TORCH_CHECK(seqstart_q->size(0) == seqstart_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::cuda::CUDAGuard device_guard(query.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; const bool use_dropout = std::fpclassify(dropout_p) != FP_ZERO; at::PhiloxCudaState rng_engine_inputs; if (use_dropout) { at::CUDAGeneratorImpl* gen = 
at::get_generator_or_default<at::CUDAGeneratorImpl>( c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator()); std::lock_guard<std::mutex> lock(gen->mutex_); // if using dropout, we produce 1 random number for each element of the // attention tensor rng_engine_inputs = gen->philox_cuda_state(B * num_heads * M * N); } cudaDeviceProp* p = at::cuda::getDeviceProperties(query.device().index()); const int computeCapability = p->major * 10 + p->minor; bool kernel_launched = false; const auto maxShmem = p->sharedMemPerBlockOptin; auto launchKernel = [&](auto _k, auto kernel_fn) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; if (kernel_launched) { return; } // Check if this kernel is compatible if (!Kernel::kSupportsDropout && use_dropout) { return; } if (!Kernel::kSupportsBias && bias.has_value()) { return; } if (value.size(3) > Kernel::kMaxK || key.size(3) > Kernel::kMaxK) { return; } // Alignment if ((query.stride(2) % Kernel::kAlignmentQ) || (key.stride(2) % Kernel::kAlignmentK) || (value.stride(2) % Kernel::kAlignmentV)) { return; } // Uses too much shmem size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > maxShmem) { return; } kernel_launched = true; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( CutlassToAtenDtype<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B, num_heads, compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? 
(typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( CutlassToAtenDtype< typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (seqstart_q.has_value()) { p.seqstart_q_ptr = (int32_t*)seqstart_q->data_ptr(); p.seqstart_k_ptr = (int32_t*)seqstart_k->data_ptr(); } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = seqstart_q.has_value() ? seqstart_q->size(0) - 1 : B; p.custom_mask_type = custom_mask_type; p.causal_diagonal_ptr = nullptr; if (causal_diagonal.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(causal_diagonal.value()); TORCH_CHECK(causal_diagonal->scalar_type() == at::ScalarType::Int); p.causal_diagonal_ptr = (int32_t*)causal_diagonal->data_ptr(); } p.seqlen_k_ptr = nullptr; if (seqlen_k.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(seqlen_k.value()); TORCH_CHECK(seqlen_k->scalar_type() == at::ScalarType::Int); p.seqlen_k_ptr = (int32_t*)seqlen_k->data_ptr(); } p.scale = sdp::calculate_scale(query, scale).as_float_unchecked(); ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); ASSIGN_CHECK_OVERFLOW(p.o_strideM, res.stride(1)); if (bias.has_value()) { CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA((*bias)); 
TORCH_CHECK( bias->scalar_type() == CutlassToAtenDtype<scalar_t>::atScalarType(), "invalid dtype for bias - should match query's dtype"); p.attn_bias_ptr = (scalar_t*)bias->data_ptr(); // assign strides for bias, viewed as // (batch_sz, n_heads, n_queries, n_keys) const at::Tensor bias_4d_view = get_bias_4d_view(*bias, B, num_heads, M, N); ASSIGN_CHECK_OVERFLOW(p.bias_strideB, bias_4d_view.stride(0)); ASSIGN_CHECK_OVERFLOW(p.bias_strideH, bias_4d_view.stride(1)); ASSIGN_CHECK_OVERFLOW(p.bias_strideM, bias_4d_view.stride(2)); } p.use_dropout = use_dropout; if (p.use_dropout) { p.rng_engine_inputs = rng_engine_inputs; p.dropout_prob = dropout_p; } if (smem_bytes > 0xc000) { auto err = cudaFuncSetAttribute( kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); TORCH_CHECK( err != cudaErrorInvalidValue, "This GPU does not have enough shared-memory (kernel requires ", smem_bytes / 1024, " kb)"); AT_CUDA_CHECK(err); } Kernel::check_supported(p); kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p); }; // Dispatch to the right kernel DISPATCH_TYPES(query, ([&]() { dispatch_cutlassF<scalar_t>(launchKernel, computeCapability); })); TORCH_CHECK(kernel_launched, "cutlassF: no kernel found to launch!"); AT_CUDA_CHECK(cudaGetLastError()); // !!TODO_DRISS: We are throwing this away for now and need to change how its done // uint64_t -> int64_t bitwise casting as PyTorch don't support uint64_t // so just fake it as a int64_t int64_t seed, offset; if (use_dropout) { std::memcpy(&seed, &rng_engine_inputs.seed_, sizeof(seed)); std::memcpy(&offset, &rng_engine_inputs.offset_.val, sizeof(offset)); } return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in python before use"); return 
at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); // !!This function is deprecated. See FunctionsManual.cpp for the implementation!! bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } } // namespace native } // namespace at
4db4206f93b3f4af435bbb2894b5733f227f12b6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // forward propogation /* def forwardBackward(X, y, W, v): Z_trans = relu(W@X.T) # mat-mat Z = Z_trans.T # trans yhat = Z@v # mat-vec error = y - yhat grad_v = Z.T @ error # mat-vector grad_Z = np.outer(error, v) # outer product grad_p = dRelu(dZ, Z) grad_W = dp.T @ X # mat-mat */ /* Parameter Setup */ #define N 4 // # of input samples #define D 2 // # of input neurons #define K 3 // # of hidden neurons #define STEP 0.001 // learning rate or step size // X: input matrix (n * d) #define X_HEIGHT N #define X_WIDTH D #define X_N X_HEIGHT * X_WIDTH // Z: ifmap matrix (n * k) #define Z_HEIGHT N #define Z_WIDTH K #define Z_N Z_HEIGHT * Z_WIDTH // W: layer 1 weights (k * d) #define W_HEIGHT K #define W_WIDTH D #define W_N W_HEIGHT * W_WIDTH // v: layer 2 weights #define V_HEIGHT K #define V_WIDTH 1 #define V_N V_HEIGHT * V_WIDTH #define BLOCK_SIZE 32 #define LINEAR_BLOCK_SIZE BLOCK_SIZE * BLOCK_SIZE #define MAX_ERR 1e-6 __global__ void matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * d_b_width + cid] = sum; } } __global__ void relu_matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * 
d_b_width + cid] = (sum>0)?sum:0; } } __global__ void d_relu_matrix_mul(double *d_C, double *d_A, double *d_B, double *d_act, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * d_b_width + cid] = (d_act[rid * d_b_width + cid]>0)?sum:0; } } __global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(cid < d_in_width && rid < d_out_width){ d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid]; } } __global__ void vector_sub(double *out, double *a, double *b, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = a[tid] - b[tid]; } } __global__ void update(double *d_weights, double *d_grads, double step, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ d_weights[tid] -= step * d_grads[tid]; } } __global__ void square(double *out, double *in, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = in[tid] * in[tid]; } } int main(){ // forward variables double *h_X, *h_X_T, *h_W, *h_Z, *h_Z_T, *h_v, *h_yhat, *h_y; double *d_X, *d_X_T, *d_W, *d_Z, *d_Z_T, *d_v, *d_yhat, *d_y; // backward variables double *h_error, *h_grad_v, *h_grad_Z, *h_grad_p_T, *h_grad_W, *h_err_sq; double *d_error, *d_grad_v, *d_grad_Z, *d_grad_p_T, *d_grad_W, *d_err_sq; // double *h_ref; // compute verified results // Allocate host memory h_X = (double*)malloc(sizeof(double) * X_N); h_W = (double*)malloc(sizeof(double) * W_N); h_v = (double*)malloc(sizeof(double) * V_N); h_Z_T = (double*)malloc(sizeof(double) * Z_N); h_Z = (double*)malloc(sizeof(double) * Z_N); h_yhat = 
(double*)malloc(sizeof(double) * N); h_y = (double*)malloc(sizeof(double) * N); h_error = (double*)malloc(sizeof(double) * N); h_grad_v = (double*)malloc(sizeof(double) * V_N); h_grad_Z = (double*)malloc(sizeof(double) * Z_N); h_grad_p_T = (double*)malloc(sizeof(double) * Z_N); h_grad_W = (double*)malloc(sizeof(double) * W_N); h_err_sq = (double*)malloc(sizeof(double) * N); // h_ref = (double*)malloc(sizeof(double) * N); // Initialize host arrays /*** TEST 1 ***/ for(int i = 0; i < X_N; i++){ if(i == 1 || i == 3){ h_X[i] = (double)(-i-1); } else{ h_X[i] = (double)(i+1); } } for(int i = 0; i < W_N; i++){ h_W[i] = double(i+1); } for(int i = 0; i < V_HEIGHT; i++){ h_v[i] = (double)(i+1); } for(int i = 0; i < N; i++){ h_y[i] = (double)(i+1); } /*** TEST 2 ***/ // rand((unsigned int)time(NULL)); // for (int i = 0; i< A_N; i++){ // h_A[i] = (double)rand()/(double)(RAND_MAX); // } // for (int i = 0; i< B_N; i++){ // h_B[i] = (double)rand()/(double)(RAND_MAX); // } // Allocate device memory hipMalloc((void**)&d_X, sizeof(double) * X_N); hipMalloc((void**)&d_X_T, sizeof(double) * X_N); hipMalloc((void**)&d_Z, sizeof(double) * Z_N); hipMalloc((void**)&d_Z_T, sizeof(double) * Z_N); hipMalloc((void**)&d_W, sizeof(double) * W_N); hipMalloc((void**)&d_v, sizeof(double) * V_N); hipMalloc((void**)&d_yhat, sizeof(double) * N); hipMalloc((void**)&d_y, sizeof(double) * N); hipMalloc((void**)&d_error, sizeof(double) * N); hipMalloc((void**)&d_grad_v, sizeof(double) * V_N); hipMalloc((void**)&d_grad_Z, sizeof(double) * Z_N); hipMalloc((void**)&d_grad_p_T, sizeof(double) * Z_N); hipMalloc((void**)&d_grad_W, sizeof(double) * W_N); hipMalloc((void**)&d_err_sq, sizeof(double) * N); // Transfer data from host to device memory hipMemcpy(d_X, h_X, sizeof(double) * X_N, hipMemcpyHostToDevice); hipMemcpy(d_W, h_W, sizeof(double) * W_N, hipMemcpyHostToDevice); hipMemcpy(d_v, h_v, sizeof(double) * V_N, hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, sizeof(double) * N, hipMemcpyHostToDevice); // 
Executing kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // X_HEIGHT (N) corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid1),dim3(dimBlock), 0, 0, d_X_T, d_X, D, N); dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); hipLaunchKernelGGL(( relu_matrix_mul), dim3(dimGrid2),dim3(dimBlock), 0, 0, d_Z_T, d_W, d_X_T, K, D, N); dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid3),dim3(dimBlock), 0, 0, d_Z, d_Z_T, N, K); dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul), dim3(dimGrid4),dim3(dimBlock), 0, 0, d_yhat, d_Z, d_v, N, K, 1); // backwards: hipLaunchKernelGGL(( vector_sub), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_error, d_yhat, d_y, N); dim3 dimGrid5(K / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul), dim3(dimGrid5),dim3(dimBlock), 0, 0, d_grad_v, d_Z_T, d_error, K, N, 1); dim3 dimGrid6(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); hipLaunchKernelGGL(( d_relu_matrix_mul), dim3(dimGrid6),dim3(dimBlock), 0, 0, d_grad_Z, d_error, d_v, d_Z, N, 1, K); dim3 dimGrid7(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_transpose), dim3(dimGrid7),dim3(dimBlock), 0, 0, d_grad_p_T, d_grad_Z, K, N); dim3 dimGrid8(K / BLOCK_SIZE + 1, D / BLOCK_SIZE + 1); hipLaunchKernelGGL(( matrix_mul), dim3(dimGrid8),dim3(dimBlock), 0, 0, d_grad_W, d_grad_p_T, d_X, K, N, D); // update hipLaunchKernelGGL(( update), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_W, d_grad_W, (STEP/N), W_N); hipLaunchKernelGGL(( update), dim3(N / LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_v, d_grad_v, (STEP/N), V_N); hipMemcpy(h_W, d_W, sizeof(double) * W_N, hipMemcpyDeviceToHost); hipMemcpy(h_v, d_v, sizeof(double) * V_N, hipMemcpyDeviceToHost); // get MSE back hipLaunchKernelGGL(( square), dim3(N / 
LINEAR_BLOCK_SIZE + 1), dim3(LINEAR_BLOCK_SIZE), 0, 0, d_err_sq, d_error, N); hipMemcpy(h_err_sq, d_err_sq, sizeof(double) * N, hipMemcpyDeviceToHost); double sum = 0.0; for(int i = 0; i < N; i++){ sum += h_err_sq[i]; } printf("MSE is %f\n", sum / N); // Verification for(int i = 0; i < K; i++){ for(int j = 0; j < D; j++){ // double sum = 0.0; // for(int k = 0; k < A_WIDTH; k++){ // sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j]; // } // h_ref[i * C_WIDTH + j] = sum; // assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR); printf("h_W[%d][%d] = %f\n", i, j, h_W[i * D + j]); // printf("h_Z[%d][%d] = %f\n", i, j, h_Z[i * K + j]); // printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]); } } for(int i = 0; i < K; i++){ printf("h_v[%d] = %f\n", i, h_v[i]); } printf("PASSED\n"); // Deallocate device memory hipFree(d_X); hipFree(d_X_T); hipFree(d_W); hipFree(d_v); hipFree(d_Z); hipFree(d_Z_T); hipFree(d_yhat); hipFree(d_y); hipFree(d_error); hipFree(d_grad_v); hipFree(d_grad_Z); hipFree(d_grad_p_T); hipFree(d_grad_W); hipFree(d_err_sq); // Deallocate host memory free(h_X); free(h_W); free(h_v); free(h_Z); free(h_Z_T); free(h_yhat); free(h_y); free(h_error); free(h_grad_v); free(h_grad_Z); free(h_grad_p_T); free(h_grad_W); free(h_err_sq); }
4db4206f93b3f4af435bbb2894b5733f227f12b6.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> // forward propogation /* def forwardBackward(X, y, W, v): Z_trans = relu(W@X.T) # mat-mat Z = Z_trans.T # trans yhat = Z@v # mat-vec error = y - yhat grad_v = Z.T @ error # mat-vector grad_Z = np.outer(error, v) # outer product grad_p = dRelu(dZ, Z) grad_W = dp.T @ X # mat-mat */ /* Parameter Setup */ #define N 4 // # of input samples #define D 2 // # of input neurons #define K 3 // # of hidden neurons #define STEP 0.001 // learning rate or step size // X: input matrix (n * d) #define X_HEIGHT N #define X_WIDTH D #define X_N X_HEIGHT * X_WIDTH // Z: ifmap matrix (n * k) #define Z_HEIGHT N #define Z_WIDTH K #define Z_N Z_HEIGHT * Z_WIDTH // W: layer 1 weights (k * d) #define W_HEIGHT K #define W_WIDTH D #define W_N W_HEIGHT * W_WIDTH // v: layer 2 weights #define V_HEIGHT K #define V_WIDTH 1 #define V_N V_HEIGHT * V_WIDTH #define BLOCK_SIZE 32 #define LINEAR_BLOCK_SIZE BLOCK_SIZE * BLOCK_SIZE #define MAX_ERR 1e-6 __global__ void matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * d_b_width + cid] = sum; } } __global__ void relu_matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * d_b_width + cid] = (sum>0)?sum:0; } } __global__ void d_relu_matrix_mul(double 
*d_C, double *d_A, double *d_B, double *d_act, int d_a_height, int d_a_width, int d_b_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(rid < d_a_height && cid < d_b_width){ // sum: to evaluated dot product double sum = 0.0; for(int k = 0; k < d_a_width; k++){ sum += d_A[rid * d_a_width + k] * d_B[d_b_width*k + cid]; } d_C[rid * d_b_width + cid] = (d_act[rid * d_b_width + cid]>0)?sum:0; } } __global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) { int cid = blockIdx.y * blockDim.y + threadIdx.y; int rid = blockIdx.x * blockDim.x + threadIdx.x; if(cid < d_in_width && rid < d_out_width){ d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid]; } } __global__ void vector_sub(double *out, double *a, double *b, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = a[tid] - b[tid]; } } __global__ void update(double *d_weights, double *d_grads, double step, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ d_weights[tid] -= step * d_grads[tid]; } } __global__ void square(double *out, double *in, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n){ out[tid] = in[tid] * in[tid]; } } int main(){ // forward variables double *h_X, *h_X_T, *h_W, *h_Z, *h_Z_T, *h_v, *h_yhat, *h_y; double *d_X, *d_X_T, *d_W, *d_Z, *d_Z_T, *d_v, *d_yhat, *d_y; // backward variables double *h_error, *h_grad_v, *h_grad_Z, *h_grad_p_T, *h_grad_W, *h_err_sq; double *d_error, *d_grad_v, *d_grad_Z, *d_grad_p_T, *d_grad_W, *d_err_sq; // double *h_ref; // compute verified results // Allocate host memory h_X = (double*)malloc(sizeof(double) * X_N); h_W = (double*)malloc(sizeof(double) * W_N); h_v = (double*)malloc(sizeof(double) * V_N); h_Z_T = (double*)malloc(sizeof(double) * Z_N); h_Z = (double*)malloc(sizeof(double) * Z_N); h_yhat = (double*)malloc(sizeof(double) * N); h_y = (double*)malloc(sizeof(double) * N); h_error = 
(double*)malloc(sizeof(double) * N); h_grad_v = (double*)malloc(sizeof(double) * V_N); h_grad_Z = (double*)malloc(sizeof(double) * Z_N); h_grad_p_T = (double*)malloc(sizeof(double) * Z_N); h_grad_W = (double*)malloc(sizeof(double) * W_N); h_err_sq = (double*)malloc(sizeof(double) * N); // h_ref = (double*)malloc(sizeof(double) * N); // Initialize host arrays /*** TEST 1 ***/ for(int i = 0; i < X_N; i++){ if(i == 1 || i == 3){ h_X[i] = (double)(-i-1); } else{ h_X[i] = (double)(i+1); } } for(int i = 0; i < W_N; i++){ h_W[i] = double(i+1); } for(int i = 0; i < V_HEIGHT; i++){ h_v[i] = (double)(i+1); } for(int i = 0; i < N; i++){ h_y[i] = (double)(i+1); } /*** TEST 2 ***/ // rand((unsigned int)time(NULL)); // for (int i = 0; i< A_N; i++){ // h_A[i] = (double)rand()/(double)(RAND_MAX); // } // for (int i = 0; i< B_N; i++){ // h_B[i] = (double)rand()/(double)(RAND_MAX); // } // Allocate device memory cudaMalloc((void**)&d_X, sizeof(double) * X_N); cudaMalloc((void**)&d_X_T, sizeof(double) * X_N); cudaMalloc((void**)&d_Z, sizeof(double) * Z_N); cudaMalloc((void**)&d_Z_T, sizeof(double) * Z_N); cudaMalloc((void**)&d_W, sizeof(double) * W_N); cudaMalloc((void**)&d_v, sizeof(double) * V_N); cudaMalloc((void**)&d_yhat, sizeof(double) * N); cudaMalloc((void**)&d_y, sizeof(double) * N); cudaMalloc((void**)&d_error, sizeof(double) * N); cudaMalloc((void**)&d_grad_v, sizeof(double) * V_N); cudaMalloc((void**)&d_grad_Z, sizeof(double) * Z_N); cudaMalloc((void**)&d_grad_p_T, sizeof(double) * Z_N); cudaMalloc((void**)&d_grad_W, sizeof(double) * W_N); cudaMalloc((void**)&d_err_sq, sizeof(double) * N); // Transfer data from host to device memory cudaMemcpy(d_X, h_X, sizeof(double) * X_N, cudaMemcpyHostToDevice); cudaMemcpy(d_W, h_W, sizeof(double) * W_N, cudaMemcpyHostToDevice); cudaMemcpy(d_v, h_v, sizeof(double) * V_N, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, sizeof(double) * N, cudaMemcpyHostToDevice); // Executing kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // X_HEIGHT (N) 
corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid1,dimBlock>>>(d_X_T, d_X, D, N); dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); relu_matrix_mul<<<dimGrid2,dimBlock>>>(d_Z_T, d_W, d_X_T, K, D, N); dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid3,dimBlock>>>(d_Z, d_Z_T, N, K); dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); matrix_mul<<<dimGrid4,dimBlock>>>(d_yhat, d_Z, d_v, N, K, 1); // backwards: vector_sub<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_error, d_yhat, d_y, N); dim3 dimGrid5(K / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1); matrix_mul<<<dimGrid5,dimBlock>>>(d_grad_v, d_Z_T, d_error, K, N, 1); dim3 dimGrid6(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); d_relu_matrix_mul<<<dimGrid6,dimBlock>>>(d_grad_Z, d_error, d_v, d_Z, N, 1, K); dim3 dimGrid7(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1); matrix_transpose<<<dimGrid7,dimBlock>>>(d_grad_p_T, d_grad_Z, K, N); dim3 dimGrid8(K / BLOCK_SIZE + 1, D / BLOCK_SIZE + 1); matrix_mul<<<dimGrid8,dimBlock>>>(d_grad_W, d_grad_p_T, d_X, K, N, D); // update update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_W, d_grad_W, (STEP/N), W_N); update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_v, d_grad_v, (STEP/N), V_N); cudaMemcpy(h_W, d_W, sizeof(double) * W_N, cudaMemcpyDeviceToHost); cudaMemcpy(h_v, d_v, sizeof(double) * V_N, cudaMemcpyDeviceToHost); // get MSE back square<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_err_sq, d_error, N); cudaMemcpy(h_err_sq, d_err_sq, sizeof(double) * N, cudaMemcpyDeviceToHost); double sum = 0.0; for(int i = 0; i < N; i++){ sum += h_err_sq[i]; } printf("MSE is %f\n", sum / N); // Verification for(int i = 0; i < K; i++){ for(int j = 0; j < D; j++){ // double sum = 0.0; // for(int k = 0; k < A_WIDTH; k++){ // sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j]; // } // h_ref[i * C_WIDTH + j] = sum; // assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i 
* C_WIDTH + j]) < MAX_ERR); printf("h_W[%d][%d] = %f\n", i, j, h_W[i * D + j]); // printf("h_Z[%d][%d] = %f\n", i, j, h_Z[i * K + j]); // printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]); } } for(int i = 0; i < K; i++){ printf("h_v[%d] = %f\n", i, h_v[i]); } printf("PASSED\n"); // Deallocate device memory cudaFree(d_X); cudaFree(d_X_T); cudaFree(d_W); cudaFree(d_v); cudaFree(d_Z); cudaFree(d_Z_T); cudaFree(d_yhat); cudaFree(d_y); cudaFree(d_error); cudaFree(d_grad_v); cudaFree(d_grad_Z); cudaFree(d_grad_p_T); cudaFree(d_grad_W); cudaFree(d_err_sq); // Deallocate host memory free(h_X); free(h_W); free(h_v); free(h_Z); free(h_Z_T); free(h_yhat); free(h_y); free(h_error); free(h_grad_v); free(h_grad_Z); free(h_grad_p_T); free(h_grad_W); free(h_err_sq); }
6d6deb3e1c868941ffd40f2f422ad83b68f6358a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "maxpool_layer.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } output[out_index] = max; indexes[out_index] = max_i; } __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int area = (size-1)/stride; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int index = id; int j = id % in_w; id /= in_w; int i = id % in_h; id /= in_h; int k = id % in_c; id /= in_c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; float d = 0; int l, m; for(l = -area; l < area+1; ++l){ for(m = -area; m < area+1; ++m){ int out_w = (j-w_offset)/stride + m; int out_h = (i-h_offset)/stride + l; int out_index = out_w + w*(out_h + h*(k + c*b)); int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h); d += (valid && indexes[out_index] == index) ? delta[out_index] : 0; } } prev_delta[index] += d; } extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net) { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu); check_error(hipPeekAtLastError()); /* // for test cuda_pull_array(layer.output_gpu, layer.output, layer.batch*layer.outputs); if (layer.post_training_quantization) restore(layer.output, layer.batch*layer.outputs, *(net.fl)); int o, p, q; printf("layer %d, maxpool output:\n", layer.current_layer_index); for (o=0; o<2; ++o) { for(p=5; p<10; ++p) { for(q=5; q<10; ++q) { printf("%f ", layer.output[o*h*w+p*w+q]); } printf("\n"); } } printf("\n"); */ if (net.write_results) { cuda_pull_array(layer.output_gpu, layer.output, layer.batch*layer.outputs); char buff[50]; sprintf(buff, "ship/statistics/outputs/maxpool_%02d.dat", 
layer.current_layer_index); FILE *fp; fp = fopen(buff, "wb"); fwrite(layer.output, sizeof(float), layer.batch*layer.outputs, fp); fclose(fp); } } extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net) { size_t n = layer.h*layer.w*layer.c*layer.batch; hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu); check_error(hipPeekAtLastError()); }
6d6deb3e1c868941ffd40f2f422ad83b68f6358a.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "maxpool_layer.h" #include "cuda.h" #include "utils.h" } __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } output[out_index] = max; indexes[out_index] = max_i; } __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int area = (size-1)/stride; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int index = id; int j = id % in_w; id /= in_w; int i = id % in_h; id /= in_h; int k = id % in_c; id /= in_c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; float d = 0; int l, m; for(l = -area; l < area+1; ++l){ for(m = -area; m < area+1; ++m){ int out_w = (j-w_offset)/stride + m; int out_h = (i-h_offset)/stride + l; int out_index = out_w + w*(out_h + h*(k + c*b)); int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h); d += (valid && indexes[out_index] == index) ? delta[out_index] : 0; } } prev_delta[index] += d; } extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net) { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu); check_error(cudaPeekAtLastError()); /* // for test cuda_pull_array(layer.output_gpu, layer.output, layer.batch*layer.outputs); if (layer.post_training_quantization) restore(layer.output, layer.batch*layer.outputs, *(net.fl)); int o, p, q; printf("layer %d, maxpool output:\n", layer.current_layer_index); for (o=0; o<2; ++o) { for(p=5; p<10; ++p) { for(q=5; q<10; ++q) { printf("%f ", layer.output[o*h*w+p*w+q]); } printf("\n"); } } printf("\n"); */ if (net.write_results) { cuda_pull_array(layer.output_gpu, layer.output, layer.batch*layer.outputs); char buff[50]; sprintf(buff, "ship/statistics/outputs/maxpool_%02d.dat", layer.current_layer_index); FILE *fp; 
fp = fopen(buff, "wb"); fwrite(layer.output, sizeof(float), layer.batch*layer.outputs, fp); fclose(fp); } } extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net) { size_t n = layer.h*layer.w*layer.c*layer.batch; backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu); check_error(cudaPeekAtLastError()); }
fc0e5137450af798f4126da939dc30216d0d1616.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <chrono> #include <stdlib.h> #define TILESIZE 32 using namespace std; int N = 1000; int numCRows; int numARows = N; int numBRows = N; int numCColumns; int numAColumns = N; int numBColumns = N; __global__ void cudaWithBlocksAndThreads(float *MatA, float *MatB, float *MatC, const int nx, const int ny) { //Codigo de clase unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; int i, j; if (ix < nx) { for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { MatC[ix * nx + i] += (MatA[ix * nx + j] * MatB[j * nx + i]); } } } return; } __global__ void matrixMultiplyTiled(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float sA[TILESIZE][TILESIZE]; __shared__ float sB[TILESIZE][TILESIZE]; int Row = blockDim.y*blockIdx.y + threadIdx.y; int Col = blockDim.x*blockIdx.x + threadIdx.x; float Cvalue = 0.0; sA[threadIdx.y][threadIdx.x] = 0.0; sB[threadIdx.y][threadIdx.x] = 0.0; for (int k = 0; k < (((numAColumns - 1)/ TILESIZE) + 1); k++) { // Copy Data to Tile from Matrix if ( (Row < numARows) && (threadIdx.x + (k*TILESIZE)) < numAColumns) { sA[threadIdx.y][threadIdx.x] = A[(Row*numAColumns) + threadIdx.x + (k*TILESIZE)]; } else { sA[threadIdx.y][threadIdx.x] = 0.0; } // Copy Data to Tile from Matrix (Global Memory to Shared Memory) if ( Col < numBColumns && (threadIdx.y + k*TILESIZE) < numBRows) { sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*TILESIZE)*numBColumns + Col]; } else { sB[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); // Multiplying Elements in tile for (int j = 0; j < TILESIZE; ++j) { Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x]; } } // Saving result to C if (Row < numCRows && Col < numCColumns) { C[Row*numCColumns + Col] = Cvalue; } } void matMultiplyOnCPU(float * A, float * B, float * C, int numARows, int numAColumns, int 
numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i=0; i < numARows; i ++) { for (int j = 0; j < numAColumns; j++) { C[i*numCColumns + j ] = 0.0; for (int k = 0; k < numCColumns; k++) { C[i*numCColumns + j ] += A[i*numAColumns + k] * B [k*numBColumns + j]; } } } return; } int main(int argc, char ** argv) { float * hostA; float * hostB; float * hostC; float * hostC2; float * hostComputedC; float * deviceA; float * deviceB; float * deviceC; float * deviceA2; float * deviceB2; float * deviceC2; // Alloc CPU Memory hostA = (float *) malloc(sizeof(float)*numARows*numAColumns); hostB = (float *) malloc(sizeof(float)*numBRows*numBColumns); // Initialize matrix A and B for (int i = 0; i < numARows*numAColumns; i++) { hostA[i] = 1.0; hostB[i] = 1.0; } // Setting matrix C numCRows = numARows; numCColumns = numBColumns; hostC = (float *) malloc(sizeof(float)*numCRows*numCColumns); hostC2 = (float *) malloc(sizeof(float)*numCRows*numCColumns); hostComputedC = (float *) malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory TILED SAFE_CALL(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns), "Error Allocation GPU memory A"); SAFE_CALL(hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns), "Error Allocation GPU memory B"); SAFE_CALL(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns), "Error Allocation GPU memory C"); // Allocating GPU memory NOT TILED SAFE_CALL(hipMalloc((void **)&deviceA2, sizeof(float)*numARows*numAColumns), "Error Allocation GPU memory A2"); SAFE_CALL(hipMalloc((void **)&deviceB2, sizeof(float)*numBRows*numBColumns), "Error Allocation GPU memory B2"); SAFE_CALL(hipMalloc((void **)&deviceC2, sizeof(float)*numCRows*numCColumns), "Error Allocation GPU memory C2"); // Copy memory to the GPU TILED SAFE_CALL(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice), "Error Copying memory to GPU A"); SAFE_CALL(hipMemcpy(deviceB, hostB, 
sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice), "Error Copying memory to GPU B"); // Copy memory to the GPU NOT TILED SAFE_CALL(hipMemcpy(deviceA2, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice), "Error Copying memory to GPU A2"); SAFE_CALL(hipMemcpy(deviceB2, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice), "Error Copying memory to GPU B2"); // Initialize the grid and block dimensions // Blocks required dim3 dimGrid((numCColumns / TILESIZE) + 1, (numCRows / TILESIZE) + 1, 1); // Threads in each block dim3 dimBlock(TILESIZE, TILESIZE, 1); // Call TILED kernel auto start = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( matrixMultiplyTiled), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); auto end = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end - start; printf("TILED GPU %f ms\n", duration_ms.count()); hipError_t err1 = hipPeekAtLastError(); // Sync device hipDeviceSynchronize(); // Copy the results in GPU memory back to the CPU SAFE_CALL(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost), "Error copying results from GPU to CPU"); // Call CPU MatMult auto start2 = chrono::high_resolution_clock::now(); matMultiplyOnCPU(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); auto end2 = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms2 = end2 - start2; printf("CPU %f ms\n", duration_ms2.count()); // Call NOT TILED Kernel auto start3 = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( cudaWithBlocksAndThreads), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA2, deviceB2, deviceC2, numARows, numARows); auto end3 = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms3 = end3 - start3; printf("NOT TILED GPU %f ms\n", 
duration_ms3.count()); // Sync device hipDeviceSynchronize(); // Copy the results in GPU memory back to the CPU SAFE_CALL(hipMemcpy(hostC2, deviceC2, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost), "Error copying results from GPU to CPU"); // Compare reults from CPU and GPU for (int i=0; i < numCColumns*numCRows; i++) { if (hostComputedC[i] != hostC[i]) { printf("Diferentes valores en Row = %d Col = %d CPU[] = %f --GPU[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]); break; } } // Free the GPU memory SAFE_CALL(hipFree(deviceA), "Error Freeing GPU Memory A"); SAFE_CALL(hipFree(deviceB), "Error Freeing GPU Memory B"); SAFE_CALL(hipFree(deviceC), "Error Freeing GPU Memory C"); SAFE_CALL(hipFree(deviceA2), "Error Freeing GPU Memory A2"); SAFE_CALL(hipFree(deviceB2), "Error Freeing GPU Memory B2"); SAFE_CALL(hipFree(deviceC2), "Error Freeing GPU Memory C2"); //Free the Pointer Memory free(hostA); free(hostB); free(hostC); free(hostC2); free(hostComputedC); return 0; }
fc0e5137450af798f4126da939dc30216d0d1616.cu
#include "common.h" #include <stdio.h> #include <cuda.h> #include <chrono> #include <stdlib.h> #define TILESIZE 32 using namespace std; int N = 1000; int numCRows; int numARows = N; int numBRows = N; int numCColumns; int numAColumns = N; int numBColumns = N; __global__ void cudaWithBlocksAndThreads(float *MatA, float *MatB, float *MatC, const int nx, const int ny) { //Codigo de clase unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; int i, j; if (ix < nx) { for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { MatC[ix * nx + i] += (MatA[ix * nx + j] * MatB[j * nx + i]); } } } return; } __global__ void matrixMultiplyTiled(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float sA[TILESIZE][TILESIZE]; __shared__ float sB[TILESIZE][TILESIZE]; int Row = blockDim.y*blockIdx.y + threadIdx.y; int Col = blockDim.x*blockIdx.x + threadIdx.x; float Cvalue = 0.0; sA[threadIdx.y][threadIdx.x] = 0.0; sB[threadIdx.y][threadIdx.x] = 0.0; for (int k = 0; k < (((numAColumns - 1)/ TILESIZE) + 1); k++) { // Copy Data to Tile from Matrix if ( (Row < numARows) && (threadIdx.x + (k*TILESIZE)) < numAColumns) { sA[threadIdx.y][threadIdx.x] = A[(Row*numAColumns) + threadIdx.x + (k*TILESIZE)]; } else { sA[threadIdx.y][threadIdx.x] = 0.0; } // Copy Data to Tile from Matrix (Global Memory to Shared Memory) if ( Col < numBColumns && (threadIdx.y + k*TILESIZE) < numBRows) { sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*TILESIZE)*numBColumns + Col]; } else { sB[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); // Multiplying Elements in tile for (int j = 0; j < TILESIZE; ++j) { Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x]; } } // Saving result to C if (Row < numCRows && Col < numCColumns) { C[Row*numCColumns + Col] = Cvalue; } } void matMultiplyOnCPU(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i=0; i 
< numARows; i ++) { for (int j = 0; j < numAColumns; j++) { C[i*numCColumns + j ] = 0.0; for (int k = 0; k < numCColumns; k++) { C[i*numCColumns + j ] += A[i*numAColumns + k] * B [k*numBColumns + j]; } } } return; } int main(int argc, char ** argv) { float * hostA; float * hostB; float * hostC; float * hostC2; float * hostComputedC; float * deviceA; float * deviceB; float * deviceC; float * deviceA2; float * deviceB2; float * deviceC2; // Alloc CPU Memory hostA = (float *) malloc(sizeof(float)*numARows*numAColumns); hostB = (float *) malloc(sizeof(float)*numBRows*numBColumns); // Initialize matrix A and B for (int i = 0; i < numARows*numAColumns; i++) { hostA[i] = 1.0; hostB[i] = 1.0; } // Setting matrix C numCRows = numARows; numCColumns = numBColumns; hostC = (float *) malloc(sizeof(float)*numCRows*numCColumns); hostC2 = (float *) malloc(sizeof(float)*numCRows*numCColumns); hostComputedC = (float *) malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory TILED SAFE_CALL(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns), "Error Allocation GPU memory A"); SAFE_CALL(cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns), "Error Allocation GPU memory B"); SAFE_CALL(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns), "Error Allocation GPU memory C"); // Allocating GPU memory NOT TILED SAFE_CALL(cudaMalloc((void **)&deviceA2, sizeof(float)*numARows*numAColumns), "Error Allocation GPU memory A2"); SAFE_CALL(cudaMalloc((void **)&deviceB2, sizeof(float)*numBRows*numBColumns), "Error Allocation GPU memory B2"); SAFE_CALL(cudaMalloc((void **)&deviceC2, sizeof(float)*numCRows*numCColumns), "Error Allocation GPU memory C2"); // Copy memory to the GPU TILED SAFE_CALL(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice), "Error Copying memory to GPU A"); SAFE_CALL(cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice), "Error Copying memory to GPU B"); // 
Copy memory to the GPU NOT TILED SAFE_CALL(cudaMemcpy(deviceA2, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice), "Error Copying memory to GPU A2"); SAFE_CALL(cudaMemcpy(deviceB2, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice), "Error Copying memory to GPU B2"); // Initialize the grid and block dimensions // Blocks required dim3 dimGrid((numCColumns / TILESIZE) + 1, (numCRows / TILESIZE) + 1, 1); // Threads in each block dim3 dimBlock(TILESIZE, TILESIZE, 1); // Call TILED kernel auto start = chrono::high_resolution_clock::now(); matrixMultiplyTiled<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); auto end = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end - start; printf("TILED GPU %f ms\n", duration_ms.count()); cudaError_t err1 = cudaPeekAtLastError(); // Sync device cudaDeviceSynchronize(); // Copy the results in GPU memory back to the CPU SAFE_CALL(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost), "Error copying results from GPU to CPU"); // Call CPU MatMult auto start2 = chrono::high_resolution_clock::now(); matMultiplyOnCPU(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); auto end2 = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms2 = end2 - start2; printf("CPU %f ms\n", duration_ms2.count()); // Call NOT TILED Kernel auto start3 = chrono::high_resolution_clock::now(); cudaWithBlocksAndThreads<<<dimGrid, dimBlock>>>(deviceA2, deviceB2, deviceC2, numARows, numARows); auto end3 = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms3 = end3 - start3; printf("NOT TILED GPU %f ms\n", duration_ms3.count()); // Sync device cudaDeviceSynchronize(); // Copy the results in GPU memory back to the CPU SAFE_CALL(cudaMemcpy(hostC2, deviceC2, 
sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost), "Error copying results from GPU to CPU"); // Compare reults from CPU and GPU for (int i=0; i < numCColumns*numCRows; i++) { if (hostComputedC[i] != hostC[i]) { printf("Diferentes valores en Row = %d Col = %d CPU[] = %f --GPU[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]); break; } } // Free the GPU memory SAFE_CALL(cudaFree(deviceA), "Error Freeing GPU Memory A"); SAFE_CALL(cudaFree(deviceB), "Error Freeing GPU Memory B"); SAFE_CALL(cudaFree(deviceC), "Error Freeing GPU Memory C"); SAFE_CALL(cudaFree(deviceA2), "Error Freeing GPU Memory A2"); SAFE_CALL(cudaFree(deviceB2), "Error Freeing GPU Memory B2"); SAFE_CALL(cudaFree(deviceC2), "Error Freeing GPU Memory C2"); //Free the Pointer Memory free(hostA); free(hostB); free(hostC); free(hostC2); free(hostComputedC); return 0; }
9952a052e2bd7cb74addba407422dc08086ba729.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ABserial.c * * Created on: Nov 11, 2014 * Author: nathan */ #include "ab_pt1.h" extern "C" { #include "boundBox.h" } extern "C" { #include "computeAuxiliaryGrid_pt1.h" } extern "C" { #include "compactAuxiliaryGrid_pt1.h" } //#include "writecell.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <sys/times.h> #include <sys/time.h> #include "cuda_utils.h" #include "timer.h" #include <time.h> __global__ void wd_ab_parallel_t1(double *xc, double *yc, double *xf, double *yf, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ int myId, includesAuxCells, j, index; double r, rtemp, rAux; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((xc[myId]-xbox[j]),2) + pow((yc[myId]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(xc[myId]-compAuxCells[j].xcenter,2) + pow(yc[myId]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. 
For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(xc[myId]-compAuxCells[j].xcenter,2) + pow(yc[myId]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(xc[myId]-xf[compAuxCells[j].faceIndex[index]],2) + pow(yc[myId]-yf[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<wallDist[myId]){ wallDist[myId]=rtemp; } index++; } } } } //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t2(double *cellCenters, double *xf, double *yf, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_cellCenters[]; int myId, tid, includesAuxCells, j, index; double r, rtemp, rAux; tid = threadIdx.x; myId = threadIdx.x + blockDim.x * blockIdx.x; // Pull cell centers into shared memory s_cellCenters[2*tid] = cellCenters[2*myId]; s_cellCenters[2*tid+1] = cellCenters[2*myId+1]; // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((s_cellCenters[2*tid]-xbox[j]),2) + pow((s_cellCenters[2*tid+1]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(s_cellCenters[2*tid]-compAuxCells[j].xcenter,2) + pow(s_cellCenters[2*tid+1]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += 
auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(s_cellCenters[2*tid]-compAuxCells[j].xcenter,2) + pow(s_cellCenters[2*tid+1]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(s_cellCenters[2*tid]-xf[compAuxCells[j].faceIndex[index]],2) + pow(s_cellCenters[2*tid+1]-yf[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<wallDist[myId]){ wallDist[myId]=rtemp; } index++; } } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t3(double *cellCenters, double *faceCenters, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_faceCenters[]; int myId, tid, includesAuxCells, i, j, index; double r, rtemp, rcurrent, rAux; tid = threadIdx.x; myId = threadIdx.x + blockDim.x * blockIdx.x; // Pull face centers into shared memory if (tid==0){ for (i=0; i<size_f; i++){ s_faceCenters[2*i] = faceCenters[2*i]; s_faceCenters[2*i+1] = faceCenters[2*i+1]; } } __syncthreads(); // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((cellCenters[2*myId]-xbox[j]),2) + pow((cellCenters[2*myId+1]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; 
j<numAuxCells; j++){ rAux = sqrt( pow(cellCenters[2*myId]-compAuxCells[j].xcenter,2) + pow(cellCenters[2*myId+1]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(cellCenters[2*myId]-compAuxCells[j].xcenter,2) + pow(cellCenters[2*myId+1]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(cellCenters[2*myId]-s_faceCenters[compAuxCells[j].faceIndex[index]],2) + pow(cellCenters[2*myId+1]-s_faceCenters[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t4(double *cellCenters, double *faceCenters, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-xbox[j]),2) + pow((c_y-ybox[j]),2) ); if (rtemp<r){ r=rtemp; 
} } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index]+1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t5(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; 
j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t6(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, double *auxCenters, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ 
s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t7(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, double *auxCenters, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access 
bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ab_parallel_t1(double * xc, double * yc, double * xf, double * yf, int size_c, int size_f, double * wallDist){ double xmin; double xmax; double ymin; double ymax; //////////////////////////////////////////////////////////////////// // Pre-processing 
//////////////////////////////////////////////////////////////////// // Create geometry bounding box boundBox(xf,yf,size_f,&xmin,&xmax,&ymin,&ymax); // Create auxiliary grid int resI=80; int resJ=80; double auxDiag = sqrt( pow((xmax-xmin)/(double)(resI-1),2) + pow((ymax-ymin)/(double)(resJ-1),2)); int numAuxCells = (resI-1)*(resJ-1); int i, j, cellsWithFaces; struct cell_pt1 *auxCells; // auxCells = (struct cell_pt1 *)malloc(numAuxCells*sizeof(struct cell_pt1)); auxCells = new cell_pt1[numAuxCells]; computeAuxiliaryGrid_pt1(xmin,xmax,ymin,ymax,resI,resJ,auxCells); // Count number of auxiliary cells that contain geometry faces cellsWithFaces = 0; for (i=0; i<numAuxCells; i++){ for (j=0; j<size_f; j++){ if (xf[j] < auxCells[i].xmax && xf[j] > auxCells[i].xmin && yf[j] < auxCells[i].ymax && yf[j] > auxCells[i].ymin){ cellsWithFaces++; break; } } } // Allocate memory for compacted cells struct cell_pt1 * compAuxCells; // compAuxCells = (struct cell_pt1 *)malloc(cellsWithFaces*sizeof(struct cell_pt1)); compAuxCells = new cell_pt1[cellsWithFaces]; /////// compactAuxiliaryGrid_pt1(auxCells,numAuxCells,compAuxCells,xf,yf,size_f); /////// // Bounding box point arrays double xmid = (xmax+xmin)/2.0; double ymid = (ymax+ymin)/2.0; double xBoxPts[8] = {xmin, xmid, xmax, xmax, xmax, xmid, xmin, xmin}; double yBoxPts[8] = {ymin, ymin, ymin, ymid, ymax, ymax, ymax, ymid}; //////////////////////////////////////////////////////////////////////////////// // Combine xc,yc arrays for coallesced memory access in parallel t2 version //////////////////////////////////////////////////////////////////////////////// double *cellCenters; cellCenters = new double[2*size_c]; for (i=0; i<size_c; i++){ cellCenters[2*i] = xc[i]; cellCenters[2*i+1] = yc[i]; } double *faceCenters; faceCenters = new double[2*size_f]; for (i=0; i<size_f; i++){ faceCenters[2*i] = xf[i]; faceCenters[2*i+1] = yf[i]; } double *boxPts; boxPts = new double[16]; for (i=0; i<8; i++){ boxPts[2*i] = xBoxPts[i]; boxPts[2*i+1] = 
yBoxPts[i]; } double *auxCenters; auxCenters = new double[2*cellsWithFaces*sizeof(double)]; for (i=0; i<cellsWithFaces; i++){ auxCenters[2*i] = compAuxCells[i].xcenter; auxCenters[2*i+1] = compAuxCells[i].ycenter; } //////////////////////////////////////////////////////////////////// // Allocate device memory and copy data //////////////////////////////////////////////////////////////////// // bounding box double *d_xbox, *d_ybox, *d_box; checkCudaErrors(hipMalloc(&d_xbox,8*sizeof(double))); checkCudaErrors(hipMalloc(&d_ybox,8*sizeof(double))); checkCudaErrors(hipMalloc(&d_box,16*sizeof(double))); checkCudaErrors(hipMemcpy(d_xbox,xBoxPts,8*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_ybox,yBoxPts,8*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_box,boxPts,16*sizeof(double),hipMemcpyHostToDevice)); // grid and faces double *d_xc, *d_yc, *d_xf, *d_yf, *d_cellCenters, *d_faceCenters; checkCudaErrors(hipMalloc(&d_xc,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_yc,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_xf,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_yf,size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_cellCenters,2*size_c*sizeof(double))); checkCudaErrors(hipMalloc(&d_faceCenters,2*size_f*sizeof(double))); checkCudaErrors(hipMemcpy(d_xc,xc,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_yc,yc,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_xf,xf,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_yf,yf,size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_cellCenters,cellCenters,2*size_c*sizeof(double),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_faceCenters,faceCenters,2*size_f*sizeof(double),hipMemcpyHostToDevice)); // auxCell structs struct cell_pt1 * d_compAuxCells; checkCudaErrors(hipMalloc((void **)&d_compAuxCells,cellsWithFaces*sizeof(struct cell_pt1))); 
checkCudaErrors(hipMemcpy(d_compAuxCells,compAuxCells,cellsWithFaces*sizeof(struct cell_pt1),hipMemcpyHostToDevice)); // auxCenter array double *d_auxCenters; checkCudaErrors(hipMalloc(&d_auxCenters,2*cellsWithFaces*sizeof(double))); checkCudaErrors(hipMemcpy(d_auxCenters,auxCenters,2*cellsWithFaces*sizeof(double),hipMemcpyHostToDevice)); // wallDist array double *d_wallDist; checkCudaErrors(hipMalloc(&d_wallDist,size_c*sizeof(double))); checkCudaErrors(hipMemcpy(d_wallDist,wallDist,size_c*sizeof(double),hipMemcpyHostToDevice)); //////////////////////////////////////////////////////////////////// // Wall Distance Calc //////////////////////////////////////////////////////////////////// GpuTimer timer; int threadsPerBlock, numBlocks; threadsPerBlock = 512; numBlocks = (size_c/threadsPerBlock)+1; timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t1), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_xc,d_yc,d_xf,d_yf,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T1(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Reset wallDistance checkCudaErrors(hipMemcpy(d_wallDist,wallDist,size_c*sizeof(double),hipMemcpyHostToDevice)); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t2), dim3(numBlocks),dim3(threadsPerBlock),2*threadsPerBlock*sizeof(double), 0, d_cellCenters,d_xf,d_yf,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T2(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // timer.Start(); // wd_ab_parallel_t3<<<numBlocks,threadsPerBlock,2*size_f*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); // timer.Stop(); // printf("Advancing boundary - parallel T3(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t4), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, 
d_cellCenters,d_faceCenters,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T4(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t5), dim3(numBlocks),dim3(threadsPerBlock),16*sizeof(double), 0, d_cellCenters,d_faceCenters,d_box,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T5(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t6), dim3(numBlocks),dim3(threadsPerBlock),16*sizeof(double), 0, d_cellCenters,d_faceCenters,d_box,d_compAuxCells,d_auxCenters,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T6(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); hipLaunchKernelGGL(( wd_ab_parallel_t7), dim3(numBlocks),dim3(threadsPerBlock),16*sizeof(double), 0, d_cellCenters,d_faceCenters,d_box,d_compAuxCells,d_auxCenters,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T6(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Copy wallDist back to host checkCudaErrors(hipMemcpy(wallDist,d_wallDist,sizeof(double)*size_c,hipMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////// }
9952a052e2bd7cb74addba407422dc08086ba729.cu
/* * ABserial.c * * Created on: Nov 11, 2014 * Author: nathan */ #include "ab_pt1.h" extern "C" { #include "boundBox.h" } extern "C" { #include "computeAuxiliaryGrid_pt1.h" } extern "C" { #include "compactAuxiliaryGrid_pt1.h" } //#include "writecell.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <sys/times.h> #include <sys/time.h> #include "cuda_utils.h" #include "timer.h" #include <time.h> __global__ void wd_ab_parallel_t1(double *xc, double *yc, double *xf, double *yf, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ int myId, includesAuxCells, j, index; double r, rtemp, rAux; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((xc[myId]-xbox[j]),2) + pow((yc[myId]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(xc[myId]-compAuxCells[j].xcenter,2) + pow(yc[myId]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. 
For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(xc[myId]-compAuxCells[j].xcenter,2) + pow(yc[myId]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(xc[myId]-xf[compAuxCells[j].faceIndex[index]],2) + pow(yc[myId]-yf[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<wallDist[myId]){ wallDist[myId]=rtemp; } index++; } } } } //////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t2(double *cellCenters, double *xf, double *yf, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_cellCenters[]; int myId, tid, includesAuxCells, j, index; double r, rtemp, rAux; tid = threadIdx.x; myId = threadIdx.x + blockDim.x * blockIdx.x; // Pull cell centers into shared memory s_cellCenters[2*tid] = cellCenters[2*myId]; s_cellCenters[2*tid+1] = cellCenters[2*myId+1]; // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((s_cellCenters[2*tid]-xbox[j]),2) + pow((s_cellCenters[2*tid+1]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(s_cellCenters[2*tid]-compAuxCells[j].xcenter,2) + pow(s_cellCenters[2*tid+1]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += 
auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(s_cellCenters[2*tid]-compAuxCells[j].xcenter,2) + pow(s_cellCenters[2*tid+1]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(s_cellCenters[2*tid]-xf[compAuxCells[j].faceIndex[index]],2) + pow(s_cellCenters[2*tid+1]-yf[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<wallDist[myId]){ wallDist[myId]=rtemp; } index++; } } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t3(double *cellCenters, double *faceCenters, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_faceCenters[]; int myId, tid, includesAuxCells, i, j, index; double r, rtemp, rcurrent, rAux; tid = threadIdx.x; myId = threadIdx.x + blockDim.x * blockIdx.x; // Pull face centers into shared memory if (tid==0){ for (i=0; i<size_f; i++){ s_faceCenters[2*i] = faceCenters[2*i]; s_faceCenters[2*i+1] = faceCenters[2*i+1]; } } __syncthreads(); // Keep array access bounded if (myId >= size_c){ return; } // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((cellCenters[2*myId]-xbox[j]),2) + pow((cellCenters[2*myId+1]-ybox[j]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; 
j<numAuxCells; j++){ rAux = sqrt( pow(cellCenters[2*myId]-compAuxCells[j].xcenter,2) + pow(cellCenters[2*myId+1]-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } // if(myId==0){ // printf("rAux, r: %f, %f\n",rAux,r); // } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(cellCenters[2*myId]-compAuxCells[j].xcenter,2) + pow(cellCenters[2*myId+1]-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(cellCenters[2*myId]-s_faceCenters[compAuxCells[j].faceIndex[index]],2) + pow(cellCenters[2*myId+1]-s_faceCenters[compAuxCells[j].faceIndex[index]],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t4(double *cellCenters, double *faceCenters, double *xbox, double *ybox, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-xbox[j]),2) + pow((c_y-ybox[j]),2) ); if (rtemp<r){ r=rtemp; 
} } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index]+1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t5(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; 
j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-compAuxCells[j].xcenter,2) + pow(c_y-compAuxCells[j].ycenter,2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t6(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, double *auxCenters, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ 
s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void wd_ab_parallel_t7(double *cellCenters, double *faceCenters, double *box, struct cell_pt1 *compAuxCells, double *auxCenters, int size_c, int size_f, int numAuxCells, double auxDiag, double *wallDist){ extern __shared__ double s_box []; int myId, includesAuxCells, j, index; double r, rtemp, rcurrent, rAux, c_x, c_y; myId = threadIdx.x + blockDim.x * blockIdx.x; // Keep array access 
bounded if (myId >= size_c){ return; } // Pull box pts into shared memory if (threadIdx.x < 16){ s_box[threadIdx.x] = box[threadIdx.x]; } c_x = cellCenters[2*myId]; c_y = cellCenters[2*myId+1]; // Compute initial radius r=1e9; for (j=0; j<8; j++){ rtemp = sqrt( pow((c_x-s_box[2*j]),2) + pow((c_y-s_box[2*j+1]),2) ); if (rtemp<r){ r=rtemp; } } // Loop through compacted auxCell array to see if any lie within rc includesAuxCells = 0; while(includesAuxCells == 0){ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2) ); // Increase r to be sure enough geometry is included if(rAux < r){ r += auxDiag*0.5; includesAuxCells=1; break; } else{ r += auxDiag; } } } /* * Loop through compacted auxCell array. For those that lie within r, * traverse through faces, compute wallDist and check for minimum */ for (j=0; j<numAuxCells; j++){ rAux = sqrt( pow(c_x-auxCenters[2*j],2) + pow(c_y-auxCenters[2*j+1],2)); // Check if auxCell is within radius of interest if(rAux < r){ index = 0; // Loop through faces and compute distance from grid cell center while(index < compAuxCells[j].numFaces){ rtemp = sqrt( pow(c_x-faceCenters[2*compAuxCells[j].faceIndex[index]],2) + pow(c_y-faceCenters[2*compAuxCells[j].faceIndex[index] + 1],2)); // If dist is smaller than current wallDist, replace if(rtemp<rcurrent){ // wallDist[myId]=rtemp; rcurrent = rtemp; } index++; } } } // Store wallDistance to global array wallDist[myId] = rcurrent; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ab_parallel_t1(double * xc, double * yc, double * xf, double * yf, int size_c, int size_f, double * wallDist){ double xmin; double xmax; double ymin; double ymax; //////////////////////////////////////////////////////////////////// // Pre-processing 
//////////////////////////////////////////////////////////////////// // Create geometry bounding box boundBox(xf,yf,size_f,&xmin,&xmax,&ymin,&ymax); // Create auxiliary grid int resI=80; int resJ=80; double auxDiag = sqrt( pow((xmax-xmin)/(double)(resI-1),2) + pow((ymax-ymin)/(double)(resJ-1),2)); int numAuxCells = (resI-1)*(resJ-1); int i, j, cellsWithFaces; struct cell_pt1 *auxCells; // auxCells = (struct cell_pt1 *)malloc(numAuxCells*sizeof(struct cell_pt1)); auxCells = new cell_pt1[numAuxCells]; computeAuxiliaryGrid_pt1(xmin,xmax,ymin,ymax,resI,resJ,auxCells); // Count number of auxiliary cells that contain geometry faces cellsWithFaces = 0; for (i=0; i<numAuxCells; i++){ for (j=0; j<size_f; j++){ if (xf[j] < auxCells[i].xmax && xf[j] > auxCells[i].xmin && yf[j] < auxCells[i].ymax && yf[j] > auxCells[i].ymin){ cellsWithFaces++; break; } } } // Allocate memory for compacted cells struct cell_pt1 * compAuxCells; // compAuxCells = (struct cell_pt1 *)malloc(cellsWithFaces*sizeof(struct cell_pt1)); compAuxCells = new cell_pt1[cellsWithFaces]; /////// compactAuxiliaryGrid_pt1(auxCells,numAuxCells,compAuxCells,xf,yf,size_f); /////// // Bounding box point arrays double xmid = (xmax+xmin)/2.0; double ymid = (ymax+ymin)/2.0; double xBoxPts[8] = {xmin, xmid, xmax, xmax, xmax, xmid, xmin, xmin}; double yBoxPts[8] = {ymin, ymin, ymin, ymid, ymax, ymax, ymax, ymid}; //////////////////////////////////////////////////////////////////////////////// // Combine xc,yc arrays for coallesced memory access in parallel t2 version //////////////////////////////////////////////////////////////////////////////// double *cellCenters; cellCenters = new double[2*size_c]; for (i=0; i<size_c; i++){ cellCenters[2*i] = xc[i]; cellCenters[2*i+1] = yc[i]; } double *faceCenters; faceCenters = new double[2*size_f]; for (i=0; i<size_f; i++){ faceCenters[2*i] = xf[i]; faceCenters[2*i+1] = yf[i]; } double *boxPts; boxPts = new double[16]; for (i=0; i<8; i++){ boxPts[2*i] = xBoxPts[i]; boxPts[2*i+1] = 
yBoxPts[i]; } double *auxCenters; auxCenters = new double[2*cellsWithFaces*sizeof(double)]; for (i=0; i<cellsWithFaces; i++){ auxCenters[2*i] = compAuxCells[i].xcenter; auxCenters[2*i+1] = compAuxCells[i].ycenter; } //////////////////////////////////////////////////////////////////// // Allocate device memory and copy data //////////////////////////////////////////////////////////////////// // bounding box double *d_xbox, *d_ybox, *d_box; checkCudaErrors(cudaMalloc(&d_xbox,8*sizeof(double))); checkCudaErrors(cudaMalloc(&d_ybox,8*sizeof(double))); checkCudaErrors(cudaMalloc(&d_box,16*sizeof(double))); checkCudaErrors(cudaMemcpy(d_xbox,xBoxPts,8*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_ybox,yBoxPts,8*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_box,boxPts,16*sizeof(double),cudaMemcpyHostToDevice)); // grid and faces double *d_xc, *d_yc, *d_xf, *d_yf, *d_cellCenters, *d_faceCenters; checkCudaErrors(cudaMalloc(&d_xc,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_yc,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_xf,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_yf,size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_cellCenters,2*size_c*sizeof(double))); checkCudaErrors(cudaMalloc(&d_faceCenters,2*size_f*sizeof(double))); checkCudaErrors(cudaMemcpy(d_xc,xc,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_yc,yc,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_xf,xf,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_yf,yf,size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_cellCenters,cellCenters,2*size_c*sizeof(double),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_faceCenters,faceCenters,2*size_f*sizeof(double),cudaMemcpyHostToDevice)); // auxCell structs struct cell_pt1 * d_compAuxCells; checkCudaErrors(cudaMalloc((void **)&d_compAuxCells,cellsWithFaces*sizeof(struct 
cell_pt1))); checkCudaErrors(cudaMemcpy(d_compAuxCells,compAuxCells,cellsWithFaces*sizeof(struct cell_pt1),cudaMemcpyHostToDevice)); // auxCenter array double *d_auxCenters; checkCudaErrors(cudaMalloc(&d_auxCenters,2*cellsWithFaces*sizeof(double))); checkCudaErrors(cudaMemcpy(d_auxCenters,auxCenters,2*cellsWithFaces*sizeof(double),cudaMemcpyHostToDevice)); // wallDist array double *d_wallDist; checkCudaErrors(cudaMalloc(&d_wallDist,size_c*sizeof(double))); checkCudaErrors(cudaMemcpy(d_wallDist,wallDist,size_c*sizeof(double),cudaMemcpyHostToDevice)); //////////////////////////////////////////////////////////////////// // Wall Distance Calc //////////////////////////////////////////////////////////////////// GpuTimer timer; int threadsPerBlock, numBlocks; threadsPerBlock = 512; numBlocks = (size_c/threadsPerBlock)+1; timer.Start(); wd_ab_parallel_t1<<<numBlocks,threadsPerBlock>>>(d_xc,d_yc,d_xf,d_yf,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T1(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Reset wallDistance checkCudaErrors(cudaMemcpy(d_wallDist,wallDist,size_c*sizeof(double),cudaMemcpyHostToDevice)); timer.Start(); wd_ab_parallel_t2<<<numBlocks,threadsPerBlock,2*threadsPerBlock*sizeof(double)>>>(d_cellCenters,d_xf,d_yf,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T2(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // timer.Start(); // wd_ab_parallel_t3<<<numBlocks,threadsPerBlock,2*size_f*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); // timer.Stop(); // printf("Advancing boundary - parallel T3(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); 
wd_ab_parallel_t4<<<numBlocks,threadsPerBlock>>>(d_cellCenters,d_faceCenters,d_xbox,d_ybox,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T4(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); wd_ab_parallel_t5<<<numBlocks,threadsPerBlock,16*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_box,d_compAuxCells,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T5(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); wd_ab_parallel_t6<<<numBlocks,threadsPerBlock,16*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_box,d_compAuxCells,d_auxCenters,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T6(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); timer.Start(); wd_ab_parallel_t7<<<numBlocks,threadsPerBlock,16*sizeof(double)>>>(d_cellCenters,d_faceCenters,d_box,d_compAuxCells,d_auxCenters,size_c,size_f,cellsWithFaces,auxDiag,d_wallDist); timer.Stop(); printf("Advancing boundary - parallel T6(GpuTimer): \t %.0f milliseconds\n",timer.Elapsed()); // Copy wallDist back to host checkCudaErrors(cudaMemcpy(wallDist,d_wallDist,sizeof(double)*size_c,cudaMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////// }
8623eaac560c42f36f1808a8f6732014faac0fac.hip
// !!! This is a file automatically generated by hipify!!! /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*! \file DriverTersoffGPU.cu \brief Defines the driver functions for computing all types of three-body forces on the GPU */ #include "DriverTersoffGPU.cuh" #include "EvaluatorTersoff.h" hipError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args, const tersoff_params *d_params) { return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args, d_params); }
8623eaac560c42f36f1808a8f6732014faac0fac.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*! \file DriverTersoffGPU.cu \brief Defines the driver functions for computing all types of three-body forces on the GPU */ #include "DriverTersoffGPU.cuh" #include "EvaluatorTersoff.h" cudaError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args, const tersoff_params *d_params) { return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args, d_params); }
4c194c2042ba1fbd8a679e1bb5fdebf2f84daee1.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/im2col.cuh> #include <ATen/native/im2col_shape_check.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty_like.h> #include <ATen/ops/col2im_native.h> #include <ATen/ops/im2col_native.h> #endif namespace at::native { namespace { static void im2col_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); im2col_shape_check( input_, Tensor(), kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 3) { batched_input = false; input = input.view({1, input.size(0), input.size(1), input.size(2)}); } int64_t 
batch_size = input.size(0); int64_t n_input_plane = input.size(1); int64_t input_height = input.size(2); int64_t input_width = input.size(3); int64_t output_height = (input_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t output_width = (input_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; int64_t n_output_plane = n_input_plane * kernel_width * kernel_height; int64_t output_length = output_height * output_width; output.resize_({batch_size, n_output_plane, output_length}); // Launch kernel AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "im2col_out_cuda", [&] { Tensor input_n; Tensor output_n; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); im2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.const_data_ptr<scalar_t>(), n_input_plane, input_height, input_width, output_height, output_width, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.mutable_data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_length}); } }); } } // namespace Tensor& im2col_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor im2col_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } } // namespace at::native
4c194c2042ba1fbd8a679e1bb5fdebf2f84daee1.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/im2col.cuh> #include <ATen/native/im2col_shape_check.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty_like.h> #include <ATen/ops/col2im_native.h> #include <ATen/ops/im2col_native.h> #endif namespace at::native { namespace { static void im2col_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); im2col_shape_check( input_, Tensor(), kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 3) { batched_input = false; input = input.view({1, input.size(0), input.size(1), input.size(2)}); } int64_t batch_size = input.size(0); int64_t n_input_plane = 
input.size(1); int64_t input_height = input.size(2); int64_t input_width = input.size(3); int64_t output_height = (input_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t output_width = (input_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; int64_t n_output_plane = n_input_plane * kernel_width * kernel_height; int64_t output_length = output_height * output_width; output.resize_({batch_size, n_output_plane, output_length}); // Launch kernel AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "im2col_out_cuda", [&] { Tensor input_n; Tensor output_n; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); im2col<scalar_t>( at::cuda::getCurrentCUDAStream(), input_n.const_data_ptr<scalar_t>(), n_input_plane, input_height, input_width, output_height, output_width, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.mutable_data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_length}); } }); } } // namespace Tensor& im2col_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor im2col_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } } // namespace at::native
c443bf00ab2f1aee267d29c643310074438387a6.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathPointwise.cuh" #include "THHTensor.hpp" #include "../generic/THCTensorMathPointwise.cu" #include <THH/THHGenerateCharType.h>
c443bf00ab2f1aee267d29c643310074438387a6.cu
#include "../THCTensorMathPointwise.cuh" #include "THCTensor.hpp" #include "../generic/THCTensorMathPointwise.cu" #include <THC/THCGenerateCharType.h>
6c91e956fa79d6cad9899211ad9cb41178feaa81.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #define MAX_TEXT_BLOCKS 100 #define MAX_CHAR_SIZE 10000 #define BUFFER_SIZE (MAX_TEXT_BLOCKS * MAX_CHAR_SIZE) __device__ char TEXT_BUFFER[MAX_TEXT_BLOCKS][MAX_CHAR_SIZE]; __device__ bool MATCHES[MAX_TEXT_BLOCKS]; /** * CUDA Kernel Device code * * Searches for a given phrase in the item reviews for a given file, lines must be prefixed with 'review/text'. */ __global__ void wordSearch(int itemsInBuffer, char* phrase, int phraseLen) { printf("wordSearch() is called, blockIdx is: %d, phrase is %s, phraseLen is %d, TEXT_BUFFER[%d] is: %s, ", blockIdx.x, phrase, phraseLen, blockIdx.x, TEXT_BUFFER[blockIdx.x]); if (blockIdx.x < itemsInBuffer) { int strLen = 0; for (int i = 0; TEXT_BUFFER[blockIdx.x][i] != '\0'; i++) { strLen++; } for (int charIdx = 0; charIdx < strLen; charIdx++) { if (TEXT_BUFFER[blockIdx.x][charIdx] == phrase[0]) { int wordLen = 1; charIdx++; while (TEXT_BUFFER[blockIdx.x][charIdx] == phrase[wordLen] && wordLen < phraseLen) { charIdx++; wordLen++; if (wordLen == phraseLen) { MATCHES[blockIdx.x] = 1; printf("MATCHES[%d] = %d text is %s\n", blockIdx.x, MATCHES[blockIdx.x], TEXT_BUFFER[blockIdx.x]); return; } } } } } MATCHES[blockIdx.x] = 0; } /* HOST MAIN ROUTINE */ int main(int argc, char **argv) { if (argc < 3) { printf("Insufficient arguments\n"); exit(0); } char* filePath = argv[1]; char* phrase = argv[2]; int phraseLen = 0; for (int i = 0; phrase[i] != '\0'; i++) { if (i == 1000) break; phraseLen++; } // Error code to check return values for CUDA calls hipError_t err = hipSuccess; const int h_bufferSize = MAX_CHAR_SIZE * MAX_TEXT_BLOCKS; // 4000000 bytes of chars = 4MB const char* reviewIdentifier = "review/text"; char hBuffer[MAX_TEXT_BLOCKS][MAX_CHAR_SIZE]; printf("Size of hbuffer: 
%d", (int)sizeof(hBuffer)); bool *matches = (bool *)malloc(sizeof(bool) * MAX_TEXT_BLOCKS); if (hBuffer == NULL) { printf("Failed to create buffer\n"); } char* hBufferPtr = hBuffer[0]; printf("[char text blocks of %d chars max]\n", MAX_CHAR_SIZE); printf("[host buffer of %d chars\n", h_bufferSize); /* DEVICE ALLOCATE MEMORY */ /* Allocate memory for global variable MATCHES on device */ err = hipMalloc((void **)&MATCHES, sizeof(bool) * MAX_TEXT_BLOCKS); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate matches array matches (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Allocate memory on device for command line argument phrase */ char *d_phrase = NULL; size_t d_phraseSize = phraseLen * sizeof(char); err = hipMalloc((void **)&d_phrase, d_phraseSize); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate char array phrase (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Allocate memory on device for phrase length */ int *d_phraseLen = NULL; size_t d_phraseLenSize = sizeof(int); err = hipMalloc((void **)&d_phraseLen, d_phraseLenSize); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate phrase length phraseLen (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } FILE *file = fopen(filePath, "r"); int lineId = 0; int lineCount = 0; if (file != NULL) { char line[MAX_CHAR_SIZE]; if (line == NULL) { printf("Failed to allocate text block!\n"); exit(EXIT_FAILURE); } while (fgets(line, MAX_CHAR_SIZE, file) != NULL && lineCount < MAX_TEXT_BLOCKS + 1) { size_t tempLen = strlen(reviewIdentifier); char firstWord[tempLen]; strncpy(firstWord, line, tempLen); if (strncmp(reviewIdentifier, firstWord, tempLen) == 0) { if (lineCount % 10 == 0) { printf("Line: %d\n", lineCount); } hBufferPtr = hBuffer[lineCount]; // shift ptr to next block strncpy(hBufferPtr, line, MAX_CHAR_SIZE); lineCount++; /* Host text buffer hBuffer is full and ready to be copied to device TEXT_BUFFER */ if (lineCount == 
MAX_TEXT_BLOCKS) { printf("[Host buffer is full, copying memory to device]\n"); /* COPY MEMORY FROM HOST TO DEVICE*/ /* Copy text buffer to device*/ printf("[Copying text buffer to device...]\n"); for (int i = 0; i < MAX_TEXT_BLOCKS; i++) { printf("%s\n", hBuffer[i]); } err = hipMemcpyToSymbol(TEXT_BUFFER, hBuffer, BUFFER_SIZE, 0, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy text buffer hBuffer (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Copy phrase to device */ printf("[Copying phrase to device...]\n"); err = hipMemcpy(d_phrase, phrase, d_phraseSize, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy phrase d_phrase (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Copy phrase length to device */ printf("[Copying phrase len to device...]\n"); err = hipMemcpy(d_phraseLen, &phraseLen, d_phraseLenSize, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy phrase len d_phraseLen (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Initialize and launch CUDA kernel */ printf("[Initializing CUDA kernel and launching]\n"); hipLaunchKernelGGL(( wordSearch), dim3(MAX_TEXT_BLOCKS),dim3(1), 0, 0, MAX_TEXT_BLOCKS, d_phrase, phraseLen); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // hipFree(d_phrase); // hipFree(d_phraseLen); /* Retrieve MATCHES result from device to host*/ printf("[Copying device memory matches to host memory]\n"); err = hipMemcpyFromSymbol(matches, MATCHES, sizeof(bool) * MAX_TEXT_BLOCKS, 0, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy device matches array to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } for (int i = 0; i < sizeof(bool) * MAX_TEXT_BLOCKS; i++) { if (matches[i] == 1) { printf("1"); } else { printf("0"); } } 
printf("\n"); // exit(0); // end cuda // lineCount = 0; // printf("[Setting hbuffer to empty via memset\n"); // memset(&hBuffer[0], 0, sizeof(hBuffer)); // clear the buffer // set the buffer ptr back to the beginning // printf("[Settings hBufferPtr back to the beginning\n"); // hBufferPtr = hBuffer[0]; break; } } lineId++; } } else { printf("Could not read file %s", filePath); return 0; } printf("End CUDA search\n"); fclose(file); free(matches); hipFree(d_phrase); hipFree(d_phraseLen); printf("Done\n"); return 0; }
6c91e956fa79d6cad9899211ad9cb41178feaa81.cu
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>

#define MAX_TEXT_BLOCKS 100
#define MAX_CHAR_SIZE 10000
#define BUFFER_SIZE (MAX_TEXT_BLOCKS * MAX_CHAR_SIZE)

/*
 * Statically allocated device storage: one text block per CUDA block plus one
 * match flag per block. These are __device__ symbols, so the host must access
 * them with cudaMemcpyToSymbol / cudaMemcpyFromSymbol. (The original code also
 * called cudaMalloc(&MATCHES, ...), which is invalid for a __device__ symbol;
 * that call has been removed -- the storage already exists.)
 */
__device__ char TEXT_BUFFER[MAX_TEXT_BLOCKS][MAX_CHAR_SIZE];
__device__ bool MATCHES[MAX_TEXT_BLOCKS];

/**
 * CUDA kernel: one block per buffered review line.
 *
 * Block blockIdx.x scans TEXT_BUFFER[blockIdx.x] for `phrase` (exactly
 * `phraseLen` characters, NUL-terminated on the device) and records 1 in
 * MATCHES[blockIdx.x] on the first occurrence, 0 otherwise.
 * Expected launch layout: <<<numLines, 1>>> (only thread 0 of each block
 * does work; parallelism is across lines, not within one).
 *
 * Fixes versus the original scan:
 *  - a single-character phrase can now match (the old code only set the flag
 *    inside the "compare second character onwards" loop);
 *  - each candidate start is retried from start+1 on a partial match, so
 *    overlapping occurrences are no longer skipped.
 */
__global__ void wordSearch(int itemsInBuffer, char* phrase, int phraseLen)
{
    if (blockIdx.x >= itemsInBuffer) {
        return;
    }

    const char* text = TEXT_BUFFER[blockIdx.x];
    MATCHES[blockIdx.x] = 0;

    if (phraseLen <= 0) {
        return;
    }

    for (int start = 0; text[start] != '\0'; start++) {
        int k = 0;
        while (k < phraseLen && text[start + k] != '\0' &&
               text[start + k] == phrase[k]) {
            k++;
        }
        if (k == phraseLen) {
            MATCHES[blockIdx.x] = 1;
            return;
        }
    }
}

/*
 * HOST MAIN ROUTINE
 *
 * Usage: <prog> <file> <phrase>
 * Buffers up to MAX_TEXT_BLOCKS lines beginning with "review/text", searches
 * each for <phrase> on the GPU, and prints one '1'/'0' flag per line.
 */
int main(int argc, char **argv)
{
    if (argc < 3) {
        printf("Insufficient arguments\n");
        exit(0);
    }

    char* filePath = argv[1];
    char* phrase   = argv[2];

    /* Bounded length of the search phrase (capped at 1000, as before). */
    int phraseLen = 0;
    for (int i = 0; phrase[i] != '\0' && i < 1000; i++) {
        phraseLen++;
    }

    cudaError_t err = cudaSuccess;
    const char* reviewIdentifier = "review/text";

    /* ~1 MB of line storage: static so it does not live on the stack. */
    static char hBuffer[MAX_TEXT_BLOCKS][MAX_CHAR_SIZE];

    bool *matches = (bool *)malloc(sizeof(bool) * MAX_TEXT_BLOCKS);
    if (matches == NULL) {
        fprintf(stderr, "Failed to allocate host matches array!\n");
        exit(EXIT_FAILURE);
    }

    /*
     * Device copy of the phrase. +1 byte for the NUL terminator so any
     * device-side "%s" use of the phrase stays in bounds (the original
     * copied only phraseLen bytes and printed it unterminated).
     */
    char *d_phrase = NULL;
    size_t d_phraseSize = (size_t)(phraseLen + 1) * sizeof(char);
    err = cudaMalloc((void **)&d_phrase, d_phraseSize);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate char array phrase (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    FILE *file = fopen(filePath, "r");
    if (file == NULL) {
        printf("Could not read file %s", filePath);
        free(matches);
        cudaFree(d_phrase);
        return 0;
    }

    /* Collect up to MAX_TEXT_BLOCKS lines that begin with the identifier. */
    char line[MAX_CHAR_SIZE];
    size_t idLen = strlen(reviewIdentifier);
    int lineCount = 0;

    while (lineCount < MAX_TEXT_BLOCKS &&
           fgets(line, MAX_CHAR_SIZE, file) != NULL) {
        if (strncmp(reviewIdentifier, line, idLen) != 0) {
            continue;
        }
        strncpy(hBuffer[lineCount], line, MAX_CHAR_SIZE);
        hBuffer[lineCount][MAX_CHAR_SIZE - 1] = '\0'; /* strncpy may not terminate */
        lineCount++;
    }

    /*
     * Launch for however many review lines were found. (The original only
     * launched when the buffer filled to exactly MAX_TEXT_BLOCKS lines and
     * silently did nothing for smaller files.)
     */
    if (lineCount > 0) {
        printf("[Copying text buffer to device...]\n");
        err = cudaMemcpyToSymbol(TEXT_BUFFER, hBuffer, BUFFER_SIZE, 0,
                                 cudaMemcpyHostToDevice);
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to copy text buffer hBuffer (error code %s)!\n",
                    cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        printf("[Copying phrase to device...]\n");
        err = cudaMemcpy(d_phrase, phrase, d_phraseSize, cudaMemcpyHostToDevice);
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to copy phrase d_phrase (error code %s)!\n",
                    cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        printf("[Initializing CUDA kernel and launching]\n");
        wordSearch<<<lineCount, 1>>>(lineCount, d_phrase, phraseLen);
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to launch kernel (error code %s)!\n",
                    cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        /* cudaMemcpyFromSymbol synchronizes with the kernel on the default
         * stream before copying the flags back. */
        printf("[Copying device memory matches to host memory]\n");
        err = cudaMemcpyFromSymbol(matches, MATCHES,
                                   sizeof(bool) * MAX_TEXT_BLOCKS, 0,
                                   cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            fprintf(stderr, "Failed to copy device matches array to host (error code %s)!\n",
                    cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }

        /* One flag per buffered line (the original iterated
         * sizeof(bool) * MAX_TEXT_BLOCKS entries). */
        for (int i = 0; i < lineCount; i++) {
            printf(matches[i] ? "1" : "0");
        }
        printf("\n");
    }

    printf("End CUDA search\n");
    fclose(file);
    free(matches);
    cudaFree(d_phrase);
    printf("Done\n");
    return 0;
}
9a51a5bb931c408acbac3559f32112f2fc5e597d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 16

/* Abort-on-error wrapper: every HIP runtime call returns a status that the
 * original code silently discarded. */
#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,    \
                    hipGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/*
 * Element-wise vector addition: c[i] = a[i] + b[i].
 * Launched on a 1-D grid; the bounds check guards the tail threads when the
 * grid provides more threads than there are elements.
 */
__global__ void add(int* a, int* b, int* c)
{
    int localIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (localIdx < N) {
        c[localIdx] = a[localIdx] + b[localIdx];
    }
}

int main(int argc, char** argv)
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Initialize arrays a and b with data
    for (int i = 0; i < N; i++) {
        a[i] = 2 * i;
        b[i] = -i;
    }

    // Allocate memory on the GPU
    HIP_CHECK(hipMalloc((void**)&dev_a, N * sizeof(int)));
    HIP_CHECK(hipMalloc((void**)&dev_b, N * sizeof(int)));
    HIP_CHECK(hipMalloc((void**)&dev_c, N * sizeof(int)));

    // Copy the data from host to GPU memory
    HIP_CHECK(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));

    // Compute the number of blocks from a fixed block size.
    // Integer ceiling division avoids the float rounding the original
    // (int)ceil((float)N/threadsPerBlock) relied on, and is exact for any N.
    // This can still launch more threads than needed; the kernel guards that.
    int threadsPerBlock = 1024;
    int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;

    // Launch the kernel
    hipLaunchKernelGGL(( add), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c);

    // Launch-configuration errors surface immediately; in-kernel faults
    // surface at the next synchronizing call.
    hipError_t syncErrCode = hipGetLastError();
    hipError_t asyncErrCode = hipDeviceSynchronize();
    if (syncErrCode != hipSuccess) {
        printf("Kernel error: %s\n", hipGetErrorString(syncErrCode));
    }
    if (asyncErrCode != hipSuccess) {
        printf("Kernel error: %s\n", hipGetErrorString(asyncErrCode));
    }

    // Move the result back from the GPU to the host
    HIP_CHECK(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Always free the memory you explicitly allocated
    HIP_CHECK(hipFree(dev_a));
    HIP_CHECK(hipFree(dev_b));
    HIP_CHECK(hipFree(dev_c));

    return 0;
}
9a51a5bb931c408acbac3559f32112f2fc5e597d.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 16

/* Abort-on-error wrapper: every CUDA runtime call returns a status that the
 * original code silently discarded. */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/*
 * Element-wise vector addition: c[i] = a[i] + b[i].
 * Launched on a 1-D grid; the bounds check guards the tail threads when the
 * grid provides more threads than there are elements.
 */
__global__ void add(int* a, int* b, int* c)
{
    int localIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (localIdx < N) {
        c[localIdx] = a[localIdx] + b[localIdx];
    }
}

int main(int argc, char** argv)
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Initialize arrays a and b with data
    for (int i = 0; i < N; i++) {
        a[i] = 2 * i;
        b[i] = -i;
    }

    // Allocate memory on the GPU
    CUDA_CHECK(cudaMalloc((void**)&dev_a, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_b, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_c, N * sizeof(int)));

    // Copy the data from host to GPU memory
    CUDA_CHECK(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));

    // Compute the number of blocks from a fixed block size.
    // Integer ceiling division avoids the float rounding the original
    // (int)ceil((float)N/threadsPerBlock) relied on, and is exact for any N.
    // This can still launch more threads than needed; the kernel guards that.
    int threadsPerBlock = 1024;
    int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;

    // Launch the kernel
    add<<<blocks, threadsPerBlock>>>(dev_a, dev_b, dev_c);

    // Launch-configuration errors surface immediately; in-kernel faults
    // surface at the next synchronizing call.
    cudaError_t syncErrCode = cudaGetLastError();
    cudaError_t asyncErrCode = cudaDeviceSynchronize();
    if (syncErrCode != cudaSuccess) {
        printf("Kernel error: %s\n", cudaGetErrorString(syncErrCode));
    }
    if (asyncErrCode != cudaSuccess) {
        printf("Kernel error: %s\n", cudaGetErrorString(asyncErrCode));
    }

    // Move the result back from the GPU to the host
    CUDA_CHECK(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Always free the memory you explicitly allocated
    CUDA_CHECK(cudaFree(dev_a));
    CUDA_CHECK(cudaFree(dev_b));
    CUDA_CHECK(cudaFree(dev_c));

    return 0;
}
004773c639d4a765a7756fc156f7344800f7d359.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * Auto-generated floating-point stress kernel. Runs on a single thread
 * (launched <<<1,1>>> from main) and folds a chain of conditional float
 * operations into `comp`, then prints the result with full precision so
 * outputs can be diffed across compilers/devices. The extreme literals
 * (denormals, near-overflow values, divisions by +/-0.0f) are intentional:
 * the test probes floating-point edge-case behavior. Do not "fix" them.
 */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
  // var_1 is the iteration count; all other vars feed the float chain.
  for (int i=0; i < var_1; ++i) {
    if (comp > var_2 / var_3 + (var_4 / -1.7202E-37f)) {
      if (comp >= (+1.5528E-41f - +1.8974E35f * (var_5 / var_6))) {
        if (comp <= (var_7 + (var_8 / +0.0f))) {  // division by +0.0f: intentional inf/nan probe
          comp += (var_9 / -1.6850E36f);
          comp += (var_10 + +1.3065E34f);
          float tmp_1 = +1.6323E35f;
          comp += tmp_1 + (var_11 / -0.0f / (var_12 + var_13 + var_14));
          if (comp >= (var_15 * (+1.1480E-36f - fabsf((var_16 + var_17))))) {
            float tmp_2 = +0.0f;
            comp += tmp_2 * (var_18 / -1.6624E-43f * +0.0f);
          }
          if (comp > (var_19 * var_20 / (-1.0113E-36f - -1.4493E-43f))) {
            comp += +1.8702E-41f + var_21;
            comp += (var_22 / ceilf(var_23 - (var_24 / -1.2063E-29f * (var_25 * var_26 - var_27))));
            comp += (-0.0f + (-1.4395E28f * cosf(-0.0f)));
          }
        }
      }
    }
  }
  // Full-precision print of the accumulated value (the test's only output).
  printf("%.17g\n", comp);
}

// Unused by main(): returns a heap array of ten floats all set to v.
// NOTE(review): no visible caller frees the allocation.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Reads 28 command-line values and launches the kernel on one thread.
// NOTE(review): argv[1..28] are read without an argc check -- the harness
// is assumed to always supply exactly 28 arguments.
int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);

  // Single-thread launch; synchronize so the device printf flushes
  // before the process exits.
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
  hipDeviceSynchronize();

  return 0;
}
004773c639d4a765a7756fc156f7344800f7d359.cu
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * Auto-generated floating-point stress kernel. Runs on a single thread
 * (launched <<<1,1>>> from main) and folds a chain of conditional float
 * operations into `comp`, then prints the result with full precision so
 * outputs can be diffed across compilers/devices. The extreme literals
 * (denormals, near-overflow values, divisions by +/-0.0f) are intentional:
 * the test probes floating-point edge-case behavior. Do not "fix" them.
 */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
  // var_1 is the iteration count; all other vars feed the float chain.
  for (int i=0; i < var_1; ++i) {
    if (comp > var_2 / var_3 + (var_4 / -1.7202E-37f)) {
      if (comp >= (+1.5528E-41f - +1.8974E35f * (var_5 / var_6))) {
        if (comp <= (var_7 + (var_8 / +0.0f))) {  // division by +0.0f: intentional inf/nan probe
          comp += (var_9 / -1.6850E36f);
          comp += (var_10 + +1.3065E34f);
          float tmp_1 = +1.6323E35f;
          comp += tmp_1 + (var_11 / -0.0f / (var_12 + var_13 + var_14));
          if (comp >= (var_15 * (+1.1480E-36f - fabsf((var_16 + var_17))))) {
            float tmp_2 = +0.0f;
            comp += tmp_2 * (var_18 / -1.6624E-43f * +0.0f);
          }
          if (comp > (var_19 * var_20 / (-1.0113E-36f - -1.4493E-43f))) {
            comp += +1.8702E-41f + var_21;
            comp += (var_22 / ceilf(var_23 - (var_24 / -1.2063E-29f * (var_25 * var_26 - var_27))));
            comp += (-0.0f + (-1.4395E28f * cosf(-0.0f)));
          }
        }
      }
    }
  }
  // Full-precision print of the accumulated value (the test's only output).
  printf("%.17g\n", comp);
}

// Unused by main(): returns a heap array of ten floats all set to v.
// NOTE(review): no visible caller frees the allocation.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Reads 28 command-line values and launches the kernel on one thread.
// NOTE(review): argv[1..28] are read without an argc check -- the harness
// is assumed to always supply exactly 28 arguments.
int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);

  // Single-thread launch; synchronize so the device printf flushes
  // before the process exits.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
  cudaDeviceSynchronize();

  return 0;
}
7c20f6f270259cb127997d2633c0e5cf971d0c57.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2010. Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-461231 All rights reserved. This file is part of LULESH, Version 1.0. Please also read this link -- http://www.opensource.org/licenses/index.php Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Additional BSD Notice 1. This notice is required to be provided under our contract with the U.S. Department of Energy (DOE). 
This work was produced at Lawrence Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE. 2. Neither the United States Government nor Lawrence Livermore National Security, LLC nor any of their employees, makes any warranty, express or implied, or assumes any liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately-owned rights. 3. Also, reference herein to any specific commercial products, process, or services by trade name, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes. 
*/ #include <hip/hip_runtime.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include "../benchmark_common.h" #define LULESH_SHOW_PROGRESS 1 enum { VolumeError = -1, QStopError = -2 }; hipStream_t streamApp; /****************************************************/ /* Allow flexibility for arithmetic representations */ /****************************************************/ /* Could also support fixed point and interval arithmetic types */ typedef float real4; typedef double real8; typedef long double real10; /* 10 bytes on x86 */ typedef int Index_t; /* array subscript and loop index */ typedef real8 Real_t; /* floating point representation */ typedef int Int_t; /* integer representation */ __host__ __device__ inline real4 SQRT(real4 arg) { return sqrtf(arg); } __host__ __device__ inline real8 SQRT(real8 arg) { return sqrt(arg); } __host__ inline real10 SQRT(real10 arg) { return sqrtl(arg); } __host__ __device__ inline real4 CBRT(real4 arg) { return cbrtf(arg); } __host__ __device__ inline real8 CBRT(real8 arg) { return cbrt(arg); } __host__ inline real10 CBRT(real10 arg) { return cbrtl(arg); } __host__ __device__ inline real4 FABS(real4 arg) { return fabsf(arg); } __host__ __device__ inline real8 FABS(real8 arg) { return fabs(arg); } __host__ inline real10 FABS(real10 arg) { return fabsl(arg); } __host__ __device__ inline real4 FMAX(real4 arg1, real4 arg2) { return fmaxf(arg1, arg2); } __host__ __device__ inline real8 FMAX(real8 arg1, real8 arg2) { return fmax(arg1, arg2); } __host__ inline real10 FMAX(real10 arg1, real10 arg2) { return fmaxl(arg1, arg2); } /*#define CUDA_SAFE_CALL( call) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) */ #define CUDA(call) CUDA_SAFE_CALL(call) #ifdef CUDA_SYNC_ALL #define CUDA_DEBUGSYNC CUDA(hipDeviceSynchronize()) #else #define CUDA_DEBUGSYNC 
#endif #define BLOCKSIZE 256 /* Given a number of bytes, nbytes, and a byte alignment, align, (e.g., 2, * 4, 8, or 16), return the smallest integer that is larger than nbytes and * a multiple of align. */ #define PAD_DIV(nbytes, align) (((nbytes) + (align)-1) / (align)) #define PAD(nbytes, align) (PAD_DIV((nbytes), (align)) * (align)) /* More general version of reduceInPlacePOT (this works for arbitrary * numThreadsPerBlock <= 1024). Again, conditionals on * numThreadsPerBlock are evaluated at compile time. */ template <class T, int numThreadsPerBlock> __device__ void reduceSum(T* sresult, const int threadID) { /* If number of threads is not a power of two, first add the ones after the last power of two into the beginning. At most one of these conditionals will be true for a given NPOT block size. */ if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024) { __syncthreads(); if (threadID < numThreadsPerBlock - 512) sresult[threadID] += sresult[threadID + 512]; } if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512) { __syncthreads(); if (threadID < numThreadsPerBlock - 256) sresult[threadID] += sresult[threadID + 256]; } if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256) { __syncthreads(); if (threadID < numThreadsPerBlock - 128) sresult[threadID] += sresult[threadID + 128]; } if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128) { __syncthreads(); if (threadID < numThreadsPerBlock - 64) sresult[threadID] += sresult[threadID + 64]; } if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64) { __syncthreads(); if (threadID < numThreadsPerBlock - 32) sresult[threadID] += sresult[threadID + 32]; } if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32) { __syncthreads(); if (threadID < numThreadsPerBlock - 16) sresult[threadID] += sresult[threadID + 16]; } if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16) { __syncthreads(); if (threadID < numThreadsPerBlock - 8) sresult[threadID] += sresult[threadID + 8]; } if (numThreadsPerBlock > 4 && 
numThreadsPerBlock < 8) { __syncthreads(); if (threadID < numThreadsPerBlock - 4) sresult[threadID] += sresult[threadID + 4]; } if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4) { __syncthreads(); if (threadID < numThreadsPerBlock - 2) sresult[threadID] += sresult[threadID + 2]; } if (numThreadsPerBlock >= 512) { __syncthreads(); if (threadID < 256) sresult[threadID] += sresult[threadID + 256]; } if (numThreadsPerBlock >= 256) { __syncthreads(); if (threadID < 128) sresult[threadID] += sresult[threadID + 128]; } if (numThreadsPerBlock >= 128) { __syncthreads(); if (threadID < 64) sresult[threadID] += sresult[threadID + 64]; } __syncthreads(); #ifdef _DEVICEEMU if (numThreadsPerBlock >= 64) { __syncthreads(); if (threadID < 32) sresult[threadID] += sresult[threadID + 32]; } if (numThreadsPerBlock >= 32) { __syncthreads(); if (threadID < 16) sresult[threadID] += sresult[threadID + 16]; } if (numThreadsPerBlock >= 16) { __syncthreads(); if (threadID < 8) sresult[threadID] += sresult[threadID + 8]; } if (numThreadsPerBlock >= 8) { __syncthreads(); if (threadID < 4) sresult[threadID] += sresult[threadID + 4]; } if (numThreadsPerBlock >= 4) { __syncthreads(); if (threadID < 2) sresult[threadID] += sresult[threadID + 2]; } if (numThreadsPerBlock >= 2) { __syncthreads(); if (threadID < 1) sresult[threadID] += sresult[threadID + 1]; } #else if (threadID < 32) { volatile T* vol = sresult; if (numThreadsPerBlock >= 64) vol[threadID] += vol[threadID + 32]; if (numThreadsPerBlock >= 32) vol[threadID] += vol[threadID + 16]; if (numThreadsPerBlock >= 16) vol[threadID] += vol[threadID + 8]; if (numThreadsPerBlock >= 8) vol[threadID] += vol[threadID + 4]; if (numThreadsPerBlock >= 4) vol[threadID] += vol[threadID + 2]; if (numThreadsPerBlock >= 2) vol[threadID] += vol[threadID + 1]; } #endif __syncthreads(); } #define MINEQ(a, b) (a) = (((a) < (b)) ? 
(a) : (b)) template <class T, int numThreadsPerBlock> __device__ void reduceMin(T* sresult, const int threadID) { /* If number of threads is not a power of two, first add the ones after the last power of two into the beginning. At most one of these conditionals will be true for a given NPOT block size. */ if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024) { __syncthreads(); if (threadID < numThreadsPerBlock - 512) MINEQ(sresult[threadID], sresult[threadID + 512]); } if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512) { __syncthreads(); if (threadID < numThreadsPerBlock - 256) MINEQ(sresult[threadID], sresult[threadID + 256]); } if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256) { __syncthreads(); if (threadID < numThreadsPerBlock - 128) MINEQ(sresult[threadID], sresult[threadID + 128]); } if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128) { __syncthreads(); if (threadID < numThreadsPerBlock - 64) MINEQ(sresult[threadID], sresult[threadID + 64]); } if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64) { __syncthreads(); if (threadID < numThreadsPerBlock - 32) MINEQ(sresult[threadID], sresult[threadID + 32]); } if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32) { __syncthreads(); if (threadID < numThreadsPerBlock - 16) MINEQ(sresult[threadID], sresult[threadID + 16]); } if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16) { __syncthreads(); if (threadID < numThreadsPerBlock - 8) MINEQ(sresult[threadID], sresult[threadID + 8]); } if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8) { __syncthreads(); if (threadID < numThreadsPerBlock - 4) MINEQ(sresult[threadID], sresult[threadID + 4]); } if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4) { __syncthreads(); if (threadID < numThreadsPerBlock - 2) MINEQ(sresult[threadID], sresult[threadID + 2]); } if (numThreadsPerBlock >= 512) { __syncthreads(); if (threadID < 256) MINEQ(sresult[threadID], sresult[threadID + 256]); } if (numThreadsPerBlock >= 256) { __syncthreads(); if (threadID 
< 128) MINEQ(sresult[threadID], sresult[threadID + 128]); } if (numThreadsPerBlock >= 128) { __syncthreads(); if (threadID < 64) MINEQ(sresult[threadID], sresult[threadID + 64]); } __syncthreads(); #ifdef _DEVICEEMU if (numThreadsPerBlock >= 64) { __syncthreads(); if (threadID < 32) MINEQ(sresult[threadID], sresult[threadID + 32]); } if (numThreadsPerBlock >= 32) { __syncthreads(); if (threadID < 16) MINEQ(sresult[threadID], sresult[threadID + 16]); } if (numThreadsPerBlock >= 16) { __syncthreads(); if (threadID < 8) MINEQ(sresult[threadID], sresult[threadID + 8]); } if (numThreadsPerBlock >= 8) { __syncthreads(); if (threadID < 4) MINEQ(sresult[threadID], sresult[threadID + 4]); } if (numThreadsPerBlock >= 4) { __syncthreads(); if (threadID < 2) MINEQ(sresult[threadID], sresult[threadID + 2]); } if (numThreadsPerBlock >= 2) { __syncthreads(); if (threadID < 1) MINEQ(sresult[threadID], sresult[threadID + 1]); } #else if (threadID < 32) { volatile T* vol = sresult; if (numThreadsPerBlock >= 64) MINEQ(vol[threadID], vol[threadID + 32]); if (numThreadsPerBlock >= 32) MINEQ(vol[threadID], vol[threadID + 16]); if (numThreadsPerBlock >= 16) MINEQ(vol[threadID], vol[threadID + 8]); if (numThreadsPerBlock >= 8) MINEQ(vol[threadID], vol[threadID + 4]); if (numThreadsPerBlock >= 4) MINEQ(vol[threadID], vol[threadID + 2]); if (numThreadsPerBlock >= 2) MINEQ(vol[threadID], vol[threadID + 1]); } #endif __syncthreads(); } void cuda_init() { int deviceCount, dev; hipDeviceProp_t cuda_deviceProp; char* s; CUDA(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n"); exit(1); } if (s = getenv("CUDA_DEVICE")) dev = atoi(s); else dev = 0; if ((dev < 0) || (dev > deviceCount - 1)) { fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n", dev, 0, deviceCount - 1); exit(1); } CUDA(hipGetDeviceProperties(&cuda_deviceProp, dev)); if (cuda_deviceProp.major < 1) { fprintf(stderr, "cuda_init(): device %d 
does not support CUDA.\n", dev); exit(1); } fprintf(stderr, "setting CUDA device %d\n", dev); CUDA(hipSetDevice(dev)); } /************************************************************/ /* Allow for flexible data layout experiments by separating */ /* array interface from underlying implementation. */ /************************************************************/ struct Mesh { /* This first implementation allows for runnable code */ /* and is not meant to be optimal. Final implementation */ /* should separate declaration and allocation phases */ /* so that allocation can be scheduled in a cache conscious */ /* manner. */ friend struct MeshGPU; public: /**************/ /* Allocation */ /**************/ void AllocateNodalPersistent(size_t size) { m_x.resize(size); m_y.resize(size); m_z.resize(size); m_xd.resize(size, Real_t(0.)); m_yd.resize(size, Real_t(0.)); m_zd.resize(size, Real_t(0.)); m_xdd.resize(size, Real_t(0.)); m_ydd.resize(size, Real_t(0.)); m_zdd.resize(size, Real_t(0.)); m_fx.resize(size); m_fy.resize(size); m_fz.resize(size); m_nodalMass.resize(size, Real_t(0.)); } void AllocateElemPersistent(size_t size) { m_matElemlist.resize(size); m_nodelist.resize(8 * size); m_lxim.resize(size); m_lxip.resize(size); m_letam.resize(size); m_letap.resize(size); m_lzetam.resize(size); m_lzetap.resize(size); m_elemBC.resize(size); m_e.resize(size, Real_t(0.)); m_p.resize(size, Real_t(0.)); m_q.resize(size); m_ql.resize(size); m_qq.resize(size); m_v.resize(size, 1.0); m_volo.resize(size); m_delv.resize(size); m_vdov.resize(size); m_arealg.resize(size); m_ss.resize(size); m_elemMass.resize(size); } /* Temporaries should not be initialized in bulk but */ /* this is a runnable placeholder for now */ void AllocateElemTemporary(size_t size) { m_dxx.resize(size); m_dyy.resize(size); m_dzz.resize(size); m_delv_xi.resize(size); m_delv_eta.resize(size); m_delv_zeta.resize(size); m_delx_xi.resize(size); m_delx_eta.resize(size); m_delx_zeta.resize(size); m_vnew.resize(size); } void 
// -- Mesh continues: symmetry-nodeset allocation, then AllocateNodeElemIndexes() builds the node->element
// -- reverse-connectivity (corner list laid out node-major, stride m_numNode; aborts if any node touches > 8 elems).
// -- After that: reference-returning accessors over the node-centered arrays (all get/set through one method each).
AllocateNodesets(size_t size) { m_symmX.resize(size); m_symmY.resize(size); m_symmZ.resize(size); } void AllocateNodeElemIndexes() { Index_t i, j, nidx; /* set up node-centered indexing of elements */ m_nodeElemCount.resize(m_numNode); for (i = 0; i < m_numNode; i++) m_nodeElemCount[i] = 0; m_nodeElemCornerList.resize(m_numNode * 8); for (i = 0; i < m_numElem; i++) { for (j = 0; j < 8; j++) { nidx = nodelist(i, j); m_nodeElemCornerList[nidx + m_numNode * m_nodeElemCount[nidx]++] = i + m_numElem * j; if (m_nodeElemCount[nidx] > 8) { fprintf(stderr, "Node degree is higher than 8!\n"); exit(1); } } } } /**********/ /* Access */ /**********/ /* Node-centered */ Real_t& x(Index_t idx) { return m_x[idx]; } Real_t& y(Index_t idx) { return m_y[idx]; } Real_t& z(Index_t idx) { return m_z[idx]; } Real_t& xd(Index_t idx) { return m_xd[idx]; } Real_t& yd(Index_t idx) { return m_yd[idx]; } Real_t& zd(Index_t idx) { return m_zd[idx]; } Real_t& xdd(Index_t idx) { return m_xdd[idx]; } Real_t& ydd(Index_t idx) { return m_ydd[idx]; } Real_t& zdd(Index_t idx) { return m_zdd[idx]; } Real_t& fx(Index_t idx) { return m_fx[idx]; } Real_t& fy(Index_t idx) { return m_fy[idx]; } Real_t& fz(Index_t idx) { return m_fz[idx]; } Real_t& nodalMass(Index_t idx) { return m_nodalMass[idx]; } Index_t& symmX(Index_t idx) { return m_symmX[idx]; } Index_t& symmY(Index_t idx) { return m_symmY[idx]; } Index_t& symmZ(Index_t idx) { return m_symmZ[idx]; } /* Element-centered */ Index_t& matElemlist(Index_t idx) { return m_matElemlist[idx]; } Index_t& nodelist(Index_t idx, Index_t nidx) { return m_nodelist[idx + nidx * m_numElem]; } Index_t& lxim(Index_t idx) { return m_lxim[idx]; } Index_t& lxip(Index_t idx) { return m_lxip[idx]; } Index_t& letam(Index_t idx) { return m_letam[idx]; } Index_t& letap(Index_t idx) { return m_letap[idx]; } Index_t& lzetam(Index_t idx) { return m_lzetam[idx]; } Index_t& lzetap(Index_t idx) { return m_lzetap[idx]; } Int_t& elemBC(Index_t idx) { return m_elemBC[idx]; } Real_t& 
// -- Element-centered field accessors (strain/gradient temporaries, thermodynamic state), then the
// -- scalar solver-parameter accessors (time stepping, tolerances, artificial-viscosity coefficients).
// -- NOTE: nodelist(idx, nidx) above is element-major with node-stride m_numElem (SoA layout for coalescing).
dxx(Index_t idx) { return m_dxx[idx]; } Real_t& dyy(Index_t idx) { return m_dyy[idx]; } Real_t& dzz(Index_t idx) { return m_dzz[idx]; } Real_t& delv_xi(Index_t idx) { return m_delv_xi[idx]; } Real_t& delv_eta(Index_t idx) { return m_delv_eta[idx]; } Real_t& delv_zeta(Index_t idx) { return m_delv_zeta[idx]; } Real_t& delx_xi(Index_t idx) { return m_delx_xi[idx]; } Real_t& delx_eta(Index_t idx) { return m_delx_eta[idx]; } Real_t& delx_zeta(Index_t idx) { return m_delx_zeta[idx]; } Real_t& e(Index_t idx) { return m_e[idx]; } Real_t& p(Index_t idx) { return m_p[idx]; } Real_t& q(Index_t idx) { return m_q[idx]; } Real_t& ql(Index_t idx) { return m_ql[idx]; } Real_t& qq(Index_t idx) { return m_qq[idx]; } Real_t& v(Index_t idx) { return m_v[idx]; } Real_t& volo(Index_t idx) { return m_volo[idx]; } Real_t& vnew(Index_t idx) { return m_vnew[idx]; } Real_t& delv(Index_t idx) { return m_delv[idx]; } Real_t& vdov(Index_t idx) { return m_vdov[idx]; } Real_t& arealg(Index_t idx) { return m_arealg[idx]; } Real_t& ss(Index_t idx) { return m_ss[idx]; } Real_t& elemMass(Index_t idx) { return m_elemMass[idx]; } /* Params */ Real_t& dtfixed() { return m_dtfixed; } Real_t& time() { return m_time; } Real_t& deltatime() { return m_deltatime; } Real_t& deltatimemultlb() { return m_deltatimemultlb; } Real_t& deltatimemultub() { return m_deltatimemultub; } Real_t& stoptime() { return m_stoptime; } Real_t& u_cut() { return m_u_cut; } Real_t& hgcoef() { return m_hgcoef; } Real_t& qstop() { return m_qstop; } Real_t& monoq_max_slope() { return m_monoq_max_slope; } Real_t& monoq_limiter_mult() { return m_monoq_limiter_mult; } Real_t& e_cut() { return m_e_cut; } Real_t& p_cut() { return m_p_cut; } Real_t& ss4o3() { return m_ss4o3; } Real_t& q_cut() { return m_q_cut; } Real_t& v_cut() { return m_v_cut; } Real_t& qlc_monoq() { return m_qlc_monoq; } Real_t& qqc_monoq() { return m_qqc_monoq; } Real_t& qqc() { return m_qqc; } Real_t& eosvmax() { return m_eosvmax; } Real_t& eosvmin() { return 
// -- Remaining parameter/extent accessors, then the underlying storage: node-centered std::vectors first.
m_eosvmin; } Real_t& pmin() { return m_pmin; } Real_t& emin() { return m_emin; } Real_t& dvovmax() { return m_dvovmax; } Real_t& refdens() { return m_refdens; } Real_t& dtcourant() { return m_dtcourant; } Real_t& dthydro() { return m_dthydro; } Real_t& dtmax() { return m_dtmax; } Int_t& cycle() { return m_cycle; } Index_t& sizeX() { return m_sizeX; } Index_t& sizeY() { return m_sizeY; } Index_t& sizeZ() { return m_sizeZ; } Index_t& numElem() { return m_numElem; } Index_t& numNode() { return m_numNode; } // private: /******************/ /* Implementation */ /******************/ /* Node-centered */ std::vector<Real_t> m_x; /* coordinates */ std::vector<Real_t> m_y; std::vector<Real_t> m_z; std::vector<Real_t> m_xd; /* velocities */ std::vector<Real_t> m_yd; std::vector<Real_t> m_zd; std::vector<Real_t> m_xdd; /* accelerations */ std::vector<Real_t> m_ydd; std::vector<Real_t> m_zdd; std::vector<Real_t> m_fx; /* forces */ std::vector<Real_t> m_fy; std::vector<Real_t> m_fz; std::vector<Real_t> m_nodalMass; /* mass */ std::vector<Index_t> m_symmX; /* symmetry plane nodesets */ std::vector<Index_t> m_symmY; std::vector<Index_t> m_symmZ; std::vector<Int_t> m_nodeElemCount; std::vector<Index_t> m_nodeElemCornerList; /* Element-centered */ std::vector<Index_t> m_matElemlist; /* material indexset */ std::vector<Index_t> m_nodelist; /* elemToNode connectivity */ std::vector<Index_t> m_lxim; /* element connectivity across each face */ std::vector<Index_t> m_lxip; std::vector<Index_t> m_letam; std::vector<Index_t> m_letap; std::vector<Index_t> m_lzetam; std::vector<Index_t> m_lzetap; std::vector<Int_t> m_elemBC; /* symmetry/-surface flags for each elem face */ std::vector<Real_t> m_dxx; /* principal strains -- temporary */ std::vector<Real_t> m_dyy; std::vector<Real_t> m_dzz; std::vector<Real_t> m_delv_xi; /* velocity gradient -- temporary */ std::vector<Real_t> m_delv_eta; std::vector<Real_t> m_delv_zeta; std::vector<Real_t> m_delx_xi; /* coordinate gradient -- temporary */ 
// -- Element-centered storage continues (thermodynamics, volumes, q terms), then the scalar
// -- simulation parameters (fixed/variable dt, tolerances, floors, constraints) and domain extents.
std::vector<Real_t> m_delx_eta; std::vector<Real_t> m_delx_zeta; std::vector<Real_t> m_e; /* energy */ std::vector<Real_t> m_p; /* pressure */ std::vector<Real_t> m_q; /* q */ std::vector<Real_t> m_ql; /* linear term for q */ std::vector<Real_t> m_qq; /* quadratic term for q */ std::vector<Real_t> m_v; /* relative volume */ std::vector<Real_t> m_volo; /* reference volume */ std::vector<Real_t> m_vnew; /* new relative volume -- temporary */ std::vector<Real_t> m_delv; /* m_vnew - m_v */ std::vector<Real_t> m_vdov; /* volume derivative over volume */ std::vector<Real_t> m_arealg; /* characteristic length of an element */ std::vector<Real_t> m_ss; /* "sound speed" */ std::vector<Real_t> m_elemMass; /* mass */ /* Parameters */ Real_t m_dtfixed; /* fixed time increment */ Real_t m_time; /* current time */ Real_t m_deltatime; /* variable time increment */ Real_t m_deltatimemultlb; Real_t m_deltatimemultub; Real_t m_stoptime; /* end time for simulation */ Real_t m_u_cut; /* velocity tolerance */ Real_t m_hgcoef; /* hourglass control */ Real_t m_qstop; /* excessive q indicator */ Real_t m_monoq_max_slope; Real_t m_monoq_limiter_mult; Real_t m_e_cut; /* energy tolerance */ Real_t m_p_cut; /* pressure tolerance */ Real_t m_ss4o3; Real_t m_q_cut; /* q tolerance */ Real_t m_v_cut; /* relative volume tolerance */ Real_t m_qlc_monoq; /* linear term coef for q */ Real_t m_qqc_monoq; /* quadratic term coef for q */ Real_t m_qqc; Real_t m_eosvmax; Real_t m_eosvmin; Real_t m_pmin; /* pressure floor */ Real_t m_emin; /* energy floor */ Real_t m_dvovmax; /* maximum allowable volume change */ Real_t m_refdens; /* reference density */ Real_t m_dtcourant; /* courant constraint */ Real_t m_dthydro; /* volume change constraint */ Real_t m_dtmax; /* maximum allowable time increment */ Int_t m_cycle; /* iteration count for simulation */ Index_t m_sizeX; /* X,Y,Z extent of this block */ Index_t m_sizeY; Index_t m_sizeZ; Index_t m_numElem; /* Elements/Nodes in this domain */ Index_t m_numNode; 
// -- End of Mesh (single global instance 'mesh'). Next: raw malloc helpers, the three-state
// -- host/device coherence protocol (GPU_STALE / CPU_STALE / ALL_FRESH), lazy-allocating async-copy
// -- freshen templates on streamApp, the FC/FG/SC/SG convenience macros, and the MeshGPU device mirror.
} mesh; template <typename T> T* Allocate(size_t size) { return static_cast<T*>(malloc(sizeof(T) * size)); } template <typename T> void Release(T** ptr) { if (*ptr != NULL) { free(*ptr); *ptr = NULL; } } #define GPU_STALE 0 #define CPU_STALE 1 #define ALL_FRESH 2 template <typename T> void freshenGPU(std::vector<T>& cpu, T** gpu, int& stale) { if (stale != GPU_STALE) return; if (!(*gpu)) { CUDA(hipMalloc(gpu, sizeof(T) * cpu.size())); } CUDA(hipMemcpyAsync(*gpu, &cpu[0], sizeof(T) * cpu.size(), hipMemcpyHostToDevice, streamApp)); stale = ALL_FRESH; } template <typename T> void freshenCPU(std::vector<T>& cpu, T* gpu, int& stale) { if (stale != CPU_STALE) return; if (!gpu) { fprintf(stderr, "freshenCPU(): NULL GPU data!\n"); exit(1); } CUDA(hipMemcpyAsync(&cpu[0], gpu, sizeof(T) * cpu.size(), hipMemcpyDeviceToHost, streamApp)); stale = ALL_FRESH; } // freshen helpers #define FC(var) \ freshenCPU(mesh.m_##var, meshGPU.m_##var, \ meshGPU.m_##var##_stale); // freshen CPU #define FG(var) \ freshenGPU(mesh.m_##var, &meshGPU.m_##var, \ meshGPU.m_##var##_stale); // freshen GPU // stale helpers #define SC(var) meshGPU.m_##var##_stale = CPU_STALE; // stale CPU #define SG(var) meshGPU.m_##var##_stale = GPU_STALE; // stale GPU struct MeshGPU { Mesh* m_mesh; /******************/ /* Implementation */ /******************/ /* Node-centered */ Real_t* m_x; /* coordinates */ Real_t* m_y; Real_t* m_z; Real_t* m_xd; /* velocities */ Real_t* m_yd; Real_t* m_zd; Real_t* m_xdd; /* accelerations */ Real_t* m_ydd; Real_t* m_zdd; Real_t* m_fx; /* forces */ Real_t* m_fy; Real_t* m_fz; Real_t* m_nodalMass; /* mass */ Index_t* m_symmX; /* symmetry plane nodesets */ Index_t* m_symmY; Index_t* m_symmZ; Int_t* m_nodeElemCount; Index_t* m_nodeElemCornerList; /* Element-centered */ Index_t* m_matElemlist; /* material indexset */ Index_t* m_nodelist; /* elemToNode connectivity */ Index_t* m_lxim; /* element connectivity across each face */ Index_t* m_lxip; Index_t* m_letam; Index_t* m_letap; Index_t* 
// -- MeshGPU device pointers continue, one per mirrored Mesh array, followed by one staleness flag
// -- per array; init() nulls every pointer and marks everything GPU_STALE so first use triggers upload.
m_lzetam; Index_t* m_lzetap; Int_t* m_elemBC; /* symmetry/free-surface flags for each elem face */ Real_t* m_dxx; /* principal strains -- temporary */ Real_t* m_dyy; Real_t* m_dzz; Real_t* m_delv_xi; /* velocity gradient -- temporary */ Real_t* m_delv_eta; Real_t* m_delv_zeta; Real_t* m_delx_xi; /* coordinate gradient -- temporary */ Real_t* m_delx_eta; Real_t* m_delx_zeta; Real_t* m_e; /* energy */ Real_t* m_p; /* pressure */ Real_t* m_q; /* q */ Real_t* m_ql; /* linear term for q */ Real_t* m_qq; /* quadratic term for q */ Real_t* m_v; /* relative volume */ Real_t* m_volo; /* reference volume */ Real_t* m_vnew; /* new relative volume -- temporary */ Real_t* m_delv; /* m_vnew - m_v */ Real_t* m_vdov; /* volume derivative over volume */ Real_t* m_arealg; /* characteristic length of an element */ Real_t* m_ss; /* "sound speed" */ Real_t* m_elemMass; /* mass */ /* Stale flags */ int m_x_stale, m_y_stale, m_z_stale; int m_xd_stale, m_yd_stale, m_zd_stale; int m_xdd_stale, m_ydd_stale, m_zdd_stale; int m_fx_stale, m_fy_stale, m_fz_stale; int m_nodalMass_stale; int m_symmX_stale, m_symmY_stale, m_symmZ_stale; int m_nodeElemCount_stale, m_nodeElemCornerList_stale; int m_matElemlist_stale, m_nodelist_stale; int m_lxim_stale, m_lxip_stale, m_letam_stale, m_letap_stale, m_lzetam_stale, m_lzetap_stale; int m_elemBC_stale; int m_dxx_stale, m_dyy_stale, m_dzz_stale; int m_delv_xi_stale, m_delv_eta_stale, m_delv_zeta_stale; int m_delx_xi_stale, m_delx_eta_stale, m_delx_zeta_stale; int m_e_stale; int m_p_stale, m_q_stale, m_ql_stale, m_qq_stale; int m_v_stale, m_volo_stale, m_vnew_stale, m_delv_stale, m_vdov_stale; int m_arealg_stale; int m_ss_stale; int m_elemMass_stale; void init(Mesh* mesh) { m_mesh = mesh; m_x = m_y = m_z = NULL; m_xd = m_yd = m_zd = NULL; m_xdd = m_ydd = m_zdd = NULL; m_fx = m_fy = m_fz = NULL; m_nodalMass = NULL; m_symmX = m_symmY = m_symmZ = NULL; m_nodeElemCount = m_nodeElemCornerList = NULL; m_matElemlist = m_nodelist = NULL; m_lxim = m_lxip = m_letam = 
// -- init() completes (chained NULL/GPU_STALE assignments); freshenGPU()/freshenCPU() bulk-sync every
// -- mirrored array through the local F(var) macro, which forwards to the file-scope freshen templates.
m_letap = m_lzetam = m_lzetap = NULL; m_elemBC = NULL; m_dxx = m_dyy = m_dzz = NULL; m_delv_xi = m_delv_eta = m_delv_zeta = NULL; m_delx_xi = m_delx_eta = m_delx_zeta = NULL; m_e = NULL; m_p = m_q = m_ql = m_qq = NULL; m_v = m_volo = m_vnew = m_delv = m_vdov = NULL; m_arealg = NULL; m_ss = NULL; m_elemMass = NULL; m_x_stale = m_y_stale = m_z_stale = m_xd_stale = m_yd_stale = m_zd_stale = m_xdd_stale = m_ydd_stale = m_zdd_stale = m_fx_stale = m_fy_stale = m_fz_stale = m_nodalMass_stale = m_symmX_stale = m_symmY_stale = m_symmZ_stale = m_nodeElemCount_stale = m_nodeElemCornerList_stale = m_matElemlist_stale = m_nodelist_stale = m_lxim_stale = m_lxip_stale = m_letam_stale = m_letap_stale = m_lzetam_stale = m_lzetap_stale = m_elemBC_stale = m_dxx_stale = m_dyy_stale = m_dzz_stale = m_delv_xi_stale = m_delv_eta_stale = m_delv_zeta_stale = m_delx_xi_stale = m_delx_eta_stale = m_delx_zeta_stale = m_e_stale = m_p_stale = m_q_stale = m_ql_stale = m_qq_stale = m_v_stale = m_volo_stale = m_vnew_stale = m_delv_stale = m_vdov_stale = m_arealg_stale = m_ss_stale = m_elemMass_stale = GPU_STALE; } void freshenGPU() { #define F(var) ::freshenGPU(m_mesh->m_##var, &m_##var, m_##var##_stale); F(x); F(y); F(z); F(xd); F(yd); F(zd); F(xdd); F(ydd); F(zdd); F(fx); F(fy); F(fz); F(nodalMass); F(symmX); F(symmY); F(symmZ); F(nodeElemCount); F(nodeElemCornerList); F(matElemlist); F(nodelist); F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap); F(elemBC); F(dxx); F(dyy); F(dzz); F(delv_xi); F(delv_eta); F(delv_zeta); F(delx_xi); F(delx_eta); F(delx_zeta); F(e); F(p); F(q); F(ql); F(qq); F(v); F(volo); F(vnew); F(delv); F(vdov); F(arealg); F(ss); F(elemMass); #undef F } void freshenCPU() { #define F(var) ::freshenCPU(m_mesh->m_##var, m_##var, m_##var##_stale); F(x); F(y); F(z); F(xd); F(yd); F(zd); F(xdd); F(ydd); F(zdd); F(fx); F(fy); F(fz); F(nodalMass); F(symmX); F(symmY); F(symmZ); F(nodeElemCount); F(nodeElemCornerList); F(matElemlist); F(nodelist); F(lxim); F(lxip); F(letam); 
// -- freshenCPU() completes; global 'meshGPU' instance; 2-bits-per-face boundary-condition masks for the
// -- six hex faces; TimeIncrement() shrinks/grows dt from courant+hydro constraints (clamped by
// -- deltatimemultlb/ub and dtmax, with end-of-run targetdt smoothing); first stress kernel header begins.
F(letap); F(lzetam); F(lzetap); F(elemBC); F(dxx); F(dyy); F(dzz); F(delv_xi); F(delv_eta); F(delv_zeta); F(delx_xi); F(delx_eta); F(delx_zeta); F(e); F(p); F(q); F(ql); F(qq); F(v); F(volo); F(vnew); F(delv); F(vdov); F(arealg); F(ss); F(elemMass); #undef F } } meshGPU; /* Stuff needed for boundary conditions */ /* 2 BCs on each of 6 hexahedral faces (12 bits) */ #define XI_M 0x003 #define XI_M_SYMM 0x001 #define XI_M_FREE 0x002 #define XI_P 0x00c #define XI_P_SYMM 0x004 #define XI_P_FREE 0x008 #define ETA_M 0x030 #define ETA_M_SYMM 0x010 #define ETA_M_FREE 0x020 #define ETA_P 0x0c0 #define ETA_P_SYMM 0x040 #define ETA_P_FREE 0x080 #define ZETA_M 0x300 #define ZETA_M_SYMM 0x100 #define ZETA_M_FREE 0x200 #define ZETA_P 0xc00 #define ZETA_P_SYMM 0x400 #define ZETA_P_FREE 0x800 static inline void TimeIncrement() { Real_t targetdt = mesh.stoptime() - mesh.time(); if ((mesh.dtfixed() <= Real_t(0.0)) && (mesh.cycle() != Int_t(0))) { Real_t ratio; Real_t olddt = mesh.deltatime(); /* This will require a reduction in parallel */ Real_t newdt = Real_t(1.0e+20); if (mesh.dtcourant() < newdt) { newdt = mesh.dtcourant() / Real_t(2.0); } if (mesh.dthydro() < newdt) { newdt = mesh.dthydro() * Real_t(2.0) / Real_t(3.0); } ratio = newdt / olddt; if (ratio >= Real_t(1.0)) { if (ratio < mesh.deltatimemultlb()) { newdt = olddt; } else if (ratio > mesh.deltatimemultub()) { newdt = olddt * mesh.deltatimemultub(); } } if (newdt > mesh.dtmax()) { newdt = mesh.dtmax(); } mesh.deltatime() = newdt; } /* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */ if ((targetdt > mesh.deltatime()) && (targetdt < (Real_t(4.0) * mesh.deltatime() / Real_t(3.0)))) { targetdt = Real_t(2.0) * mesh.deltatime() / Real_t(3.0); } if (targetdt < mesh.deltatime()) { mesh.deltatime() = targetdt; } mesh.time() += mesh.deltatime(); ++mesh.cycle(); } __global__ void InitStressTermsForElems_kernel(int numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* p, Real_t* q) { int i = blockDim.x * blockIdx.x + 
// -- InitStressTermsForElems kernel body completes (sig = -p - q per element, bounds-guarded);
// -- then its GPU launcher, CPU loop, and the CPU/GPU dispatcher that freshens p/q on the chosen side;
// -- then the host+device hex shape-function-derivative / volume routine begins.
threadIdx.x; if (i < numElem) sigxx[i] = sigyy[i] = sigzz[i] = -p[i] - q[i]; } static inline void InitStressTermsForElems_gpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, hipStream_t stream_app) { dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 dimGrid(PAD_DIV(numElem, dimBlock.x), 1, 1); // hipFuncSetCacheConfig(InitStressTermsForElems_kernel,hipFuncCachePreferL1); // // set as default for all kernels after this one hipLaunchKernelGGL(( InitStressTermsForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, numElem, sigxx, sigyy, sigzz, meshGPU.m_p, meshGPU.m_q); CUDA_DEBUGSYNC; } static inline void InitStressTermsForElems_cpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz) { // // pull in the stresses appropriate to the hydro integration // for (Index_t i = 0; i < numElem; ++i) { sigxx[i] = sigyy[i] = sigzz[i] = -mesh.p(i) - mesh.q(i); } } static inline void InitStressTermsForElems(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, int useCPU, hipStream_t stream_app) { if (useCPU) { FC(p); FC(q); InitStressTermsForElems_cpu(numElem, sigxx, sigyy, sigzz); } else { FG(p); FG(q); InitStressTermsForElems_gpu(numElem, sigxx, sigyy, sigzz, stream_app); } } __host__ __device__ static inline void CalcElemShapeFunctionDerivatives( const Real_t* const x, const Real_t* const y, const Real_t* const z, Real_t b[][8], Real_t* const volume) { const Real_t x0 = x[0]; const Real_t x1 = x[1]; const Real_t x2 = x[2]; const Real_t x3 = x[3]; const Real_t x4 = x[4]; const Real_t x5 = x[5]; const Real_t x6 = x[6]; const Real_t x7 = x[7]; const Real_t y0 = y[0]; const Real_t y1 = y[1]; const Real_t y2 = y[2]; const Real_t y3 = y[3]; const Real_t y4 = y[4]; const Real_t y5 = y[5]; const Real_t y6 = y[6]; const Real_t y7 = y[7]; const Real_t z0 = z[0]; const Real_t z1 = z[1]; const Real_t z2 = z[2]; const Real_t z3 = z[3]; const Real_t z4 = z[4]; const Real_t z5 = z[5]; const Real_t z6 = z[6]; const Real_t z7 = z[7]; Real_t fjxxi, fjxet, fjxze; 
// -- Jacobian rows (0.125-scaled diagonal nodal differences) and their cofactors; partials b[][0..3]
// -- are computed explicitly and nodes 4..7 filled by the antisymmetry noted in the original comment.
Real_t fjyxi, fjyet, fjyze; Real_t fjzxi, fjzet, fjzze; Real_t cjxxi, cjxet, cjxze; Real_t cjyxi, cjyet, cjyze; Real_t cjzxi, cjzet, cjzze; fjxxi = Real_t(.125) * ((x6 - x0) + (x5 - x3) - (x7 - x1) - (x4 - x2)); fjxet = Real_t(.125) * ((x6 - x0) - (x5 - x3) + (x7 - x1) - (x4 - x2)); fjxze = Real_t(.125) * ((x6 - x0) + (x5 - x3) + (x7 - x1) + (x4 - x2)); fjyxi = Real_t(.125) * ((y6 - y0) + (y5 - y3) - (y7 - y1) - (y4 - y2)); fjyet = Real_t(.125) * ((y6 - y0) - (y5 - y3) + (y7 - y1) - (y4 - y2)); fjyze = Real_t(.125) * ((y6 - y0) + (y5 - y3) + (y7 - y1) + (y4 - y2)); fjzxi = Real_t(.125) * ((z6 - z0) + (z5 - z3) - (z7 - z1) - (z4 - z2)); fjzet = Real_t(.125) * ((z6 - z0) - (z5 - z3) + (z7 - z1) - (z4 - z2)); fjzze = Real_t(.125) * ((z6 - z0) + (z5 - z3) + (z7 - z1) + (z4 - z2)); /* compute cofactors */ cjxxi = (fjyet * fjzze) - (fjzet * fjyze); cjxet = -(fjyxi * fjzze) + (fjzxi * fjyze); cjxze = (fjyxi * fjzet) - (fjzxi * fjyet); cjyxi = -(fjxet * fjzze) + (fjzet * fjxze); cjyet = (fjxxi * fjzze) - (fjzxi * fjxze); cjyze = -(fjxxi * fjzet) + (fjzxi * fjxet); cjzxi = (fjxet * fjyze) - (fjyet * fjxze); cjzet = -(fjxxi * fjyze) + (fjyxi * fjxze); cjzze = (fjxxi * fjyet) - (fjyxi * fjxet); /* calculate partials : this need only be done for l = 0,1,2,3 since , by symmetry , (6,7,4,5) = - (0,1,2,3) . 
*/ b[0][0] = -cjxxi - cjxet - cjxze; b[0][1] = cjxxi - cjxet - cjxze; b[0][2] = cjxxi + cjxet - cjxze; b[0][3] = -cjxxi + cjxet - cjxze; b[0][4] = -b[0][2]; b[0][5] = -b[0][3]; b[0][6] = -b[0][0]; b[0][7] = -b[0][1]; b[1][0] = -cjyxi - cjyet - cjyze; b[1][1] = cjyxi - cjyet - cjyze; b[1][2] = cjyxi + cjyet - cjyze; b[1][3] = -cjyxi + cjyet - cjyze; b[1][4] = -b[1][2]; b[1][5] = -b[1][3]; b[1][6] = -b[1][0]; b[1][7] = -b[1][1]; b[2][0] = -cjzxi - cjzet - cjzze; b[2][1] = cjzxi - cjzet - cjzze; b[2][2] = cjzxi + cjzet - cjzze; b[2][3] = -cjzxi + cjzet - cjzze; b[2][4] = -b[2][2]; b[2][5] = -b[2][3]; b[2][6] = -b[2][0]; b[2][7] = -b[2][1]; /* calculate jacobian determinant (volume) */ *volume = Real_t(8.) * (fjxet * cjxet + fjyet * cjyet + fjzet * cjzet); } __host__ __device__ static inline void SumElemFaceNormal(Real_t* normalX0, Real_t* normalY0, Real_t* normalZ0, Real_t* normalX1, Real_t* normalY1, Real_t* normalZ1, Real_t* normalX2, Real_t* normalY2, Real_t* normalZ2, Real_t* normalX3, Real_t* normalY3, Real_t* normalZ3, const Real_t x0, const Real_t y0, const Real_t z0, const Real_t x1, const Real_t y1, const Real_t z1, const Real_t x2, const Real_t y2, const Real_t z2, const Real_t x3, const Real_t y3, const Real_t z3) { Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0); Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0); Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0); Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0); Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0); Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0); Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1); Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1); Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1); *normalX0 += areaX; *normalX1 += areaX; *normalX2 += areaX; *normalX3 += areaX; *normalY0 += areaY; *normalY1 += areaY; *normalY2 += areaY; *normalY3 += areaY; *normalZ0 += areaZ; *normalZ1 += areaZ; *normalZ2 += 
// -- SumElemFaceNormal completes (quarter cross-product of face bisectors, accumulated into all four
// -- corner normals); CalcElemNodeNormals zeroes pf* then accumulates the six hex faces; the
// -- stress-to-nodal-force scatter begins (stride parameter lets one routine serve SoA and local layouts).
areaZ; *normalZ3 += areaZ; } __host__ __device__ static inline void CalcElemNodeNormals(Real_t pfx[8], Real_t pfy[8], Real_t pfz[8], const Real_t x[8], const Real_t y[8], const Real_t z[8]) { for (Index_t i = 0; i < 8; ++i) { pfx[i] = Real_t(0.0); pfy[i] = Real_t(0.0); pfz[i] = Real_t(0.0); } /* evaluate face one: nodes 0, 1, 2, 3 */ SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0], &pfx[1], &pfy[1], &pfz[1], &pfx[2], &pfy[2], &pfz[2], &pfx[3], &pfy[3], &pfz[3], x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2], x[3], y[3], z[3]); /* evaluate face two: nodes 0, 4, 5, 1 */ SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0], &pfx[4], &pfy[4], &pfz[4], &pfx[5], &pfy[5], &pfz[5], &pfx[1], &pfy[1], &pfz[1], x[0], y[0], z[0], x[4], y[4], z[4], x[5], y[5], z[5], x[1], y[1], z[1]); /* evaluate face three: nodes 1, 5, 6, 2 */ SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1], &pfx[5], &pfy[5], &pfz[5], &pfx[6], &pfy[6], &pfz[6], &pfx[2], &pfy[2], &pfz[2], x[1], y[1], z[1], x[5], y[5], z[5], x[6], y[6], z[6], x[2], y[2], z[2]); /* evaluate face four: nodes 2, 6, 7, 3 */ SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2], &pfx[6], &pfy[6], &pfz[6], &pfx[7], &pfy[7], &pfz[7], &pfx[3], &pfy[3], &pfz[3], x[2], y[2], z[2], x[6], y[6], z[6], x[7], y[7], z[7], x[3], y[3], z[3]); /* evaluate face five: nodes 3, 7, 4, 0 */ SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3], &pfx[7], &pfy[7], &pfz[7], &pfx[4], &pfy[4], &pfz[4], &pfx[0], &pfy[0], &pfz[0], x[3], y[3], z[3], x[7], y[7], z[7], x[4], y[4], z[4], x[0], y[0], z[0]); /* evaluate face six: nodes 4, 7, 6, 5 */ SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4], &pfx[7], &pfy[7], &pfz[7], &pfx[6], &pfy[6], &pfz[6], &pfx[5], &pfy[5], &pfz[5], x[4], y[4], z[4], x[7], y[7], z[7], x[6], y[6], z[6], x[5], y[5], z[5]); } __host__ __device__ static inline void SumElemStressesToNodeForces( const Real_t B[][8], const Real_t stress_xx, const Real_t stress_yy, const Real_t stress_zz, Real_t* const fx, Real_t* const fy, Real_t* const fz, int stride) { Real_t pfx0 = B[0][0]; 
// -- Shape-derivative components are unpacked into scalars, then nodal forces f = -stress * pf are
// -- written at the given stride (stride==numElem for the element-major GPU buffers, 1 for CPU locals);
// -- IntegrateStressForElems_kernel (one thread per element) begins.
Real_t pfx1 = B[0][1]; Real_t pfx2 = B[0][2]; Real_t pfx3 = B[0][3]; Real_t pfx4 = B[0][4]; Real_t pfx5 = B[0][5]; Real_t pfx6 = B[0][6]; Real_t pfx7 = B[0][7]; Real_t pfy0 = B[1][0]; Real_t pfy1 = B[1][1]; Real_t pfy2 = B[1][2]; Real_t pfy3 = B[1][3]; Real_t pfy4 = B[1][4]; Real_t pfy5 = B[1][5]; Real_t pfy6 = B[1][6]; Real_t pfy7 = B[1][7]; Real_t pfz0 = B[2][0]; Real_t pfz1 = B[2][1]; Real_t pfz2 = B[2][2]; Real_t pfz3 = B[2][3]; Real_t pfz4 = B[2][4]; Real_t pfz5 = B[2][5]; Real_t pfz6 = B[2][6]; Real_t pfz7 = B[2][7]; fx[0 * stride] = -(stress_xx * pfx0); fx[1 * stride] = -(stress_xx * pfx1); fx[2 * stride] = -(stress_xx * pfx2); fx[3 * stride] = -(stress_xx * pfx3); fx[4 * stride] = -(stress_xx * pfx4); fx[5 * stride] = -(stress_xx * pfx5); fx[6 * stride] = -(stress_xx * pfx6); fx[7 * stride] = -(stress_xx * pfx7); fy[0 * stride] = -(stress_yy * pfy0); fy[1 * stride] = -(stress_yy * pfy1); fy[2 * stride] = -(stress_yy * pfy2); fy[3 * stride] = -(stress_yy * pfy3); fy[4 * stride] = -(stress_yy * pfy4); fy[5 * stride] = -(stress_yy * pfy5); fy[6 * stride] = -(stress_yy * pfy6); fy[7 * stride] = -(stress_yy * pfy7); fz[0 * stride] = -(stress_zz * pfz0); fz[1 * stride] = -(stress_zz * pfz1); fz[2 * stride] = -(stress_zz * pfz2); fz[3 * stride] = -(stress_zz * pfz3); fz[4 * stride] = -(stress_zz * pfz4); fz[5 * stride] = -(stress_zz * pfz5); fz[6 * stride] = -(stress_zz * pfz6); fz[7 * stride] = -(stress_zz * pfz7); } __global__ void IntegrateStressForElems_kernel(Index_t numElem, Index_t* nodelist, Real_t* x, Real_t* y, Real_t* z, Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ) { Real_t B[3][8]; // shape function derivatives Real_t x_local[8]; Real_t y_local[8]; Real_t z_local[8]; int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < numElem) { // get nodal coordinates from global arrays and copy into local arrays. 
// -- Element kernel gathers the 8 nodal coords (element-major indexing k + lnode*numElem), computes B and
// -- the element volume into determ[k], and scatters per-element corner forces; the two node-side kernels
// -- then gather those contributions per node via nodeElemCount/nodeElemCornerList (the "2" variant
// -- accumulates with += instead of overwriting).
for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = nodelist[k + lnode * numElem]; x_local[lnode] = x[gnode]; y_local[lnode] = y[gnode]; z_local[lnode] = z[gnode]; } /* Volume calculation involves extra work for numerical consistency. */ CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &determ[k]); CalcElemNodeNormals(B[0], B[1], B[2], x_local, y_local, z_local); SumElemStressesToNodeForces(B, sigxx[k], sigyy[k], sigzz[k], &fx_elem[k], &fy_elem[k], &fz_elem[k], numElem); } } __global__ void AddNodeForcesFromElems_kernel(Index_t numNode, Int_t* nodeElemCount, Index_t* nodeElemCornerList, Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem, Real_t* fx_node, Real_t* fy_node, Real_t* fz_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { Int_t count = nodeElemCount[i]; Real_t fx, fy, fz; fx = fy = fz = Real_t(0.0); for (int j = 0; j < count; j++) { Index_t elem = nodeElemCornerList[i + numNode * j]; fx += fx_elem[elem]; fy += fy_elem[elem]; fz += fz_elem[elem]; } fx_node[i] = fx; fy_node[i] = fy; fz_node[i] = fz; } } __global__ void AddNodeForcesFromElems2_kernel(Index_t numNode, Int_t* nodeElemCount, Index_t* nodeElemCornerList, Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem, Real_t* fx_node, Real_t* fy_node, Real_t* fz_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { Int_t count = nodeElemCount[i]; Real_t fx, fy, fz; fx = fy = fz = Real_t(0.0); for (int j = 0; j < count; j++) { Index_t elem = nodeElemCornerList[i + numNode * j]; fx += fx_elem[elem]; fy += fy_elem[elem]; fz += fz_elem[elem]; } fx_node[i] += fx; fy_node[i] += fy; fz_node[i] += fz; } } static inline void IntegrateStressForElems_gpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ, int& badvol, hipStream_t stream_app) { Real_t *fx_elem, *fy_elem, *fz_elem; CUDA(hipMalloc(&fx_elem, numElem * 8 * sizeof(Real_t))); CUDA(hipMalloc(&fy_elem, numElem * 8 * sizeof(Real_t))); CUDA(hipMalloc(&fz_elem, numElem * 8 * 
// -- GPU driver: per-call hipMalloc of the three 8*numElem scratch buffers; the matching hipFree calls are
// -- commented out below, so these allocations leak on every invocation -- NOTE(review): fix by freeing
// -- after the gather kernel or hoisting the buffers to persistent state. badvol is also hard-coded 0 on
// -- the GPU path (no non-positive-volume reduction yet, per the JDC comment), unlike the CPU path.
sizeof(Real_t))); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1); hipLaunchKernelGGL(( IntegrateStressForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, fx_elem, fy_elem, fz_elem, sigxx, sigyy, sigzz, determ); CUDA_DEBUGSYNC; dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, mesh.numNode(), meshGPU.m_nodeElemCount, meshGPU.m_nodeElemCornerList, fx_elem, fy_elem, fz_elem, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz); CUDA_DEBUGSYNC; // CUDA( hipFree(fx_elem) ); // CUDA( hipFree(fy_elem) ); // CUDA( hipFree(fz_elem) ); // JDC -- need a reduction step to check for non-positive element volumes badvol = 0; } static inline void IntegrateStressForElems_cpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ, int& badvol) { Real_t B[3][8]; // shape function derivatives Real_t x_local[8]; Real_t y_local[8]; Real_t z_local[8]; Real_t fx_local[8]; Real_t fy_local[8]; Real_t fz_local[8]; // loop over all elements for (Index_t k = 0; k < numElem; ++k) { // get nodal coordinates from global arrays and copy into local arrays. for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); x_local[lnode] = mesh.x(gnode); y_local[lnode] = mesh.y(gnode); z_local[lnode] = mesh.z(gnode); } /* Volume calculation involves extra work for numerical consistency. */ CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &determ[k]); CalcElemNodeNormals(B[0], B[1], B[2], x_local, y_local, z_local); SumElemStressesToNodeForces(B, sigxx[k], sigyy[k], sigzz[k], fx_local, fy_local, fz_local, 1); // copy nodal force contributions to global force arrray. 
// -- CPU path scatters local forces into mesh.fx/fy/fz and flags badvol on any determ <= 0; the
// -- dispatcher freshens inputs on the chosen side and marks the force arrays stale on the other;
// -- CollectDomainNodesToElemNodes gathers one element's 8 nodal coords; VoluDer's signature begins.
for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); mesh.fx(gnode) += fx_local[lnode]; mesh.fy(gnode) += fy_local[lnode]; mesh.fz(gnode) += fz_local[lnode]; } } badvol = 0; for (Index_t k = 0; k < numElem; ++k) { if (determ[k] <= Real_t(0.0)) { badvol = 1; } } } static inline void IntegrateStressForElems(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ, int& badvol, int useCPU, hipStream_t stream_app) { if (useCPU) { FC(nodelist); FC(x); FC(y); FC(z); IntegrateStressForElems_cpu(numElem, sigxx, sigyy, sigzz, determ, badvol); SG(fx); SG(fy); SG(fz); } else { FG(nodelist); FG(nodeElemCount); FG(nodeElemCornerList); FG(x); FG(y); FG(z); IntegrateStressForElems_gpu(numElem, sigxx, sigyy, sigzz, determ, badvol, stream_app); SC(fx); SC(fy); SC(fz); } } static inline void CollectDomainNodesToElemNodes(const Index_t elemNum, Real_t elemX[8], Real_t elemY[8], Real_t elemZ[8]) { Index_t nd0i = mesh.nodelist(elemNum, 0); Index_t nd1i = mesh.nodelist(elemNum, 1); Index_t nd2i = mesh.nodelist(elemNum, 2); Index_t nd3i = mesh.nodelist(elemNum, 3); Index_t nd4i = mesh.nodelist(elemNum, 4); Index_t nd5i = mesh.nodelist(elemNum, 5); Index_t nd6i = mesh.nodelist(elemNum, 6); Index_t nd7i = mesh.nodelist(elemNum, 7); elemX[0] = mesh.x(nd0i); elemX[1] = mesh.x(nd1i); elemX[2] = mesh.x(nd2i); elemX[3] = mesh.x(nd3i); elemX[4] = mesh.x(nd4i); elemX[5] = mesh.x(nd5i); elemX[6] = mesh.x(nd6i); elemX[7] = mesh.x(nd7i); elemY[0] = mesh.y(nd0i); elemY[1] = mesh.y(nd1i); elemY[2] = mesh.y(nd2i); elemY[3] = mesh.y(nd3i); elemY[4] = mesh.y(nd4i); elemY[5] = mesh.y(nd5i); elemY[6] = mesh.y(nd6i); elemY[7] = mesh.y(nd7i); elemZ[0] = mesh.z(nd0i); elemZ[1] = mesh.z(nd1i); elemZ[2] = mesh.z(nd2i); elemZ[3] = mesh.z(nd3i); elemZ[4] = mesh.z(nd4i); elemZ[5] = mesh.z(nd5i); elemZ[6] = mesh.z(nd6i); elemZ[7] = mesh.z(nd7i); } __host__ static inline void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3, const Real_t 
x4, const Real_t x5, const Real_t y0, const Real_t y1, const Real_t y2, const Real_t y3, const Real_t y4, const Real_t y5, const Real_t z0, const Real_t z1, const Real_t z2, const Real_t z3, const Real_t z4, const Real_t z5, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz) { const Real_t twelfth = Real_t(1.0) / Real_t(12.0); *dvdx = (y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) + (y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) - (y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5); *dvdy = -(x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) - (x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) + (x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5); *dvdz = -(y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) - (y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) + (y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5); *dvdx *= twelfth; *dvdy *= twelfth; *dvdz *= twelfth; } #if 0 __device__ static inline void VOLUDER(const Real_t a0, const Real_t a1, const Real_t a2, const Real_t a3, const Real_t a4, const Real_t a5, const Real_t b0, const Real_t b1, const Real_t b2, const Real_t b3, const Real_t b4, const Real_t b5, Real_t& dvdc) { const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; dvdc= (a1 + a2) * (b0 + b1) - (a0 + a1) * (b1 + b2) + (a0 + a4) * (b3 + b4) - (a3 + a4) * (b0 + b4) - (a2 + a5) * (b3 + b5) + (a3 + a5) * (b2 + b5); dvdc *= twelfth; } #else // Even though the above version is inlined, it seems to prohibit some kind of // compiler optimization. // This macro version uses many fewer registers and avoids spill-over into local // memory. 
// Device-side volume derivative: one component, from six "a" and six "b"
// nodal coordinates (macro form to reduce register pressure; see note).
#define VOLUDER(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4, b5, dvdc)     \
  {                                                                       \
    const Real_t twelfth = Real_t(1.0) / Real_t(12.0);                    \
                                                                          \
    dvdc = ((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
           ((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
           ((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
    dvdc *= twelfth;                                                      \
  }
#endif

// Host version: volume derivatives dV/dx, dV/dy, dV/dz at all eight corner
// nodes of a hexahedron.  Each VoluDer call passes the six neighbour-node
// permutation appropriate to the destination node index.
__host__ static inline void CalcElemVolumeDerivative(Real_t dvdx[8],
                                                     Real_t dvdy[8],
                                                     Real_t dvdz[8],
                                                     const Real_t x[8],
                                                     const Real_t y[8],
                                                     const Real_t z[8]) {
  VoluDer(x[1], x[2], x[3], x[4], x[5], x[7], y[1], y[2], y[3], y[4], y[5],
          y[7], z[1], z[2], z[3], z[4], z[5], z[7], &dvdx[0], &dvdy[0],
          &dvdz[0]);
  VoluDer(x[0], x[1], x[2], x[7], x[4], x[6], y[0], y[1], y[2], y[7], y[4],
          y[6], z[0], z[1], z[2], z[7], z[4], z[6], &dvdx[3], &dvdy[3],
          &dvdz[3]);
  VoluDer(x[3], x[0], x[1], x[6], x[7], x[5], y[3], y[0], y[1], y[6], y[7],
          y[5], z[3], z[0], z[1], z[6], z[7], z[5], &dvdx[2], &dvdy[2],
          &dvdz[2]);
  VoluDer(x[2], x[3], x[0], x[5], x[6], x[4], y[2], y[3], y[0], y[5], y[6],
          y[4], z[2], z[3], z[0], z[5], z[6], z[4], &dvdx[1], &dvdy[1],
          &dvdz[1]);
  VoluDer(x[7], x[6], x[5], x[0], x[3], x[1], y[7], y[6], y[5], y[0], y[3],
          y[1], z[7], z[6], z[5], z[0], z[3], z[1], &dvdx[4], &dvdy[4],
          &dvdz[4]);
  VoluDer(x[4], x[7], x[6], x[1], x[0], x[2], y[4], y[7], y[6], y[1], y[0],
          y[2], z[4], z[7], z[6], z[1], z[0], z[2], &dvdx[5], &dvdy[5],
          &dvdz[5]);
  VoluDer(x[5], x[4], x[7], x[2], x[1], x[3], y[5], y[4], y[7], y[2], y[1],
          y[3], z[5], z[4], z[7], z[2], z[1], z[3], &dvdx[6], &dvdy[6],
          &dvdz[6]);
  VoluDer(x[6], x[5], x[4], x[3], x[2], x[0], y[6], y[5], y[4], y[3], y[2],
          y[0], z[6], z[5], z[4], z[3], z[2], z[0], &dvdx[7], &dvdy[7],
          &dvdz[7]);
}

// Device version: one thread handles one (element, node) pair; `node`
// selects the six-neighbour index permutation in the switch below.  Shared
// arrays stage coordinates so each thread can read its neighbours' values.
// NOTE(review): the array1/array2 sizes assume a 256-thread block laid out
// as 32 elements x 8 nodes -- confirm against the launch configuration.
__device__ static inline void CalcElemVolumeDerivative(
    Real_t& dvdx, Real_t& dvdy, Real_t& dvdz, const Real_t x, const Real_t y,
    const Real_t z, unsigned int node) {
  __shared__ Real_t array1[256], array2[256];
  volatile Real_t* va1;
  volatile Real_t* va2;
  unsigned int idx, elem;
  unsigned int ind0,
ind1, ind2, ind3, ind4, ind5; switch (node) { case 0: { ind0 = 1; ind1 = 2; ind2 = 3; ind3 = 4; ind4 = 5; ind5 = 7; break; } case 1: { ind0 = 2; ind1 = 3; ind2 = 0; ind3 = 5; ind4 = 6; ind5 = 4; break; } case 2: { ind0 = 3; ind1 = 0; ind2 = 1; ind3 = 6; ind4 = 7; ind5 = 5; break; } case 3: { ind0 = 0; ind1 = 1; ind2 = 2; ind3 = 7; ind4 = 4; ind5 = 6; break; } case 4: { ind0 = 7; ind1 = 6; ind2 = 5; ind3 = 0; ind4 = 3; ind5 = 1; break; } case 5: { ind0 = 4; ind1 = 7; ind2 = 6; ind3 = 1; ind4 = 0; ind5 = 2; break; } case 6: { ind0 = 5; ind1 = 4; ind2 = 7; ind3 = 2; ind4 = 1; ind5 = 3; break; } case 7: { ind0 = 6; ind1 = 5; ind2 = 4; ind3 = 3; ind4 = 2; ind5 = 0; break; } default: { ind0 = ind1 = ind2 = ind3 = ind4 = ind5 = 0xFFFFFFFF; break; } } idx = threadIdx.x; elem = idx /*& 0x1F*/ - node * 32; va1 = &array1[0]; va2 = &array2[0]; // load y and z __syncthreads(); va1[idx] = y; va2[idx] = z; __syncthreads(); VOLUDER(va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], dvdx); // load x __syncthreads(); va1[idx] = x; __syncthreads(); VOLUDER(va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], dvdy); __syncthreads(); // load y __syncthreads(); va2[idx] = y; __syncthreads(); VOLUDER(va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], dvdz); __syncthreads(); } __host__ static inline void 
CalcElemFBHourglassForce(Real_t* xd, Real_t* yd, Real_t* zd, Real_t* hourgam0, Real_t* hourgam1, Real_t* hourgam2, Real_t* hourgam3, Real_t* hourgam4, Real_t* hourgam5, Real_t* hourgam6, Real_t* hourgam7, Real_t coefficient, Real_t* hgfx, Real_t* hgfy, Real_t* hgfz) { Index_t i00 = 0; Index_t i01 = 1; Index_t i02 = 2; Index_t i03 = 3; Real_t h00 = hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] + hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] + hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] + hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7]; Real_t h01 = hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] + hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] + hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] + hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7]; Real_t h02 = hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1] + hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3] + hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5] + hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7]; Real_t h03 = hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] + hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] + hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] + hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7]; hgfx[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 + hourgam0[i02] * h02 + hourgam0[i03] * h03); hgfx[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 + hourgam1[i02] * h02 + hourgam1[i03] * h03); hgfx[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 + hourgam2[i02] * h02 + hourgam2[i03] * h03); hgfx[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 + hourgam3[i02] * h02 + hourgam3[i03] * h03); hgfx[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 + hourgam4[i02] * h02 + hourgam4[i03] * h03); hgfx[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 + hourgam5[i02] * h02 + hourgam5[i03] * h03); hgfx[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 + hourgam6[i02] * h02 + hourgam6[i03] * h03); hgfx[7] = coefficient * (hourgam7[i00] * h00 + 
hourgam7[i01] * h01 + hourgam7[i02] * h02 + hourgam7[i03] * h03); h00 = hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] + hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] + hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] + hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7]; h01 = hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] + hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] + hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] + hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7]; h02 = hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1] + hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3] + hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5] + hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7]; h03 = hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] + hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] + hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] + hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7]; hgfy[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 + hourgam0[i02] * h02 + hourgam0[i03] * h03); hgfy[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 + hourgam1[i02] * h02 + hourgam1[i03] * h03); hgfy[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 + hourgam2[i02] * h02 + hourgam2[i03] * h03); hgfy[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 + hourgam3[i02] * h02 + hourgam3[i03] * h03); hgfy[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 + hourgam4[i02] * h02 + hourgam4[i03] * h03); hgfy[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 + hourgam5[i02] * h02 + hourgam5[i03] * h03); hgfy[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 + hourgam6[i02] * h02 + hourgam6[i03] * h03); hgfy[7] = coefficient * (hourgam7[i00] * h00 + hourgam7[i01] * h01 + hourgam7[i02] * h02 + hourgam7[i03] * h03); h00 = hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] + hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] + hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] + hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7]; h01 = hourgam0[i01] * zd[0] + hourgam1[i01] 
* zd[1] + hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] + hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] + hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7]; h02 = hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1] + hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3] + hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5] + hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7]; h03 = hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] + hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] + hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] + hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7]; hgfz[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 + hourgam0[i02] * h02 + hourgam0[i03] * h03); hgfz[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 + hourgam1[i02] * h02 + hourgam1[i03] * h03); hgfz[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 + hourgam2[i02] * h02 + hourgam2[i03] * h03); hgfz[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 + hourgam3[i02] * h02 + hourgam3[i03] * h03); hgfz[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 + hourgam4[i02] * h02 + hourgam4[i03] * h03); hgfz[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 + hourgam5[i02] * h02 + hourgam5[i03] * h03); hgfz[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 + hourgam6[i02] * h02 + hourgam6[i03] * h03); hgfz[7] = coefficient * (hourgam7[i00] * h00 + hourgam7[i01] * h01 + hourgam7[i02] * h02 + hourgam7[i03] * h03); } __shared__ Real_t shm_array[32 * 8]; __device__ static inline Real_t SumOverNodes(Real_t val) { // Sum up 8 node values for each element // Assumes 256 threads: 32 elements, 8 nodes per element. // NOTE: we could probably avoid some of the __syncthreads() if we map 8 nodes // of an element to the same warp. 
// (body of SumOverNodes) Block layout: 32 elements x 8 nodes = 256 threads;
// threads with the same (tid mod 32) belong to the same element.
unsigned int tid = threadIdx.x;
#if 1
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#elif 1
  unsigned int node = tid / 32;
  unsigned int elem = tid - (node * 32);
#else
  unsigned int elem = tid & 0x1F;
#endif
  // Tree reduction over the node dimension (stride 32 = one "node row"):
  // after the three halving steps, shm_array[elem] holds the 8-node sum.
  // Barriers separate every shared-memory write from the next read.
  __syncthreads();
  shm_array[tid] = val;
  __syncthreads();
  if (tid < 128) shm_array[tid] += shm_array[tid + 128];
  __syncthreads();
  if (tid < 64) shm_array[tid] += shm_array[tid + 64];
  __syncthreads();
  if (tid < 32) shm_array[tid] += shm_array[tid + 32];
  __syncthreads();
  Real_t ret = shm_array[elem];
  // Final barrier so a subsequent call cannot overwrite shm_array while
  // some threads are still reading their result.
  __syncthreads();
  return ret;
#else
  // Disabled alternative: element-major layout, reduction across the 8
  // nodes of each element.
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#else
  unsigned int node = tid / 32;
  unsigned int elem = tid - (node * 32);
#endif
  unsigned int idx = elem * 8 + node;
  __syncthreads();
  shm_array[idx] = val;
  __syncthreads();
  if (node < 4) shm_array[idx] += shm_array[idx + 4];
  if (node < 2) shm_array[idx] += shm_array[idx + 2];
  if (node < 1) shm_array[idx] += shm_array[idx + 1];
  __syncthreads();
  return shm_array[elem * 8];
#endif
}

// Per-thread FB hourglass force for one (element, node) pair.  For each of
// the four hourglass modes, SumOverNodes folds h over the element's eight
// nodes, so this function must be called by all 256 threads of the block
// (it contains block-wide barriers via SumOverNodes).
__device__ static inline void CalcElemFBHourglassForce(
    Real_t xd, Real_t yd, Real_t zd, Real_t* hourgam, Real_t coefficient,
    Real_t& hgfx, Real_t& hgfy, Real_t& hgfz) {
  hgfx = 0;
  for (int i = 0; i < 4; i++) {
    Real_t h;
    h = hourgam[i] * xd;
    h = SumOverNodes(h);
    hgfx += hourgam[i] * h;
  }
  hgfx *= coefficient;
  hgfy = 0;
  for (int i = 0; i < 4; i++) {
    Real_t h;
    h = hourgam[i] * yd;
    h = SumOverNodes(h);
    hgfy += hourgam[i] * h;
  }
  hgfy *= coefficient;
  hgfz = 0;
  for (int i = 0; i < 4; i++) {
    Real_t h;
    h = hourgam[i] * zd;
    h = SumOverNodes(h);
    hgfz += hourgam[i] * h;
  }
  hgfz *= coefficient;
}

__global__ void CalcFBHourglassForceForElems_kernel(
    Real_t* determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx,
    Real_t* dvdy, Real_t* dvdz, Real_t hourg, Index_t numElem,
    Index_t* nodelist, Real_t* ss, Real_t* elemMass, Real_t* xd, Real_t* yd,
    Real_t* zd, Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem) {
  /*************************************************
   *
   *     FUNCTION: Calculates the
Flanagan-Belytschko anti-hourglass * force. * *************************************************/ Real_t hgfx, hgfy, hgfz; Real_t coefficient; Real_t hourgam[4]; Real_t xd1, yd1, zd1; /*************************************************/ /* compute the hourglass modes */ const Real_t posf = Real_t(1.); const Real_t negf = Real_t(-1.); // Assume we will launch 256 threads, which we map to 32 elements, each // with 8 per-node threads. Organize so each warp of 32 consecutive // threads operates on the same node of different elements. // THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!! unsigned int tid = threadIdx.x; unsigned int bid = blockIdx.x; #if 0 unsigned int node=tid>>5; unsigned int elem=bid<<5 + (tid - (node<<5)); #elif 1 unsigned int node = tid / 32; unsigned int elem = bid * 32 + (tid - node * 32); #elif 0 unsigned int node = tid / 32; ; unsigned int elem = bid * 32 + (tid & 0x1F); #elif 0 unsigned int node = tid / 32; unsigned int elem = bid << 5 + (tid & 0x1F); #elif 0 unsigned int node = tid >> 5; unsigned int elem = bid * 32 + (tid & 0x1F); #else unsigned int node = tid >> 5; unsigned int elem = bid << 5 + (tid & 0x1F); #endif if (elem >= numElem) elem = numElem - 1; // don't return -- need thread to participate in sync operations // if (elem<0) elem=0; // debugging test Real_t volinv = Real_t(1.0) / determ[elem]; Real_t ss1, mass1, volume13; Real_t xn, yn, zn, dvdxn, dvdyn, dvdzn; Real_t hourmodx, hourmody, hourmodz; #if 1 xn = x8n[elem + numElem * node]; yn = y8n[elem + numElem * node]; zn = z8n[elem + numElem * node]; dvdxn = dvdx[elem + numElem * node]; dvdyn = dvdy[elem + numElem * node]; dvdzn = dvdz[elem + numElem * node]; #else xn = yn = zn = posf; dvdxn = dvdyn = dvdzn = negf; #endif #if 1 hourmodx = xn; hourmody = yn; hourmodz = zn; if (node == 2 || node == 3 || node == 4 || node == 5) { hourmodx *= negf; hourmody *= negf; hourmodz *= negf; hourgam[0] = negf; } else hourgam[0] = posf; hourmodx = SumOverNodes(hourmodx); hourmody = 
SumOverNodes(hourmody); hourmodz = SumOverNodes(hourmodz); hourgam[0] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz); hourmodx = xn; hourmody = yn; hourmodz = zn; if (node == 1 || node == 2 || node == 4 || node == 7) { hourmodx *= negf; hourmody *= negf; hourmodz *= negf; hourgam[1] = negf; } else hourgam[1] = posf; hourmodx = SumOverNodes(hourmodx); hourmody = SumOverNodes(hourmody); hourmodz = SumOverNodes(hourmodz); hourgam[1] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz); hourmodx = xn; hourmody = yn; hourmodz = zn; if (node == 1 || node == 3 || node == 5 || node == 7) { hourmodx *= negf; hourmody *= negf; hourmodz *= negf; hourgam[2] = negf; } else hourgam[2] = posf; hourmodx = SumOverNodes(hourmodx); hourmody = SumOverNodes(hourmody); hourmodz = SumOverNodes(hourmodz); hourgam[2] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz); hourmodx = xn; hourmody = yn; hourmodz = zn; if (node == 0 || node == 2 || node == 5 || node == 7) { hourmodx *= negf; hourmody *= negf; hourmodz *= negf; hourgam[3] = negf; } else hourgam[3] = posf; hourmodx = SumOverNodes(hourmodx); hourmody = SumOverNodes(hourmody); hourmodz = SumOverNodes(hourmodz); hourgam[3] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz); /* compute forces */ /* store forces into h arrays (force arrays) */ ss1 = ss[elem]; mass1 = elemMass[elem]; volume13 = CBRT(determ[elem]); Index_t ni = nodelist[elem + numElem * node]; xd1 = xd[ni]; yd1 = yd[ni]; zd1 = zd[ni]; coefficient = -hourg * Real_t(0.01) * ss1 * mass1 / volume13; CalcElemFBHourglassForce(xd1, yd1, zd1, hourgam, coefficient, hgfx, hgfy, hgfz); #else hgfx = xn + dvdxn; hgfy = yn + dvdyn; hgfz = zn + dvdzn; #endif #if 1 fx_elem[elem + numElem * node] = hgfx; fy_elem[elem + numElem * node] = hgfy; fz_elem[elem + numElem * node] = hgfz; #else fx_elem[0] = hgfx; fy_elem[0] = hgfy; fz_elem[0] = hgfz; #endif } static inline void CalcFBHourglassForceForElems_cpu(Real_t* 
determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t hourg) { /************************************************* * * FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass * force. * *************************************************/ Index_t numElem = mesh.numElem(); Real_t hgfx[8], hgfy[8], hgfz[8]; Real_t coefficient; Real_t gamma[4][8]; Real_t hourgam0[4], hourgam1[4], hourgam2[4], hourgam3[4]; Real_t hourgam4[4], hourgam5[4], hourgam6[4], hourgam7[4]; Real_t xd1[8], yd1[8], zd1[8]; gamma[0][0] = Real_t(1.); gamma[0][1] = Real_t(1.); gamma[0][2] = Real_t(-1.); gamma[0][3] = Real_t(-1.); gamma[0][4] = Real_t(-1.); gamma[0][5] = Real_t(-1.); gamma[0][6] = Real_t(1.); gamma[0][7] = Real_t(1.); gamma[1][0] = Real_t(1.); gamma[1][1] = Real_t(-1.); gamma[1][2] = Real_t(-1.); gamma[1][3] = Real_t(1.); gamma[1][4] = Real_t(-1.); gamma[1][5] = Real_t(1.); gamma[1][6] = Real_t(1.); gamma[1][7] = Real_t(-1.); gamma[2][0] = Real_t(1.); gamma[2][1] = Real_t(-1.); gamma[2][2] = Real_t(1.); gamma[2][3] = Real_t(-1.); gamma[2][4] = Real_t(1.); gamma[2][5] = Real_t(-1.); gamma[2][6] = Real_t(1.); gamma[2][7] = Real_t(-1.); gamma[3][0] = Real_t(-1.); gamma[3][1] = Real_t(1.); gamma[3][2] = Real_t(-1.); gamma[3][3] = Real_t(1.); gamma[3][4] = Real_t(1.); gamma[3][5] = Real_t(-1.); gamma[3][6] = Real_t(1.); gamma[3][7] = Real_t(-1.); /*************************************************/ /* compute the hourglass modes */ for (Index_t i2 = 0; i2 < numElem; ++i2) { Index_t i3 = 8 * i2; Real_t volinv = Real_t(1.0) / determ[i2]; Real_t ss1, mass1, volume13; for (Index_t i1 = 0; i1 < 4; ++i1) { Real_t hourmodx = x8n[i3] * gamma[i1][0] + x8n[i3 + 1] * gamma[i1][1] + x8n[i3 + 2] * gamma[i1][2] + x8n[i3 + 3] * gamma[i1][3] + x8n[i3 + 4] * gamma[i1][4] + x8n[i3 + 5] * gamma[i1][5] + x8n[i3 + 6] * gamma[i1][6] + x8n[i3 + 7] * gamma[i1][7]; Real_t hourmody = y8n[i3] * gamma[i1][0] + y8n[i3 + 1] * gamma[i1][1] + y8n[i3 + 2] * gamma[i1][2] + y8n[i3 + 
3] * gamma[i1][3] + y8n[i3 + 4] * gamma[i1][4] + y8n[i3 + 5] * gamma[i1][5] + y8n[i3 + 6] * gamma[i1][6] + y8n[i3 + 7] * gamma[i1][7]; Real_t hourmodz = z8n[i3] * gamma[i1][0] + z8n[i3 + 1] * gamma[i1][1] + z8n[i3 + 2] * gamma[i1][2] + z8n[i3 + 3] * gamma[i1][3] + z8n[i3 + 4] * gamma[i1][4] + z8n[i3 + 5] * gamma[i1][5] + z8n[i3 + 6] * gamma[i1][6] + z8n[i3 + 7] * gamma[i1][7]; hourgam0[i1] = gamma[i1][0] - volinv * (dvdx[i3] * hourmodx + dvdy[i3] * hourmody + dvdz[i3] * hourmodz); hourgam1[i1] = gamma[i1][1] - volinv * (dvdx[i3 + 1] * hourmodx + dvdy[i3 + 1] * hourmody + dvdz[i3 + 1] * hourmodz); hourgam2[i1] = gamma[i1][2] - volinv * (dvdx[i3 + 2] * hourmodx + dvdy[i3 + 2] * hourmody + dvdz[i3 + 2] * hourmodz); hourgam3[i1] = gamma[i1][3] - volinv * (dvdx[i3 + 3] * hourmodx + dvdy[i3 + 3] * hourmody + dvdz[i3 + 3] * hourmodz); hourgam4[i1] = gamma[i1][4] - volinv * (dvdx[i3 + 4] * hourmodx + dvdy[i3 + 4] * hourmody + dvdz[i3 + 4] * hourmodz); hourgam5[i1] = gamma[i1][5] - volinv * (dvdx[i3 + 5] * hourmodx + dvdy[i3 + 5] * hourmody + dvdz[i3 + 5] * hourmodz); hourgam6[i1] = gamma[i1][6] - volinv * (dvdx[i3 + 6] * hourmodx + dvdy[i3 + 6] * hourmody + dvdz[i3 + 6] * hourmodz); hourgam7[i1] = gamma[i1][7] - volinv * (dvdx[i3 + 7] * hourmodx + dvdy[i3 + 7] * hourmody + dvdz[i3 + 7] * hourmodz); } /* compute forces */ /* store forces into h arrays (force arrays) */ ss1 = mesh.ss(i2); mass1 = mesh.elemMass(i2); volume13 = CBRT(determ[i2]); Index_t n0si2 = mesh.nodelist(i2, 0); Index_t n1si2 = mesh.nodelist(i2, 1); Index_t n2si2 = mesh.nodelist(i2, 2); Index_t n3si2 = mesh.nodelist(i2, 3); Index_t n4si2 = mesh.nodelist(i2, 4); Index_t n5si2 = mesh.nodelist(i2, 5); Index_t n6si2 = mesh.nodelist(i2, 6); Index_t n7si2 = mesh.nodelist(i2, 7); xd1[0] = mesh.xd(n0si2); xd1[1] = mesh.xd(n1si2); xd1[2] = mesh.xd(n2si2); xd1[3] = mesh.xd(n3si2); xd1[4] = mesh.xd(n4si2); xd1[5] = mesh.xd(n5si2); xd1[6] = mesh.xd(n6si2); xd1[7] = mesh.xd(n7si2); yd1[0] = mesh.yd(n0si2); yd1[1] = 
mesh.yd(n1si2); yd1[2] = mesh.yd(n2si2); yd1[3] = mesh.yd(n3si2); yd1[4] = mesh.yd(n4si2); yd1[5] = mesh.yd(n5si2); yd1[6] = mesh.yd(n6si2); yd1[7] = mesh.yd(n7si2); zd1[0] = mesh.zd(n0si2); zd1[1] = mesh.zd(n1si2); zd1[2] = mesh.zd(n2si2); zd1[3] = mesh.zd(n3si2); zd1[4] = mesh.zd(n4si2); zd1[5] = mesh.zd(n5si2); zd1[6] = mesh.zd(n6si2); zd1[7] = mesh.zd(n7si2); coefficient = -hourg * Real_t(0.01) * ss1 * mass1 / volume13; CalcElemFBHourglassForce(xd1, yd1, zd1, hourgam0, hourgam1, hourgam2, hourgam3, hourgam4, hourgam5, hourgam6, hourgam7, coefficient, hgfx, hgfy, hgfz); mesh.fx(n0si2) += hgfx[0]; mesh.fy(n0si2) += hgfy[0]; mesh.fz(n0si2) += hgfz[0]; mesh.fx(n1si2) += hgfx[1]; mesh.fy(n1si2) += hgfy[1]; mesh.fz(n1si2) += hgfz[1]; mesh.fx(n2si2) += hgfx[2]; mesh.fy(n2si2) += hgfy[2]; mesh.fz(n2si2) += hgfz[2]; mesh.fx(n3si2) += hgfx[3]; mesh.fy(n3si2) += hgfy[3]; mesh.fz(n3si2) += hgfz[3]; mesh.fx(n4si2) += hgfx[4]; mesh.fy(n4si2) += hgfy[4]; mesh.fz(n4si2) += hgfz[4]; mesh.fx(n5si2) += hgfx[5]; mesh.fy(n5si2) += hgfy[5]; mesh.fz(n5si2) += hgfz[5]; mesh.fx(n6si2) += hgfx[6]; mesh.fy(n6si2) += hgfy[6]; mesh.fz(n6si2) += hgfz[6]; mesh.fx(n7si2) += hgfx[7]; mesh.fy(n7si2) += hgfy[7]; mesh.fz(n7si2) += hgfz[7]; } } static inline void CalcFBHourglassForceForElems_gpu(Real_t* determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t hourg, hipStream_t stream_app) { Index_t numElem = mesh.numElem(); Real_t *fx_elem, *fy_elem, *fz_elem; CUDA(hipMalloc(&fx_elem, numElem * 8 * sizeof(Real_t))); CUDA(hipMalloc(&fy_elem, numElem * 8 * sizeof(Real_t))); CUDA(hipMalloc(&fz_elem, numElem * 8 * sizeof(Real_t))); dim3 dimBlock = dim3(256, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem * 8, dimBlock.x), 1, 1); hipLaunchKernelGGL(( CalcFBHourglassForceForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hourg, numElem, meshGPU.m_nodelist, meshGPU.m_ss, meshGPU.m_elemMass, meshGPU.m_xd, 
meshGPU.m_yd, meshGPU.m_zd, fx_elem, fy_elem, fz_elem); CUDA_DEBUGSYNC; dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); hipLaunchKernelGGL(( AddNodeForcesFromElems2_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, mesh.numNode(), meshGPU.m_nodeElemCount, meshGPU.m_nodeElemCornerList, fx_elem, fy_elem, fz_elem, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz); CUDA_DEBUGSYNC; // CUDA( hipFree(fx_elem) ); // CUDA( hipFree(fy_elem) ); // CUDA( hipFree(fz_elem) ); } __global__ void CalcHourglassControlForElems_kernel(Int_t numElem, Index_t* nodelist, Real_t* x, Real_t* y, Real_t* z, Real_t* determ, Real_t* volo, Real_t* v, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t* x8n, Real_t* y8n, Real_t* z8n) { Real_t x1, y1, z1; Real_t pfx, pfy, pfz; // THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!! unsigned int tid = threadIdx.x; unsigned int bid = blockIdx.x; #if 0 unsigned int node=tid>>5; unsigned int elem=bid<<5 + (tid - (node<<5)); #elif 1 unsigned int node = tid / 32; unsigned int elem = bid * 32 + (tid - node * 32); #elif 0 unsigned int node = tid / 32; ; unsigned int elem = bid * 32 + (tid & 0x1F); #elif 0 unsigned int node = tid / 32; unsigned int elem = bid << 5 + (tid & 0x1F); #elif 0 unsigned int node = tid >> 5; unsigned int elem = bid * 32 + (tid & 0x1F); #else unsigned int node = tid >> 5; unsigned int elem = bid << 5 + (tid & 0x1F); #endif if (elem >= numElem) elem = numElem - 1; // don't return -- need thread to participate in sync operations Index_t idx = elem + numElem * node; Index_t ni = nodelist[idx]; x1 = x[ni]; y1 = y[ni]; z1 = z[ni]; CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1, node); /* load into temporary storage for FB Hour Glass control */ dvdx[idx] = pfx; dvdy[idx] = pfy; dvdz[idx] = pfz; x8n[idx] = x1; y8n[idx] = y1; z8n[idx] = z1; // if (node==0) determ[elem] = volo[elem] * v[elem]; #if 0 // JDC /* Do a check for negative volumes */ if ( mesh.v(i) <= Real_t(0.0) ) { exit(VolumeError) ; } #endif } static inline 
void CalcHourglassControlForElems_gpu(Real_t determ[], Real_t hgcoef, hipStream_t stream_app) { Index_t numElem = mesh.numElem(); Index_t numElem8 = numElem * 8; Real_t *dvdx, *dvdy, *dvdz; Real_t *x8n, *y8n, *z8n; CUDA(hipMalloc(&dvdx, sizeof(Real_t) * numElem8)); CUDA(hipMalloc(&dvdy, sizeof(Real_t) * numElem8)); CUDA(hipMalloc(&dvdz, sizeof(Real_t) * numElem8)); CUDA(hipMalloc(&x8n, sizeof(Real_t) * numElem8)); CUDA(hipMalloc(&y8n, sizeof(Real_t) * numElem8)); CUDA(hipMalloc(&z8n, sizeof(Real_t) * numElem8)); dim3 dimBlock = dim3(256, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem * 8, dimBlock.x), 1, 1); hipLaunchKernelGGL(( CalcHourglassControlForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, determ, meshGPU.m_volo, meshGPU.m_v, dvdx, dvdy, dvdz, x8n, y8n, z8n); CUDA_DEBUGSYNC; // JDC -- need a reduction to check for negative volumes if (hgcoef > Real_t(0.)) { CalcFBHourglassForceForElems_gpu(determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hgcoef, stream_app); // kernel exec } // CUDA( hipFree(dvdx) ); // CUDA( hipFree(dvdy) ); // CUDA( hipFree(dvdz) ); // CUDA( hipFree(x8n) ); // CUDA( hipFree(y8n) ); // CUDA( hipFree(z8n) ); return; } static inline void CalcHourglassControlForElems_cpu(Real_t determ[], Real_t hgcoef) { Index_t i, ii, jj; Real_t x1[8], y1[8], z1[8]; Real_t pfx[8], pfy[8], pfz[8]; Index_t numElem = mesh.numElem(); Index_t numElem8 = numElem * 8; Real_t* dvdx = Allocate<Real_t>(numElem8); Real_t* dvdy = Allocate<Real_t>(numElem8); Real_t* dvdz = Allocate<Real_t>(numElem8); Real_t* x8n = Allocate<Real_t>(numElem8); Real_t* y8n = Allocate<Real_t>(numElem8); Real_t* z8n = Allocate<Real_t>(numElem8); /* start loop over elements */ for (i = 0; i < numElem; ++i) { CollectDomainNodesToElemNodes(i, x1, y1, z1); CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1); /* load into temporary storage for FB Hour Glass control */ for (ii = 0; ii < 8; ++ii) { jj = 8 * i + ii; dvdx[jj] = 
pfx[ii]; dvdy[jj] = pfy[ii]; dvdz[jj] = pfz[ii]; x8n[jj] = x1[ii]; y8n[jj] = y1[ii]; z8n[jj] = z1[ii]; } determ[i] = mesh.volo(i) * mesh.v(i); /* Do a check for negative volumes */ if (mesh.v(i) <= Real_t(0.0)) { exit(VolumeError); } } if (hgcoef > Real_t(0.)) { CalcFBHourglassForceForElems_cpu(determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hgcoef); } Release(&z8n); Release(&y8n); Release(&x8n); Release(&dvdz); Release(&dvdy); Release(&dvdx); return; } static inline void CalcHourglassControlForElems(Real_t determ[], Real_t hgcoef, int useCPU, hipStream_t stream_app) { if (useCPU) { FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(nodelist); FC(ss); FC(elemMass); FC(xd); FC(yd); FC(zd); FC(fx); FC(fy); FC(fz); CalcHourglassControlForElems_cpu(determ, hgcoef); SG(fx); SG(fy); SG(fz); } else { FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(nodelist); FG(ss); FG(elemMass); FG(xd); FG(yd); FG(zd); FG(fx); FG(fy); FG(fz); CalcHourglassControlForElems_gpu(determ, hgcoef, stream_app); // kernel exec SC(fx); SC(fy); SC(fz); } } static inline void CalcVolumeForceForElems_gpu(hipStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t hgcoef = mesh.hgcoef(); Real_t *sigxx, *sigyy, *sigzz, *determ; int badvol; CUDA(hipMalloc(&sigxx, numElem * sizeof(Real_t))); CUDA(hipMalloc(&sigyy, numElem * sizeof(Real_t))); CUDA(hipMalloc(&sigzz, numElem * sizeof(Real_t))); CUDA(hipMalloc(&determ, numElem * sizeof(Real_t))); /* Sum contributions to total stress tensor */ InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 0, stream_app); // call elemlib stress integration loop to produce nodal forces from // material stresses. 
IntegrateStressForElems(numElem, sigxx, sigyy, sigzz, determ, badvol, 0, stream_app); // CUDA( hipFree(sigxx) ); // CUDA( hipFree(sigyy) ); // CUDA( hipFree(sigzz) ); // check for negative element volume if (badvol) exit(VolumeError); CalcHourglassControlForElems(determ, hgcoef, 0, stream_app); // CUDA( hipFree(determ) ); } } static inline void CalcVolumeForceForElems_cpu(hipStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t hgcoef = mesh.hgcoef(); Real_t* sigxx = Allocate<Real_t>(numElem); Real_t* sigyy = Allocate<Real_t>(numElem); Real_t* sigzz = Allocate<Real_t>(numElem); Real_t* determ = Allocate<Real_t>(numElem); int badvol; /* Sum contributions to total stress tensor */ InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 1, stream_app); // call elemlib stress integration loop to produce nodal forces from // material stresses. IntegrateStressForElems(numElem, sigxx, sigyy, sigzz, determ, badvol, 1, stream_app); Release(&sigzz); Release(&sigyy); Release(&sigxx); // check for negative element volume if (badvol) exit(VolumeError); #if 0 for ( Index_t k=0 ; k<numElem ; ++k ) { if (determ[k] <= Real_t(0.0)) { exit(VolumeError) ; } } #endif CalcHourglassControlForElems(determ, hgcoef, 1, stream_app); Release(&determ); } } static inline void CalcForceForNodes_gpu(hipStream_t stream_app) { /* Calcforce calls partial, force, hourq */ CalcVolumeForceForElems_gpu(stream_app); /* Calculate Nodal Forces at domain boundaries */ /* problem->commSBN->Transfer(CommSBN::forces); */ } static inline void CalcForceForNodes_cpu(hipStream_t stream_app) { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { mesh.fx(i) = Real_t(0.0); mesh.fy(i) = Real_t(0.0); mesh.fz(i) = Real_t(0.0); } /* Calcforce calls partial, force, hourq */ CalcVolumeForceForElems_cpu(stream_app); /* Calculate Nodal Forces at domain boundaries */ /* problem->commSBN->Transfer(CommSBN::forces); */ } static inline void CalcForceForNodes(int useCPU, 
hipStream_t stream_app) { if (useCPU) { CalcForceForNodes_cpu(stream_app); } else { CalcForceForNodes_gpu(stream_app); } } __global__ void CalcAccelerationForNodes_kernel(int numNode, Real_t* xdd, Real_t* ydd, Real_t* zdd, Real_t* fx, Real_t* fy, Real_t* fz, Real_t* nodalMass) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { xdd[i] = fx[i] / nodalMass[i]; ydd[i] = fy[i] / nodalMass[i]; zdd[i] = fz[i] / nodalMass[i]; } } static inline void CalcAccelerationForNodes_gpu(hipStream_t stream_app) { dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); hipLaunchKernelGGL(( CalcAccelerationForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app, mesh.numNode(), meshGPU.m_xdd, meshGPU.m_ydd, meshGPU.m_zdd, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz, meshGPU.m_nodalMass); CUDA_DEBUGSYNC; } static inline void CalcAccelerationForNodes_cpu() { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { mesh.xdd(i) = mesh.fx(i) / mesh.nodalMass(i); mesh.ydd(i) = mesh.fy(i) / mesh.nodalMass(i); mesh.zdd(i) = mesh.fz(i) / mesh.nodalMass(i); } } static inline void CalcAccelerationForNodes(int useCPU, hipStream_t stream_app) { if (useCPU) { FC(fx); FC(fy); FC(fz); FC(nodalMass); CalcAccelerationForNodes_cpu(); SG(xdd); SG(ydd); SG(zdd); } else { FG(fx); FG(fy); FG(fz); FG(nodalMass); CalcAccelerationForNodes_gpu(stream_app); SC(xdd); SC(ydd); SC(zdd); } } __global__ void ApplyAccelerationBoundaryConditionsForNodes_kernel( int numNodeBC, Real_t* xdd, Real_t* ydd, Real_t* zdd, Index_t* symmX, Index_t* symmY, Index_t* symmZ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNodeBC) { xdd[symmX[i]] = Real_t(0.0); ydd[symmY[i]] = Real_t(0.0); zdd[symmZ[i]] = Real_t(0.0); } } static inline void ApplyAccelerationBoundaryConditionsForNodes_gpu( hipStream_t stream_app) { Index_t numNodeBC = (mesh.sizeX() + 1) * (mesh.sizeX() + 1); dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 dimGrid(PAD_DIV(numNodeBC, 
// --- continuation of ApplyAccelerationBoundaryConditionsForNodes_gpu ---
dimBlock.x), 1, 1);
  hipLaunchKernelGGL((ApplyAccelerationBoundaryConditionsForNodes_kernel),
                     dim3(dimGrid), dim3(dimBlock), 0, stream_app, numNodeBC,
                     meshGPU.m_xdd, meshGPU.m_ydd, meshGPU.m_zdd,
                     meshGPU.m_symmX, meshGPU.m_symmY, meshGPU.m_symmZ);
  CUDA_DEBUGSYNC;
}

// CPU reference path for the symmetry-plane acceleration BCs.
static inline void ApplyAccelerationBoundaryConditionsForNodes_cpu() {
  Index_t numNodeBC = (mesh.sizeX() + 1) * (mesh.sizeX() + 1);
  for (Index_t i = 0; i < numNodeBC; ++i)
    mesh.xdd(mesh.symmX(i)) = Real_t(0.0);
  for (Index_t i = 0; i < numNodeBC; ++i)
    mesh.ydd(mesh.symmY(i)) = Real_t(0.0);
  for (Index_t i = 0; i < numNodeBC; ++i)
    mesh.zdd(mesh.symmZ(i)) = Real_t(0.0);
}

// CPU/GPU dispatcher (FC/FG fetch, SG/SC invalidate -- macros elsewhere).
static inline void ApplyAccelerationBoundaryConditionsForNodes(
    int useCPU, hipStream_t stream_app) {
  if (useCPU) {
    FC(xdd); FC(ydd); FC(zdd); FC(symmX); FC(symmY); FC(symmZ);
    ApplyAccelerationBoundaryConditionsForNodes_cpu();
    SG(xdd); SG(ydd); SG(zdd);
  } else {
    FG(xdd); FG(ydd); FG(zdd); FG(symmX); FG(symmY); FG(symmZ);
    ApplyAccelerationBoundaryConditionsForNodes_gpu(stream_app);
    SC(xdd); SC(ydd); SC(zdd);
  }
}

// Kernel: integrate nodal velocity v += a*dt, then snap any component with
// magnitude below u_cut to zero. One thread per node.
// FIX: the x component previously assigned the double literal 0.0 (next to
// a leftover "// Real_t(0.0)" comment); use Real_t(0.0) like the y and z
// components and the CPU path, avoiding an implicit double->Real_t
// conversion when Real_t is float.
__global__ void CalcVelocityForNodes_kernel(int numNode, const Real_t dt,
                                            const Real_t u_cut, Real_t* xd,
                                            Real_t* yd, Real_t* zd,
                                            Real_t* xdd, Real_t* ydd,
                                            Real_t* zdd) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < numNode) {
    Real_t xdtmp, ydtmp, zdtmp;

    xdtmp = xd[i] + xdd[i] * dt;
    if (FABS(xdtmp) < u_cut) xdtmp = Real_t(0.0);
    xd[i] = xdtmp;

    ydtmp = yd[i] + ydd[i] * dt;
    if (FABS(ydtmp) < u_cut) ydtmp = Real_t(0.0);
    yd[i] = ydtmp;

    zdtmp = zd[i] + zdd[i] * dt;
    if (FABS(zdtmp) < u_cut) zdtmp = Real_t(0.0);
    zd[i] = zdtmp;
  }
}

// Host wrapper: launch the velocity update over all nodes.
static inline void CalcVelocityForNodes_gpu(const Real_t dt,
                                            const Real_t u_cut,
                                            hipStream_t stream_app) {
  dim3 dimBlock(BLOCKSIZE, 1, 1);
  dim3 dimGrid(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcVelocityForNodes_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, mesh.numNode(), dt, u_cut,
                     meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd, meshGPU.m_xdd,
                     meshGPU.m_ydd, meshGPU.m_zdd);
  CUDA_DEBUGSYNC;
}

// CPU reference path for the velocity update (same math as the kernel).
static inline void CalcVelocityForNodes_cpu(const Real_t dt,
                                            const Real_t u_cut) {
  Index_t numNode = mesh.numNode();
  for (Index_t i = 0; i < numNode; ++i) {
    Real_t xdtmp, ydtmp, zdtmp;

    xdtmp = mesh.xd(i) + mesh.xdd(i) * dt;
    if (FABS(xdtmp) < u_cut) xdtmp = Real_t(0.0);
    mesh.xd(i) = xdtmp;

    ydtmp = mesh.yd(i) + mesh.ydd(i) * dt;
    if (FABS(ydtmp) < u_cut) ydtmp = Real_t(0.0);
    mesh.yd(i) = ydtmp;

    zdtmp = mesh.zd(i) + mesh.zdd(i) * dt;
    if (FABS(zdtmp) < u_cut) zdtmp = Real_t(0.0);
    mesh.zd(i) = zdtmp;
  }
}

// CPU/GPU dispatcher for the velocity update.
static inline void CalcVelocityForNodes(const Real_t dt, const Real_t u_cut,
                                        int useCPU, hipStream_t stream_app) {
  if (useCPU) {
    FC(xd); FC(yd); FC(zd); FC(xdd); FC(ydd); FC(zdd);
    CalcVelocityForNodes_cpu(dt, u_cut);
    SG(xd); SG(yd); SG(zd);
  } else {
    FG(xd); FG(yd); FG(zd); FG(xdd); FG(ydd); FG(zdd);
    CalcVelocityForNodes_gpu(dt, u_cut, stream_app);
    SC(xd); SC(yd); SC(zd);
  }
}

// Kernel: integrate nodal position x += v*dt. One thread per node.
__global__ void CalcPositionForNodes_kernel(int numNode, Real_t dt, Real_t* x,
                                            Real_t* y, Real_t* z, Real_t* xd,
                                            Real_t* yd, Real_t* zd) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < numNode) {
    x[i] += xd[i] * dt;
    y[i] += yd[i] * dt;
    z[i] += zd[i] * dt;
  }
}

// Host wrapper: launch the position update over all nodes.
static inline void CalcPositionForNodes_gpu(const Real_t dt,
                                            hipStream_t stream_app) {
  dim3 dimBlock(BLOCKSIZE, 1, 1);
  dim3 dimGrid(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcPositionForNodes_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, mesh.numNode(), dt,
                     meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, meshGPU.m_xd,
                     meshGPU.m_yd, meshGPU.m_zd);
  CUDA_DEBUGSYNC;
}

// CPU reference path for the position update.
static inline void CalcPositionForNodes_cpu(const Real_t dt) {
  Index_t numNode = mesh.numNode();
  for (Index_t i = 0; i < numNode; ++i) {
    mesh.x(i) += mesh.xd(i) * dt;
    mesh.y(i) += mesh.yd(i) * dt;
    mesh.z(i) += mesh.zd(i) * dt;
  }
}

// CPU/GPU dispatcher (continues on the next source line).
static inline void CalcPositionForNodes(const Real_t dt, int useCPU,
                                        hipStream_t stream_app) {
  if (useCPU) {
    FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
    CalcPositionForNodes_cpu(dt);
    SG(x);
// --- continuation of the CalcPositionForNodes dispatcher ---
SG(y); SG(z);
  } else {
    FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
    CalcPositionForNodes_gpu(dt, stream_app);
    SC(x); SC(y); SC(z);
  }
}

// One Lagrange nodal phase: forces -> accelerations -> acceleration BCs ->
// velocities -> positions, each step dispatched to the CPU or GPU path.
static inline void LagrangeNodal(int useCPU, hipStream_t stream_app) {
  const Real_t delt = mesh.deltatime();
  Real_t u_cut = mesh.u_cut();

  /* time of boundary condition evaluation is beginning of step for force and
   * acceleration boundary conditions. */
  CalcForceForNodes(/*0*/ useCPU, stream_app);
  CalcAccelerationForNodes(useCPU, stream_app);
  ApplyAccelerationBoundaryConditionsForNodes(useCPU, stream_app);
  CalcVelocityForNodes(delt, u_cut, useCPU, stream_app);
  CalcPositionForNodes(delt, useCPU, stream_app);
  return;
}

// Volume of a hexahedral element from its 8 corner coordinates
// (continues on the next source line with the triple-product sum).
__host__ __device__ static inline Real_t CalcElemVolume(
    const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3,
    const Real_t x4, const Real_t x5, const Real_t x6, const Real_t x7,
    const Real_t y0, const Real_t y1, const Real_t y2, const Real_t y3,
    const Real_t y4, const Real_t y5, const Real_t y6, const Real_t y7,
    const Real_t z0, const Real_t z1, const Real_t z2, const Real_t z3,
    const Real_t z4, const Real_t z5, const Real_t z6, const Real_t z7) {
  Real_t twelveth = Real_t(1.0) / Real_t(12.0);

  // Difference vectors between corner pairs (edges and diagonals).
  Real_t dx61 = x6 - x1;
  Real_t dy61 = y6 - y1;
  Real_t dz61 = z6 - z1;
  Real_t dx70 = x7 - x0;
  Real_t dy70 = y7 - y0;
  Real_t dz70 = z7 - z0;
  Real_t dx63 = x6 - x3;
  Real_t dy63 = y6 - y3;
  Real_t dz63 = z6 - z3;
  Real_t dx20 = x2 - x0;
  Real_t dy20 = y2 - y0;
  Real_t dz20 = z2 - z0;
  Real_t dx50 = x5 - x0;
  Real_t dy50 = y5 - y0;
  Real_t dz50 = z5 - z0;
  Real_t dx64 = x6 - x4;
  Real_t dy64 = y6 - y4;
  Real_t dz64 = z6 - z4;
  Real_t dx31 = x3 - x1;
  Real_t dy31 = y3 - y1;
  Real_t dz31 = z3 - z1;
  Real_t dx72 = x7 - x2;
  Real_t dy72 = y7 - y2;
  Real_t dz72 = z7 - z2;
  Real_t dx43 = x4 - x3;
  Real_t dy43 = y4 - y3;
  Real_t dz43 = z4 - z3;
  Real_t dx57 = x5 - x7;
  Real_t dy57 = y5 - y7;
  Real_t dz57 = z5 - z7;
  Real_t dx14 = x1 - x4;
  Real_t dy14 = y1 - y4;
  Real_t dz14 = z1 - z4;
  Real_t dx25 = x2 - x5;
  Real_t dy25 = y2 - y5;
  Real_t dz25 = z2 - z5;

// Scalar triple product a . (b x c) of three vectors given componentwise.
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
  ((x1) * ((y2) * (z3) - (z2) * (y3)) + (x2) * ((z1) * (y3) - (y1) * (z3)) + \
   (x3) * ((y1) * (z2) - (z1) * (y2)))

  Real_t volume =
      TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20, dy31 + dy72, dy63, dy20,
                     dz31 + dz72, dz63, dz20) +
      TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70, dy43 + dy57, dy64, dy70,
                     dz43 + dz57, dz64, dz70) +
      TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50, dy14 + dy25, dy61, dy50,
                     dz14 + dz25, dz61, dz50);

#undef TRIPLE_PRODUCT

  volume *= twelveth;

  return volume;
}

// Array-argument convenience overload: forwards the 8 corners.
__host__ __device__ static inline Real_t CalcElemVolume(const Real_t x[8],
                                                        const Real_t y[8],
                                                        const Real_t z[8]) {
  return CalcElemVolume(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], y[0],
                        y[1], y[2], y[3], y[4], y[5], y[6], y[7], z[0], z[1],
                        z[2], z[3], z[4], z[5], z[6], z[7]);
}

// Quad-face metric used by the characteristic length: for the two
// diagonal-difference/sum vectors f, g this returns
// |f|^2 |g|^2 - (f.g)^2 = |f x g|^2 (Lagrange's identity) -- i.e. a
// squared-area quantity; the caller takes the SQRT.
__host__ __device__ static inline Real_t AreaFace(
    const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3,
    const Real_t y0, const Real_t y1, const Real_t y2, const Real_t y3,
    const Real_t z0, const Real_t z1, const Real_t z2, const Real_t z3) {
  Real_t fx = (x2 - x0) - (x3 - x1);
  Real_t fy = (y2 - y0) - (y3 - y1);
  Real_t fz = (z2 - z0) - (z3 - z1);
  Real_t gx = (x2 - x0) + (x3 - x1);
  Real_t gy = (y2 - y0) + (y3 - y1);
  Real_t gz = (z2 - z0) + (z3 - z1);
  Real_t area = (fx * fx + fy * fy + fz * fz) * (gx * gx + gy * gy + gz * gz) -
                (fx * gx + fy * gy + fz * gz) * (fx * gx + fy * gy + fz * gz);
  return area;
}

// Characteristic element length: 4*volume / sqrt(max face metric) over the
// six faces (continues on the next source line).
__host__ __device__ static inline Real_t CalcElemCharacteristicLength(
    const Real_t x[8], const Real_t y[8], const Real_t z[8],
    const Real_t volume) {
  Real_t a, charLength = Real_t(0.0);

  a = AreaFace(x[0], x[1], x[2], x[3], y[0], y[1], y[2], y[3], z[0], z[1],
               z[2], z[3]);
  charLength = FMAX(a, charLength);

  a = AreaFace(x[4], x[5], x[6], x[7], y[4], y[5], y[6], y[7], z[4], z[5],
               z[6], z[7]);
  charLength = FMAX(a, charLength);

  a = AreaFace(x[0], x[1], x[5], x[4], y[0], y[1], y[5], y[4], z[0], z[1],
               z[5], z[4]);
  charLength = FMAX(a, charLength);

  a = AreaFace(x[1],
// --- continuation of CalcElemCharacteristicLength: remaining faces ---
x[2], x[6], x[5], y[1], y[2], y[6], y[5], z[1], z[2], z[6], z[5]);
  charLength = FMAX(a, charLength);

  a = AreaFace(x[2], x[3], x[7], x[6], y[2], y[3], y[7], y[6], z[2], z[3],
               z[7], z[6]);
  charLength = FMAX(a, charLength);

  a = AreaFace(x[3], x[0], x[4], x[7], y[3], y[0], y[4], y[7], z[3], z[0],
               z[4], z[7]);
  charLength = FMAX(a, charLength);

  // 4*V / sqrt(max squared-area metric).
  charLength = Real_t(4.0) * volume / SQRT(charLength);

  return charLength;
}

// Strain-rate components for one element. b holds the shape-function
// derivative weights (b[0]/b[1]/b[2] for the x/y/z directions), detJ the
// Jacobian determinant. d receives six entries: d[0..2] the diagonal
// velocity-gradient terms, d[3..5] the symmetrized off-diagonal terms.
__host__ __device__ static inline void CalcElemVelocityGradient(
    const Real_t* const xvel, const Real_t* const yvel,
    const Real_t* const zvel, const Real_t b[][8], const Real_t detJ,
    Real_t* const d) {
  const Real_t inv_detJ = Real_t(1.0) / detJ;
  Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
  const Real_t* const pfx = b[0];
  const Real_t* const pfy = b[1];
  const Real_t* const pfz = b[2];

  // Diagonal terms; node pairs (0,6) (1,7) (2,4) (3,5) are combined.
  d[0] = inv_detJ *
         (pfx[0] * (xvel[0] - xvel[6]) + pfx[1] * (xvel[1] - xvel[7]) +
          pfx[2] * (xvel[2] - xvel[4]) + pfx[3] * (xvel[3] - xvel[5]));

  d[1] = inv_detJ *
         (pfy[0] * (yvel[0] - yvel[6]) + pfy[1] * (yvel[1] - yvel[7]) +
          pfy[2] * (yvel[2] - yvel[4]) + pfy[3] * (yvel[3] - yvel[5]));

  d[2] = inv_detJ *
         (pfz[0] * (zvel[0] - zvel[6]) + pfz[1] * (zvel[1] - zvel[7]) +
          pfz[2] * (zvel[2] - zvel[4]) + pfz[3] * (zvel[3] - zvel[5]));

  // Off-diagonal partials, averaged in pairs below.
  dyddx = inv_detJ *
          (pfx[0] * (yvel[0] - yvel[6]) + pfx[1] * (yvel[1] - yvel[7]) +
           pfx[2] * (yvel[2] - yvel[4]) + pfx[3] * (yvel[3] - yvel[5]));

  dxddy = inv_detJ *
          (pfy[0] * (xvel[0] - xvel[6]) + pfy[1] * (xvel[1] - xvel[7]) +
           pfy[2] * (xvel[2] - xvel[4]) + pfy[3] * (xvel[3] - xvel[5]));

  dzddx = inv_detJ *
          (pfx[0] * (zvel[0] - zvel[6]) + pfx[1] * (zvel[1] - zvel[7]) +
           pfx[2] * (zvel[2] - zvel[4]) + pfx[3] * (zvel[3] - zvel[5]));

  dxddz = inv_detJ *
          (pfz[0] * (xvel[0] - xvel[6]) + pfz[1] * (xvel[1] - xvel[7]) +
           pfz[2] * (xvel[2] - xvel[4]) + pfz[3] * (xvel[3] - xvel[5]));

  dzddy = inv_detJ *
          (pfy[0] * (zvel[0] - zvel[6]) + pfy[1] * (zvel[1] - zvel[7]) +
           pfy[2] * (zvel[2] - zvel[4]) + pfy[3] * (zvel[3] - zvel[5]));

  dyddz = inv_detJ *
          (pfz[0] * (yvel[0] - yvel[6]) + pfz[1] * (yvel[1] - yvel[7]) +
           pfz[2] * (yvel[2] - yvel[4]) + pfz[3] * (yvel[3] - yvel[5]));

  d[5] = Real_t(.5) * (dxddy + dyddx);
  d[4] = Real_t(.5) * (dxddz + dzddx);
  d[3] = Real_t(.5) * (dzddy + dyddz);
}

// Kernel: per-element kinematics -- new relative volume, volume delta,
// characteristic length, and velocity-gradient diagonal (continues on the
// next source line with the global-array stores).
__global__ void CalcKinematicsForElems_kernel(
    Index_t numElem, Real_t dt, Index_t* nodelist, Real_t* volo, Real_t* v,
    Real_t* x, Real_t* y, Real_t* z, Real_t* xd, Real_t* yd, Real_t* zd,
    Real_t* vnew, Real_t* delv, Real_t* arealg, Real_t* dxx, Real_t* dyy,
    Real_t* dzz) {
  Real_t B[3][8]; /** shape function derivatives */
  Real_t D[6];
  Real_t x_local[8];
  Real_t y_local[8];
  Real_t z_local[8];
  Real_t xd_local[8];
  Real_t yd_local[8];
  Real_t zd_local[8];
  Real_t detJ = Real_t(0.0);

  int k = blockDim.x * blockIdx.x + threadIdx.x;
  if (k < numElem) {
    Real_t volume;
    Real_t relativeVolume;

    // get nodal coordinates from global arrays and copy into local arrays.
    // nodelist is laid out node-major (stride numElem), so adjacent threads
    // read adjacent entries (coalesced).
    for (Index_t lnode = 0; lnode < 8; ++lnode) {
      Index_t gnode = nodelist[k + lnode * numElem];
      x_local[lnode] = x[gnode];
      y_local[lnode] = y[gnode];
      z_local[lnode] = z[gnode];
    }

    // volume calculations
    volume = CalcElemVolume(x_local, y_local, z_local);
    relativeVolume = volume / volo[k];
    vnew[k] = relativeVolume;
    delv[k] = relativeVolume - v[k];

    // set characteristic length
    arealg[k] =
        CalcElemCharacteristicLength(x_local, y_local, z_local, volume);

    // get nodal velocities from global array and copy into local arrays.
    for (Index_t lnode = 0; lnode < 8; ++lnode) {
      Index_t gnode = nodelist[k + lnode * numElem];
      xd_local[lnode] = xd[gnode];
      yd_local[lnode] = yd[gnode];
      zd_local[lnode] = zd[gnode];
    }

    // Roll positions back half a step before forming gradients.
    Real_t dt2 = Real_t(0.5) * dt;
    for (Index_t j = 0; j < 8; ++j) {
      x_local[j] -= dt2 * xd_local[j];
      y_local[j] -= dt2 * yd_local[j];
      z_local[j] -= dt2 * zd_local[j];
    }

    CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &detJ);

    CalcElemVelocityGradient(xd_local, yd_local, zd_local, B, detJ, D);

    // put velocity gradient quantities into their global arrays.
// --- tail of CalcKinematicsForElems_kernel: store strain-rate diagonal ---
dxx[k] = D[0];
    dyy[k] = D[1];
    dzz[k] = D[2];
  }
}

// Host wrapper: launch the kinematics kernel, one thread per element.
static inline void CalcKinematicsForElems_gpu(Index_t numElem, Real_t dt,
                                              hipStream_t stream_app) {
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcKinematicsForElems_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, numElem, dt,
                     meshGPU.m_nodelist, meshGPU.m_volo, meshGPU.m_v,
                     meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, meshGPU.m_xd,
                     meshGPU.m_yd, meshGPU.m_zd, meshGPU.m_vnew,
                     meshGPU.m_delv, meshGPU.m_arealg, meshGPU.m_dxx,
                     meshGPU.m_dyy, meshGPU.m_dzz);
  CUDA_DEBUGSYNC;
}

// CPU reference path: same per-element kinematics as the kernel, serial
// (continues on the next source line).
static inline void CalcKinematicsForElems_cpu(Index_t numElem, Real_t dt) {
  Real_t B[3][8]; /** shape function derivatives */
  Real_t D[6];
  Real_t x_local[8];
  Real_t y_local[8];
  Real_t z_local[8];
  Real_t xd_local[8];
  Real_t yd_local[8];
  Real_t zd_local[8];
  Real_t detJ = Real_t(0.0);

  // loop over all elements
  for (Index_t k = 0; k < numElem; ++k) {
    Real_t volume;
    Real_t relativeVolume;

    // get nodal coordinates from global arrays and copy into local arrays.
    for (Index_t lnode = 0; lnode < 8; ++lnode) {
      Index_t gnode = mesh.nodelist(k, lnode);
      x_local[lnode] = mesh.x(gnode);
      y_local[lnode] = mesh.y(gnode);
      z_local[lnode] = mesh.z(gnode);
    }

    // volume calculations
    volume = CalcElemVolume(x_local, y_local, z_local);
    relativeVolume = volume / mesh.volo(k);
    mesh.vnew(k) = relativeVolume;
    mesh.delv(k) = relativeVolume - mesh.v(k);

    // set characteristic length
    mesh.arealg(k) =
        CalcElemCharacteristicLength(x_local, y_local, z_local, volume);

    // get nodal velocities from global array and copy into local arrays.
    for (Index_t lnode = 0; lnode < 8; ++lnode) {
      Index_t gnode = mesh.nodelist(k, lnode);
      xd_local[lnode] = mesh.xd(gnode);
      yd_local[lnode] = mesh.yd(gnode);
      zd_local[lnode] = mesh.zd(gnode);
    }

    // Roll positions back half a step before forming gradients.
    Real_t dt2 = Real_t(0.5) * dt;
    for (Index_t j = 0; j < 8; ++j) {
      x_local[j] -= dt2 * xd_local[j];
      y_local[j] -= dt2 * yd_local[j];
      z_local[j] -= dt2 * zd_local[j];
    }

    CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &detJ);

    CalcElemVelocityGradient(xd_local, yd_local, zd_local, B, detJ, D);

    // put velocity gradient quantities into their global arrays.
    mesh.dxx(k) = D[0];
    mesh.dyy(k) = D[1];
    mesh.dzz(k) = D[2];
  }
}

// CPU/GPU dispatcher for the kinematics phase (FC/FG fetch, SG/SC
// invalidate -- macros defined elsewhere).
static inline void CalcKinematicsForElems(Index_t numElem, Real_t dt,
                                          int useCPU,
                                          hipStream_t stream_app) {
  if (useCPU) {
    FC(nodelist); FC(volo); FC(v); FC(x); FC(y); FC(z); FC(xd); FC(yd);
    FC(zd);
    CalcKinematicsForElems_cpu(numElem, dt);
    SG(vnew); SG(delv); SG(arealg); SG(dxx); SG(dyy); SG(dzz);
  } else {
    FG(nodelist); FG(volo); FG(v); FG(x); FG(y); FG(z); FG(xd); FG(yd);
    FG(zd);
    CalcKinematicsForElems_gpu(numElem, dt, stream_app);
    SC(vnew); SC(delv); SC(arealg); SC(dxx); SC(dyy); SC(dzz);
  }
}

// Kernel: form vdov (volume change rate) and make the strain-rate tensor
// deviatoric (continues on the next source line).
__global__ void CalcLagrangeElementsPart2_kernel(Index_t numElem, Real_t* dxx,
                                                 Real_t* dyy, Real_t* dzz,
                                                 Real_t* vdov) {
  int k = blockDim.x * blockIdx.x + threadIdx.x;
  if (k < numElem) {
    // calc strain rate and apply as constraint (only done in FB element)
    Real_t vdovNew = dxx[k] + dyy[k] + dzz[k];
    Real_t vdovthird = vdovNew / Real_t(3.0);

    // make the rate of deformation tensor deviatoric
    vdov[k] = vdovNew;
    dxx[k] -= vdovthird;
    dyy[k] -= vdovthird;
    dzz[k] -= vdovthird;

    // See if any volumes are negative, and take appropriate action.
// --- tail of CalcLagrangeElementsPart2_kernel ---
// NOTE(review): the negative-volume check is disabled here (a device
// kernel cannot call exit()); the CPU path below does abort, so the two
// paths diverge on an invalid mesh.
// if (mesh.vnew(k) <= Real_t(0.0))
//{
//   exit(VolumeError) ;
//}
  }
}

// Host wrapper for the deviatoric strain-rate pass.
static inline void CalcLagrangeElementsPart2_gpu(hipStream_t stream_app) {
  Index_t numElem = mesh.numElem();
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcLagrangeElementsPart2_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, numElem, meshGPU.m_dxx,
                     meshGPU.m_dyy, meshGPU.m_dzz, meshGPU.m_vdov);
  CUDA_DEBUGSYNC;
}

// CPU reference path; unlike the kernel this aborts on a non-positive
// element volume (continues on the next source line).
static inline void CalcLagrangeElementsPart2_cpu() {
  Index_t numElem = mesh.numElem();
  // element loop to do some stuff not included in the elemlib function.
  for (Index_t k = 0; k < numElem; ++k) {
    // calc strain rate and apply as constraint (only done in FB element)
    Real_t vdov = mesh.dxx(k) + mesh.dyy(k) + mesh.dzz(k);
    Real_t vdovthird = vdov / Real_t(3.0);

    // make the rate of deformation tensor deviatoric
    mesh.vdov(k) = vdov;
    mesh.dxx(k) -= vdovthird;
    mesh.dyy(k) -= vdovthird;
    mesh.dzz(k) -= vdovthird;

    // See if any volumes are negative, and take appropriate action.
    if (mesh.vnew(k) <= Real_t(0.0)) {
      exit(VolumeError);
    }
  }
}

// CPU/GPU dispatcher for the deviatoric strain-rate pass.
static inline void CalcLagrangeElementsPart2(int useCPU,
                                             hipStream_t stream_app) {
  if (useCPU) {
    FC(dxx); FC(dyy); FC(dzz);
    CalcLagrangeElementsPart2_cpu();
    SG(vdov); SG(dxx); SG(dyy); SG(dzz);
  } else {
    FG(dxx); FG(dyy); FG(dzz);
    CalcLagrangeElementsPart2_gpu(stream_app);
    SC(vdov); SC(dxx); SC(dyy); SC(dzz);
  }
}

// Lagrange element phase: kinematics then deviatoric strain rates.
static inline void CalcLagrangeElements(Real_t deltatime, int useCPU,
                                        hipStream_t stream_app) {
  Index_t numElem = mesh.numElem();
  if (numElem > 0) {
    CalcKinematicsForElems(numElem, deltatime, useCPU, stream_app);
    CalcLagrangeElementsPart2(useCPU, stream_app);
  }
}

// Kernel: monotonic-Q position/velocity gradients per element along the
// xi/eta/zeta reference directions (continues over the next two source
// lines). ptiny guards the divisions against degenerate elements.
__global__ void CalcMonotonicQGradientsForElems_kernel(
    Index_t numElem, Index_t* nodelist, Real_t* x, Real_t* y, Real_t* z,
    Real_t* xd, Real_t* yd, Real_t* zd, Real_t* volo, Real_t* vnew,
    Real_t* delx_zeta, Real_t* delv_zeta, Real_t* delx_xi, Real_t* delv_xi,
    Real_t* delx_eta, Real_t* delv_eta) {
#define SUM4(a, b, c, d) (a + b + c + d)
  const Real_t ptiny = Real_t(1.e-36);
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < numElem) {
    Real_t ax, ay, az;
    Real_t dxv, dyv, dzv;

    // Gather the element's eight node indices (node-major layout).
    Index_t n0 = nodelist[i + 0 * numElem];
    Index_t n1 = nodelist[i + 1 * numElem];
    Index_t n2 = nodelist[i + 2 * numElem];
    Index_t n3 = nodelist[i + 3 * numElem];
    Index_t n4 = nodelist[i + 4 * numElem];
    Index_t n5 = nodelist[i + 5 * numElem];
    Index_t n6 = nodelist[i + 6 * numElem];
    Index_t n7 = nodelist[i + 7 * numElem];

    // Corner coordinates and velocities.
    Real_t x0 = x[n0]; Real_t x1 = x[n1]; Real_t x2 = x[n2];
    Real_t x3 = x[n3]; Real_t x4 = x[n4]; Real_t x5 = x[n5];
    Real_t x6 = x[n6]; Real_t x7 = x[n7];
    Real_t y0 = y[n0]; Real_t y1 = y[n1]; Real_t y2 = y[n2];
    Real_t y3 = y[n3]; Real_t y4 = y[n4]; Real_t y5 = y[n5];
    Real_t y6 = y[n6]; Real_t y7 = y[n7];
    Real_t z0 = z[n0]; Real_t z1 = z[n1]; Real_t z2 = z[n2];
    Real_t z3 = z[n3]; Real_t z4 = z[n4]; Real_t z5 = z[n5];
    Real_t z6 = z[n6]; Real_t z7 = z[n7];
    Real_t xv0 = xd[n0]; Real_t xv1 = xd[n1]; Real_t xv2 = xd[n2];
    Real_t xv3 = xd[n3]; Real_t xv4 =
// --- continuation of CalcMonotonicQGradientsForElems_kernel ---
xd[n4];
    Real_t xv5 = xd[n5]; Real_t xv6 = xd[n6]; Real_t xv7 = xd[n7];
    Real_t yv0 = yd[n0]; Real_t yv1 = yd[n1]; Real_t yv2 = yd[n2];
    Real_t yv3 = yd[n3]; Real_t yv4 = yd[n4]; Real_t yv5 = yd[n5];
    Real_t yv6 = yd[n6]; Real_t yv7 = yd[n7];
    Real_t zv0 = zd[n0]; Real_t zv1 = zd[n1]; Real_t zv2 = zd[n2];
    Real_t zv3 = zd[n3]; Real_t zv4 = zd[n4]; Real_t zv5 = zd[n5];
    Real_t zv6 = zd[n6]; Real_t zv7 = zd[n7];

    Real_t vol = volo[i] * vnew[i];
    Real_t norm = Real_t(1.0) / (vol + ptiny);

    // Face-center difference vectors along the three reference directions.
    Real_t dxj = Real_t(-0.25) * (SUM4(x0, x1, x5, x4) - SUM4(x3, x2, x6, x7));
    Real_t dyj = Real_t(-0.25) * (SUM4(y0, y1, y5, y4) - SUM4(y3, y2, y6, y7));
    Real_t dzj = Real_t(-0.25) * (SUM4(z0, z1, z5, z4) - SUM4(z3, z2, z6, z7));
    Real_t dxi = Real_t(0.25) * (SUM4(x1, x2, x6, x5) - SUM4(x0, x3, x7, x4));
    Real_t dyi = Real_t(0.25) * (SUM4(y1, y2, y6, y5) - SUM4(y0, y3, y7, y4));
    Real_t dzi = Real_t(0.25) * (SUM4(z1, z2, z6, z5) - SUM4(z0, z3, z7, z4));
    Real_t dxk = Real_t(0.25) * (SUM4(x4, x5, x6, x7) - SUM4(x0, x1, x2, x3));
    Real_t dyk = Real_t(0.25) * (SUM4(y4, y5, y6, y7) - SUM4(y0, y1, y2, y3));
    Real_t dzk = Real_t(0.25) * (SUM4(z4, z5, z6, z7) - SUM4(z0, z1, z2, z3));

    /* find delvk and delxk ( i cross j ) */
    ax = dyi * dzj - dzi * dyj;
    ay = dzi * dxj - dxi * dzj;
    az = dxi * dyj - dyi * dxj;

    delx_zeta[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(0.25) * (SUM4(xv4, xv5, xv6, xv7) - SUM4(xv0, xv1, xv2, xv3));
    dyv = Real_t(0.25) * (SUM4(yv4, yv5, yv6, yv7) - SUM4(yv0, yv1, yv2, yv3));
    dzv = Real_t(0.25) * (SUM4(zv4, zv5, zv6, zv7) - SUM4(zv0, zv1, zv2, zv3));

    delv_zeta[i] = ax * dxv + ay * dyv + az * dzv;

    /* find delxi and delvi ( j cross k ) */
    ax = dyj * dzk - dzj * dyk;
    ay = dzj * dxk - dxj * dzk;
    az = dxj * dyk - dyj * dxk;

    delx_xi[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(0.25) * (SUM4(xv1, xv2, xv6, xv5) - SUM4(xv0, xv3, xv7, xv4));
    dyv = Real_t(0.25) * (SUM4(yv1, yv2, yv6, yv5) - SUM4(yv0, yv3, yv7, yv4));
    dzv = Real_t(0.25) * (SUM4(zv1, zv2, zv6, zv5) - SUM4(zv0, zv3, zv7, zv4));

    delv_xi[i] = ax * dxv + ay * dyv + az * dzv;

    /* find delxj and delvj ( k cross i ) */
    ax = dyk * dzi - dzk * dyi;
    ay = dzk * dxi - dxk * dzi;
    az = dxk * dyi - dyk * dxi;

    delx_eta[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(-0.25) * (SUM4(xv0, xv1, xv5, xv4) - SUM4(xv3, xv2, xv6, xv7));
    dyv = Real_t(-0.25) * (SUM4(yv0, yv1, yv5, yv4) - SUM4(yv3, yv2, yv6, yv7));
    dzv = Real_t(-0.25) * (SUM4(zv0, zv1, zv5, zv4) - SUM4(zv3, zv2, zv6, zv7));

    delv_eta[i] = ax * dxv + ay * dyv + az * dzv;
  }
#undef SUM4
}

// Host wrapper: one thread per element.
static inline void CalcMonotonicQGradientsForElems_gpu(
    hipStream_t stream_app) {
  Index_t numElem = mesh.numElem();
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcMonotonicQGradientsForElems_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, numElem,
                     meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y,
                     meshGPU.m_z, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd,
                     meshGPU.m_volo, meshGPU.m_vnew, meshGPU.m_delx_zeta,
                     meshGPU.m_delv_zeta, meshGPU.m_delx_xi,
                     meshGPU.m_delv_xi, meshGPU.m_delx_eta,
                     meshGPU.m_delv_eta);
  CUDA_DEBUGSYNC;
}

// CPU reference path: same gradient math as the kernel, serial over
// elements (continues over the next two source lines).
static inline void CalcMonotonicQGradientsForElems_cpu() {
#define SUM4(a, b, c, d) (a + b + c + d)
  Index_t numElem = mesh.numElem();
  const Real_t ptiny = Real_t(1.e-36);

  for (Index_t i = 0; i < numElem; ++i) {
    Real_t ax, ay, az;
    Real_t dxv, dyv, dzv;

    Index_t n0 = mesh.nodelist(i, 0);
    Index_t n1 = mesh.nodelist(i, 1);
    Index_t n2 = mesh.nodelist(i, 2);
    Index_t n3 = mesh.nodelist(i, 3);
    Index_t n4 = mesh.nodelist(i, 4);
    Index_t n5 = mesh.nodelist(i, 5);
    Index_t n6 = mesh.nodelist(i, 6);
    Index_t n7 = mesh.nodelist(i, 7);

    Real_t x0 = mesh.x(n0); Real_t x1 = mesh.x(n1); Real_t x2 = mesh.x(n2);
    Real_t x3 = mesh.x(n3); Real_t x4 = mesh.x(n4); Real_t x5 = mesh.x(n5);
    Real_t x6 = mesh.x(n6); Real_t x7 = mesh.x(n7);
// --- continuation of CalcMonotonicQGradientsForElems_cpu ---
Real_t y0 = mesh.y(n0); Real_t y1 = mesh.y(n1); Real_t y2 = mesh.y(n2);
    Real_t y3 = mesh.y(n3); Real_t y4 = mesh.y(n4); Real_t y5 = mesh.y(n5);
    Real_t y6 = mesh.y(n6); Real_t y7 = mesh.y(n7);
    Real_t z0 = mesh.z(n0); Real_t z1 = mesh.z(n1); Real_t z2 = mesh.z(n2);
    Real_t z3 = mesh.z(n3); Real_t z4 = mesh.z(n4); Real_t z5 = mesh.z(n5);
    Real_t z6 = mesh.z(n6); Real_t z7 = mesh.z(n7);

    Real_t xv0 = mesh.xd(n0); Real_t xv1 = mesh.xd(n1);
    Real_t xv2 = mesh.xd(n2); Real_t xv3 = mesh.xd(n3);
    Real_t xv4 = mesh.xd(n4); Real_t xv5 = mesh.xd(n5);
    Real_t xv6 = mesh.xd(n6); Real_t xv7 = mesh.xd(n7);
    Real_t yv0 = mesh.yd(n0); Real_t yv1 = mesh.yd(n1);
    Real_t yv2 = mesh.yd(n2); Real_t yv3 = mesh.yd(n3);
    Real_t yv4 = mesh.yd(n4); Real_t yv5 = mesh.yd(n5);
    Real_t yv6 = mesh.yd(n6); Real_t yv7 = mesh.yd(n7);
    Real_t zv0 = mesh.zd(n0); Real_t zv1 = mesh.zd(n1);
    Real_t zv2 = mesh.zd(n2); Real_t zv3 = mesh.zd(n3);
    Real_t zv4 = mesh.zd(n4); Real_t zv5 = mesh.zd(n5);
    Real_t zv6 = mesh.zd(n6); Real_t zv7 = mesh.zd(n7);

    Real_t vol = mesh.volo(i) * mesh.vnew(i);
    Real_t norm = Real_t(1.0) / (vol + ptiny);

    // Face-center difference vectors along the three reference directions.
    Real_t dxj = Real_t(-0.25) * (SUM4(x0, x1, x5, x4) - SUM4(x3, x2, x6, x7));
    Real_t dyj = Real_t(-0.25) * (SUM4(y0, y1, y5, y4) - SUM4(y3, y2, y6, y7));
    Real_t dzj = Real_t(-0.25) * (SUM4(z0, z1, z5, z4) - SUM4(z3, z2, z6, z7));
    Real_t dxi = Real_t(0.25) * (SUM4(x1, x2, x6, x5) - SUM4(x0, x3, x7, x4));
    Real_t dyi = Real_t(0.25) * (SUM4(y1, y2, y6, y5) - SUM4(y0, y3, y7, y4));
    Real_t dzi = Real_t(0.25) * (SUM4(z1, z2, z6, z5) - SUM4(z0, z3, z7, z4));
    Real_t dxk = Real_t(0.25) * (SUM4(x4, x5, x6, x7) - SUM4(x0, x1, x2, x3));
    Real_t dyk = Real_t(0.25) * (SUM4(y4, y5, y6, y7) - SUM4(y0, y1, y2, y3));
    Real_t dzk = Real_t(0.25) * (SUM4(z4, z5, z6, z7) - SUM4(z0, z1, z2, z3));

    /* find delvk and delxk ( i cross j ) */
    ax = dyi * dzj - dzi * dyj;
    ay = dzi * dxj - dxi * dzj;
    az = dxi * dyj - dyi * dxj;

    mesh.delx_zeta(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(0.25) * (SUM4(xv4, xv5, xv6, xv7) - SUM4(xv0, xv1, xv2, xv3));
    dyv = Real_t(0.25) * (SUM4(yv4, yv5, yv6, yv7) - SUM4(yv0, yv1, yv2, yv3));
    dzv = Real_t(0.25) * (SUM4(zv4, zv5, zv6, zv7) - SUM4(zv0, zv1, zv2, zv3));

    mesh.delv_zeta(i) = ax * dxv + ay * dyv + az * dzv;

    /* find delxi and delvi ( j cross k ) */
    ax = dyj * dzk - dzj * dyk;
    ay = dzj * dxk - dxj * dzk;
    az = dxj * dyk - dyj * dxk;

    mesh.delx_xi(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(0.25) * (SUM4(xv1, xv2, xv6, xv5) - SUM4(xv0, xv3, xv7, xv4));
    dyv = Real_t(0.25) * (SUM4(yv1, yv2, yv6, yv5) - SUM4(yv0, yv3, yv7, yv4));
    dzv = Real_t(0.25) * (SUM4(zv1, zv2, zv6, zv5) - SUM4(zv0, zv3, zv7, zv4));

    mesh.delv_xi(i) = ax * dxv + ay * dyv + az * dzv;

    /* find delxj and delvj ( k cross i ) */
    ax = dyk * dzi - dzk * dyi;
    ay = dzk * dxi - dxk * dzi;
    az = dxk * dyi - dyk * dxi;

    mesh.delx_eta(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny);

    ax *= norm;
    ay *= norm;
    az *= norm;

    dxv = Real_t(-0.25) * (SUM4(xv0, xv1, xv5, xv4) - SUM4(xv3, xv2, xv6, xv7));
    dyv = Real_t(-0.25) * (SUM4(yv0, yv1, yv5, yv4) - SUM4(yv3, yv2, yv6, yv7));
    dzv = Real_t(-0.25) * (SUM4(zv0, zv1, zv5, zv4) - SUM4(zv3, zv2, zv6, zv7));

    mesh.delv_eta(i) = ax * dxv + ay * dyv + az * dzv;
  }
#undef SUM4
}

// CPU/GPU dispatcher for the monotonic-Q gradient phase.
static inline void CalcMonotonicQGradientsForElems(int useCPU,
                                                   hipStream_t stream_app) {
  if (useCPU) {
    FC(nodelist); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(volo);
    FC(vnew);
    CalcMonotonicQGradientsForElems_cpu();
    SG(delx_zeta); SG(delv_zeta); SG(delx_xi); SG(delv_xi); SG(delx_eta);
    SG(delv_eta);
  } else {
    FG(nodelist); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(volo);
    FG(vnew);
    CalcMonotonicQGradientsForElems_gpu(stream_app);
    SC(delx_zeta); SC(delv_zeta); SC(delx_xi); SC(delv_xi); SC(delx_eta);
    SC(delv_eta);
  }
}

// Kernel: monotonic-Q limiter per element (signature continues on the
// next source line).
__global__ void CalcMonotonicQRegionForElems_kernel(Real_t qlc_monoq,
                                                    Real_t qqc_monoq,
                                                    Real_t monoq_limiter_mult,
                                                    Real_t monoq_max_slope,
                                                    Real_t
// --- continuation of the CalcMonotonicQRegionForElems_kernel signature ---
ptiny,
                                                    // the elementset length
                                                    Index_t elength,
                                                    Index_t* matElemlist,
                                                    Index_t* elemBC,
                                                    Index_t* lxim,
                                                    Index_t* lxip,
                                                    Index_t* letam,
                                                    Index_t* letap,
                                                    Index_t* lzetam,
                                                    Index_t* lzetap,
                                                    Real_t* delv_xi,
                                                    Real_t* delv_eta,
                                                    Real_t* delv_zeta,
                                                    Real_t* delx_xi,
                                                    Real_t* delx_eta,
                                                    Real_t* delx_zeta,
                                                    Real_t* vdov,
                                                    Real_t* elemMass,
                                                    Real_t* volo,
                                                    Real_t* vnew, Real_t* qq,
                                                    Real_t* ql) {
  int ielem = blockDim.x * blockIdx.x + threadIdx.x;
  if (ielem < elength) {
    Real_t qlin, qquad;
    Real_t phixi, phieta, phizeta;
    Index_t i = matElemlist[ielem];
    Int_t bcMask = elemBC[i];
    Real_t delvm, delvp;

    /* phixi */
    // Neighbor gradients come from lxim/lxip unless a boundary flag in
    // bcMask overrides (SYMM: mirror own value, FREE: zero).
    Real_t norm = Real_t(1.) / (delv_xi[i] + ptiny);

    switch (bcMask & XI_M) {
      case 0:
        delvm = delv_xi[lxim[i]];
        break;
      case XI_M_SYMM:
        delvm = delv_xi[i];
        break;
      case XI_M_FREE:
        delvm = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }
    switch (bcMask & XI_P) {
      case 0:
        delvp = delv_xi[lxip[i]];
        break;
      case XI_P_SYMM:
        delvp = delv_xi[i];
        break;
      case XI_P_FREE:
        delvp = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }

    delvm = delvm * norm;
    delvp = delvp * norm;

    phixi = Real_t(.5) * (delvm + delvp);

    delvm *= monoq_limiter_mult;
    delvp *= monoq_limiter_mult;

    // Clamp the limiter into [0, monoq_max_slope].
    if (delvm < phixi) phixi = delvm;
    if (delvp < phixi) phixi = delvp;
    if (phixi < Real_t(0.)) phixi = Real_t(0.);
    if (phixi > monoq_max_slope) phixi = monoq_max_slope;

    /* phieta */
    norm = Real_t(1.) / (delv_eta[i] + ptiny);

    switch (bcMask & ETA_M) {
      case 0:
        delvm = delv_eta[letam[i]];
        break;
      case ETA_M_SYMM:
        delvm = delv_eta[i];
        break;
      case ETA_M_FREE:
        delvm = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }
    switch (bcMask & ETA_P) {
      case 0:
        delvp = delv_eta[letap[i]];
        break;
      case ETA_P_SYMM:
        delvp = delv_eta[i];
        break;
      case ETA_P_FREE:
        delvp = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }

    delvm = delvm * norm;
    delvp = delvp * norm;

    phieta = Real_t(.5) * (delvm + delvp);

    delvm *= monoq_limiter_mult;
    delvp *= monoq_limiter_mult;

    if (delvm < phieta) phieta = delvm;
    if (delvp < phieta) phieta = delvp;
    if (phieta < Real_t(0.)) phieta = Real_t(0.);
    if (phieta > monoq_max_slope) phieta = monoq_max_slope;

    /* phizeta */
    norm = Real_t(1.) / (delv_zeta[i] + ptiny);

    switch (bcMask & ZETA_M) {
      case 0:
        delvm = delv_zeta[lzetam[i]];
        break;
      case ZETA_M_SYMM:
        delvm = delv_zeta[i];
        break;
      case ZETA_M_FREE:
        delvm = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }
    switch (bcMask & ZETA_P) {
      case 0:
        delvp = delv_zeta[lzetap[i]];
        break;
      case ZETA_P_SYMM:
        delvp = delv_zeta[i];
        break;
      case ZETA_P_FREE:
        delvp = Real_t(0.0);
        break;
      default:
        /* ERROR */;
        break;
    }

    delvm = delvm * norm;
    delvp = delvp * norm;

    phizeta = Real_t(.5) * (delvm + delvp);

    delvm *= monoq_limiter_mult;
    delvp *= monoq_limiter_mult;

    if (delvm < phizeta) phizeta = delvm;
    if (delvp < phizeta) phizeta = delvp;
    if (phizeta < Real_t(0.)) phizeta = Real_t(0.);
    if (phizeta > monoq_max_slope) phizeta = monoq_max_slope;

    /* Remove length scale */
    // Expanding elements (vdov > 0) get no artificial viscosity.
    if (vdov[i] > Real_t(0.)) {
      qlin = Real_t(0.);
      qquad = Real_t(0.);
    } else {
      Real_t delvxxi = delv_xi[i] * delx_xi[i];
      Real_t delvxeta = delv_eta[i] * delx_eta[i];
      Real_t delvxzeta = delv_zeta[i] * delx_zeta[i];

      if (delvxxi > Real_t(0.)) delvxxi = Real_t(0.);
      if (delvxeta > Real_t(0.)) delvxeta = Real_t(0.);
      if (delvxzeta > Real_t(0.)) delvxzeta = Real_t(0.);

      Real_t rho = elemMass[i] / (volo[i] * vnew[i]);

      qlin = -qlc_monoq * rho *
             (delvxxi * (Real_t(1.) - phixi) +
              delvxeta * (Real_t(1.) - phieta) +
              delvxzeta * (Real_t(1.) - phizeta));

      qquad = qqc_monoq * rho *
              (delvxxi * delvxxi * (Real_t(1.) - phixi * phixi) +
               delvxeta * delvxeta * (Real_t(1.) - phieta * phieta) +
               delvxzeta * delvxzeta * (Real_t(1.) - phizeta * phizeta));
    }

    qq[i] = qquad;
    ql[i] = qlin;
  }
}

// Host wrapper: one thread per element in the material element set.
static inline void CalcMonotonicQRegionForElems_gpu(
    // parameters
    Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult,
    Real_t monoq_max_slope, Real_t ptiny,

    // the elementset length
    Index_t elength, hipStream_t stream_app) {
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(elength, dimBlock.x), 1, 1);
  hipLaunchKernelGGL((CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),
                     dim3(dimBlock), 0, stream_app, qlc_monoq, qqc_monoq,
                     monoq_limiter_mult, monoq_max_slope, ptiny, elength,
                     meshGPU.m_matElemlist, meshGPU.m_elemBC, meshGPU.m_lxim,
                     meshGPU.m_lxip, meshGPU.m_letam, meshGPU.m_letap,
                     meshGPU.m_lzetam, meshGPU.m_lzetap, meshGPU.m_delv_xi,
                     meshGPU.m_delv_eta, meshGPU.m_delv_zeta,
                     meshGPU.m_delx_xi, meshGPU.m_delx_eta,
                     meshGPU.m_delx_zeta, meshGPU.m_vdov, meshGPU.m_elemMass,
                     meshGPU.m_volo, meshGPU.m_vnew, meshGPU.m_qq,
                     meshGPU.m_ql);
  CUDA_DEBUGSYNC;
}

// CPU reference path: same limiter math via mesh accessors
// (continues over the next source lines).
static inline void CalcMonotonicQRegionForElems_cpu(
    // parameters
    Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult,
    Real_t monoq_max_slope, Real_t ptiny,

    // the elementset length
    Index_t elength) {
  for (Index_t ielem = 0; ielem < elength; ++ielem) {
    Real_t qlin, qquad;
    Real_t phixi, phieta, phizeta;
    Index_t i = mesh.matElemlist(ielem);
    Int_t bcMask = mesh.elemBC(i);
    Real_t delvm, delvp;

    /* phixi */
    Real_t norm = Real_t(1.)
/ (mesh.delv_xi(i) + ptiny); switch (bcMask & XI_M) { case 0: delvm = mesh.delv_xi(mesh.lxim(i)); break; case XI_M_SYMM: delvm = mesh.delv_xi(i); break; case XI_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & XI_P) { case 0: delvp = mesh.delv_xi(mesh.lxip(i)); break; case XI_P_SYMM: delvp = mesh.delv_xi(i); break; case XI_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phixi = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phixi) phixi = delvm; if (delvp < phixi) phixi = delvp; if (phixi < Real_t(0.)) phixi = Real_t(0.); if (phixi > monoq_max_slope) phixi = monoq_max_slope; /* phieta */ norm = Real_t(1.) / (mesh.delv_eta(i) + ptiny); switch (bcMask & ETA_M) { case 0: delvm = mesh.delv_eta(mesh.letam(i)); break; case ETA_M_SYMM: delvm = mesh.delv_eta(i); break; case ETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ETA_P) { case 0: delvp = mesh.delv_eta(mesh.letap(i)); break; case ETA_P_SYMM: delvp = mesh.delv_eta(i); break; case ETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phieta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phieta) phieta = delvm; if (delvp < phieta) phieta = delvp; if (phieta < Real_t(0.)) phieta = Real_t(0.); if (phieta > monoq_max_slope) phieta = monoq_max_slope; /* phizeta */ norm = Real_t(1.) 
/ (mesh.delv_zeta(i) + ptiny); switch (bcMask & ZETA_M) { case 0: delvm = mesh.delv_zeta(mesh.lzetam(i)); break; case ZETA_M_SYMM: delvm = mesh.delv_zeta(i); break; case ZETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ZETA_P) { case 0: delvp = mesh.delv_zeta(mesh.lzetap(i)); break; case ZETA_P_SYMM: delvp = mesh.delv_zeta(i); break; case ZETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phizeta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phizeta) phizeta = delvm; if (delvp < phizeta) phizeta = delvp; if (phizeta < Real_t(0.)) phizeta = Real_t(0.); if (phizeta > monoq_max_slope) phizeta = monoq_max_slope; /* Remove length scale */ if (mesh.vdov(i) > Real_t(0.)) { qlin = Real_t(0.); qquad = Real_t(0.); } else { Real_t delvxxi = mesh.delv_xi(i) * mesh.delx_xi(i); Real_t delvxeta = mesh.delv_eta(i) * mesh.delx_eta(i); Real_t delvxzeta = mesh.delv_zeta(i) * mesh.delx_zeta(i); if (delvxxi > Real_t(0.)) delvxxi = Real_t(0.); if (delvxeta > Real_t(0.)) delvxeta = Real_t(0.); if (delvxzeta > Real_t(0.)) delvxzeta = Real_t(0.); Real_t rho = mesh.elemMass(i) / (mesh.volo(i) * mesh.vnew(i)); qlin = -qlc_monoq * rho * (delvxxi * (Real_t(1.) - phixi) + delvxeta * (Real_t(1.) - phieta) + delvxzeta * (Real_t(1.) - phizeta)); qquad = qqc_monoq * rho * (delvxxi * delvxxi * (Real_t(1.) - phixi * phixi) + delvxeta * delvxeta * (Real_t(1.) - phieta * phieta) + delvxzeta * delvxzeta * (Real_t(1.) 
                                                - phizeta * phizeta));
    }

    /* store quadratic and linear viscosity coefficients on the element */
    mesh.qq(i) = qquad;
    mesh.ql(i) = qlin;
    }
}

/* Dispatch the monotonic-Q region computation to the CPU or GPU path.
   FC/FG appear to freshen a mesh field on the CPU/GPU side and SG/SC to mark
   the result stale on the other side (macros defined earlier in this file --
   TODO confirm).  Interface is identical for both paths. */
static inline void CalcMonotonicQRegionForElems(  // parameters
    Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult,
    Real_t monoq_max_slope, Real_t ptiny,
    // the elementset length
    Index_t elength, int useCPU, hipStream_t stream_app) {
    if (useCPU) {
        /* make sure all inputs are current on the host */
        FC(matElemlist); FC(elemBC);
        FC(lxim); FC(lxip); FC(letam); FC(letap); FC(lzetam); FC(lzetap);
        FC(delv_xi); FC(delv_eta); FC(delv_zeta);
        FC(delx_xi); FC(delx_eta); FC(delx_zeta);
        FC(vdov); FC(elemMass); FC(volo); FC(vnew);
        CalcMonotonicQRegionForElems_cpu(qlc_monoq, qqc_monoq,
                                         monoq_limiter_mult, monoq_max_slope,
                                         ptiny, elength);
        SG(qq); SG(ql);
    } else {
        /* make sure all inputs are current on the device */
        FG(matElemlist); FG(elemBC);
        FG(lxim); FG(lxip); FG(letam); FG(letap); FG(lzetam); FG(lzetap);
        FG(delv_xi); FG(delv_eta); FG(delv_zeta);
        FG(delx_xi); FG(delx_eta); FG(delx_zeta);
        FG(vdov); FG(elemMass); FG(volo); FG(vnew);
        CalcMonotonicQRegionForElems_gpu(qlc_monoq, qqc_monoq,
                                         monoq_limiter_mult, monoq_max_slope,
                                         ptiny, elength, stream_app);
        SC(qq); SC(ql);
    }
}

/* Compute monotonic Q over the whole (single-material) mesh. */
static inline void CalcMonotonicQForElems(int useCPU, hipStream_t stream_app) {
    //
    // initialize parameters
    //
    const Real_t ptiny = Real_t(1.e-36);
    Real_t monoq_max_slope = mesh.monoq_max_slope();
    Real_t monoq_limiter_mult = mesh.monoq_limiter_mult();

    //
    // calculate the monotonic q for pure regions
    //
    Index_t elength = mesh.numElem();
    if (elength > 0) {
        Real_t qlc_monoq = mesh.qlc_monoq();
        Real_t qqc_monoq = mesh.qqc_monoq();
        CalcMonotonicQRegionForElems(  // parameters
            qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny,
            // the elemset length
            elength, useCPU, stream_app);
    }
}

/* Top-level artificial-viscosity (Q) driver: velocity gradients first,
   then the monotonic Q itself.  The qstop overflow check is disabled below,
   so qstop/numElem are currently unused. */
static inline void CalcQForElems(int useCPU, hipStream_t stream_app) {
    Real_t qstop = mesh.qstop();
    Index_t numElem = mesh.numElem();

    //
    // MONOTONIC Q option
    //

    /* Calculate velocity gradients */
    CalcMonotonicQGradientsForElems(useCPU, stream_app);

    /* Transfer veloctiy gradients in the first order elements */
    /* problem->commElements->Transfer(CommElements::monoQ) ; */
    CalcMonotonicQForElems(useCPU, stream_app);

    /* Don't allow excessive artificial viscosity */
    /*
    if (numElem != 0) {
       Index_t idx = -1;
       for (Index_t i=0; i<numElem; ++i) {
          if ( mesh.q(i) > qstop ) {
             idx = i ;
             break ;
          }
       }
       if(idx >= 0) {
          exit(QStopError) ;
       }
    }
    */
}

/* One-thread-per-element EOS pressure update: bulk viscosity coefficient,
   its energy derivative, and the clamped new pressure. */
__global__ void CalcPressureForElems_kernel(
    Real_t* p_new, Real_t* bvc, Real_t* pbvc, Real_t* e_old,
    Real_t* compression, Real_t* vnewc, Real_t pmin, Real_t p_cut,
    Real_t eosvmax, Index_t length, Real_t c1s) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        bvc[i] = c1s * (compression[i] + Real_t(1.));
        pbvc[i] = c1s;
        p_new[i] = bvc[i] * e_old[i];
        /* snap tiny pressures to zero */
        if (FABS(p_new[i]) < p_cut) p_new[i] = Real_t(0.0);
        if (vnewc[i] >= eosvmax) /* impossible condition here? */
            p_new[i] = Real_t(0.0);
        if (p_new[i] < pmin) p_new[i] = pmin;
    }
}

/* GPU wrapper: launch the pressure kernel on stream_app.  c1s = 2/3 is the
   gamma-law constant used by both host and device paths. */
static inline void CalcPressureForElems_gpu(
    Real_t* p_new, Real_t* bvc, Real_t* pbvc, Real_t* e_old,
    Real_t* compression, Real_t* vnewc, Real_t pmin, Real_t p_cut,
    Real_t eosvmax, Index_t length, hipStream_t stream_app) {
    Real_t c1s = Real_t(2.0) / Real_t(3.0);
    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);
    hipLaunchKernelGGL(( CalcPressureForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        p_new, bvc, pbvc, e_old, compression, vnewc, pmin, p_cut, eosvmax,
        length, c1s);
    CUDA_DEBUGSYNC;
}

/* Host reference implementation of the pressure update (same math as the
   kernel, split into two passes). */
static inline void CalcPressureForElems_cpu(
    Real_t* p_new, Real_t* bvc, Real_t* pbvc, Real_t* e_old,
    Real_t* compression, Real_t* vnewc, Real_t pmin, Real_t p_cut,
    Real_t eosvmax, Index_t length) {
    Real_t c1s = Real_t(2.0) / Real_t(3.0);
    for (Index_t i = 0; i < length; ++i) {
        bvc[i] = c1s * (compression[i] + Real_t(1.));
        pbvc[i] = c1s;
    }
    for (Index_t i = 0; i < length; ++i) {
        p_new[i] = bvc[i] * e_old[i];
        if (FABS(p_new[i]) < p_cut) p_new[i] = Real_t(0.0);
        if (vnewc[i] >= eosvmax) /* impossible condition here?
                                  */
            p_new[i] = Real_t(0.0);
        if (p_new[i] < pmin) p_new[i] = pmin;
    }
}

/* Energy update, stage 1: half-step predictor e = e_old - dv*(p+q)/2 + w/2,
   floored at emin. */
__global__ void CalcEnergyForElemsPart1_kernel(
    Index_t length, Real_t emin, Real_t* e_old, Real_t* delvc, Real_t* p_old,
    Real_t* q_old, Real_t* work, Real_t* e_new) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
                   + Real_t(0.5) * work[i];
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }
}

/* Energy update, stage 2: half-step viscosity q_new from the local sound
   speed, then the corrected energy with the e_cut/emin clamps. */
__global__ void CalcEnergyForElemsPart2_kernel(
    Index_t length, Real_t rho0, Real_t e_cut, Real_t emin,
    Real_t* compHalfStep, Real_t* delvc, Real_t* pbvc, Real_t* bvc,
    Real_t* pHalfStep, Real_t* ql, Real_t* qq, Real_t* p_old, Real_t* q_old,
    Real_t* work, Real_t* e_new, Real_t* q_new) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]);

        if (delvc[i] > Real_t(0.)) {
            /* expanding: no viscosity */
            q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.);
        } else {
            /* ssc ~ (sound speed)^2; floored to keep SQRT well-defined */
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vhalf * vhalf * bvc[i] * pHalfStep[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_new[i] = (ssc * ql[i] + qq[i]);
        }

        e_new[i] = e_new[i]
                   + Real_t(0.5) * delvc[i]
                     * (Real_t(3.0) * (p_old[i] + q_old[i])
                        - Real_t(4.0) * (pHalfStep[i] + q_new[i]));
        e_new[i] += Real_t(0.5) * work[i];

        if (FABS(e_new[i]) < e_cut) {
            e_new[i] = Real_t(0.);
        }
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }
}

/* Energy update, stage 3: full-step corrector using q_tilde from the
   new pressure; weighted by sixth = 1/6 passed in by the host. */
__global__ void CalcEnergyForElemsPart3_kernel(
    Index_t length, Real_t rho0, Real_t sixth, Real_t e_cut, Real_t emin,
    Real_t* pbvc, Real_t* vnewc, Real_t* bvc, Real_t* p_new, Real_t* ql,
    Real_t* qq, Real_t* p_old, Real_t* q_old, Real_t* pHalfStep,
    Real_t* q_new, Real_t* delvc, Real_t* e_new) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        Real_t q_tilde;

        if (delvc[i] > Real_t(0.)) {
            q_tilde = Real_t(0.);
        } else {
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_tilde = (ssc * ql[i] + qq[i]);
        }

        e_new[i] = e_new[i]
                   - (Real_t(7.0) * (p_old[i] + q_old[i])
                      - Real_t(8.0) * (pHalfStep[i] + q_new[i])
                      + (p_new[i] + q_tilde)) * delvc[i] * sixth;

        if (FABS(e_new[i]) < e_cut) {
            e_new[i] = Real_t(0.);
        }
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }
}

/* Energy update, stage 4: final viscosity from the converged pressure/energy,
   with the q_cut snap-to-zero. */
__global__ void CalcEnergyForElemsPart4_kernel(
    Index_t length, Real_t rho0, Real_t q_cut, Real_t* delvc, Real_t* pbvc,
    Real_t* e_new, Real_t* vnewc, Real_t* bvc, Real_t* p_new, Real_t* ql,
    Real_t* qq, Real_t* q_new) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        if (delvc[i] <= Real_t(0.)) {
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_new[i] = (ssc * ql[i] + qq[i]);
            if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.);
        }
    }
}

/* GPU driver for the four-stage predictor/corrector energy update, with a
   pressure solve between each stage.  Allocates the pHalfStep scratch buffer
   on the device each call (see the commented-out hipFree below -- it is
   never released here). */
static inline void CalcEnergyForElems_gpu(
    Real_t* p_new, Real_t* e_new, Real_t* q_new, Real_t* bvc, Real_t* pbvc,
    Real_t* p_old, Real_t* e_old, Real_t* q_old, Real_t* compression,
    Real_t* compHalfStep, Real_t* vnewc, Real_t* work, Real_t* delvc,
    Real_t pmin, Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
    Real_t* qq, Real_t* ql, Real_t rho0, Real_t eosvmax, Index_t length,
    hipStream_t stream_app) {
    const Real_t sixth = Real_t(1.0) / Real_t(6.0);
    Real_t* pHalfStep;

    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);

    CUDA(hipMalloc(&pHalfStep, sizeof(Real_t) * length));

    /* stage 1: half-step energy predictor */
    hipLaunchKernelGGL(( CalcEnergyForElemsPart1_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, emin, e_old, delvc, p_old, q_old, work, e_new);
    CUDA_DEBUGSYNC;

    /* pressure at the half step */
    CalcPressureForElems_gpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
                             pmin, p_cut, eosvmax, length, stream_app);

    /* stage 2: half-step viscosity + energy correction */
    hipLaunchKernelGGL(( CalcEnergyForElemsPart2_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, rho0, e_cut, emin, compHalfStep, delvc, pbvc, bvc, pHalfStep,
        ql, qq, p_old, q_old, work, e_new, q_new);
    CUDA_DEBUGSYNC;
    /* (continuation of CalcEnergyForElems_gpu) full-step pressure, then the
       stage-3 corrector, another pressure solve, and the stage-4 final
       viscosity */
    CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
                             pmin, p_cut, eosvmax, length, stream_app);

    hipLaunchKernelGGL(( CalcEnergyForElemsPart3_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, rho0, sixth, e_cut, emin, pbvc, vnewc, bvc, p_new, ql, qq,
        p_old, q_old, pHalfStep, q_new, delvc, e_new);
    CUDA_DEBUGSYNC;

    CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
                             pmin, p_cut, eosvmax, length, stream_app);

    hipLaunchKernelGGL(( CalcEnergyForElemsPart4_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, rho0, q_cut, delvc, pbvc, e_new, vnewc, bvc, p_new, ql, qq,
        q_new);
    CUDA_DEBUGSYNC;

    /* NOTE(review): pHalfStep is never freed -- device memory leak per call.
       The free was deliberately (?) commented out; confirm before enabling. */
    // CUDA( hipFree(pHalfStep) );

    return;
}

/* Host reference implementation of the four-stage energy update; mirrors the
   GPU path loop-for-kernel.  Scratch pHalfStep is host-allocated and released
   at the end. */
static inline void CalcEnergyForElems_cpu(
    Real_t* p_new, Real_t* e_new, Real_t* q_new, Real_t* bvc, Real_t* pbvc,
    Real_t* p_old, Real_t* e_old, Real_t* q_old, Real_t* compression,
    Real_t* compHalfStep, Real_t* vnewc, Real_t* work, Real_t* delvc,
    Real_t pmin, Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
    Real_t* qq, Real_t* ql, Real_t rho0, Real_t eosvmax, Index_t length) {
    const Real_t sixth = Real_t(1.0) / Real_t(6.0);
    Real_t* pHalfStep = Allocate<Real_t>(length);

    /* stage 1: half-step energy predictor */
    for (Index_t i = 0; i < length; ++i) {
        e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
                   + Real_t(0.5) * work[i];
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }

    CalcPressureForElems_cpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
                             pmin, p_cut, eosvmax, length);

    /* stage 2: half-step viscosity + energy correction */
    for (Index_t i = 0; i < length; ++i) {
        Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]);

        if (delvc[i] > Real_t(0.)) {
            q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.);
        } else {
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vhalf * vhalf * bvc[i] * pHalfStep[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_new[i] = (ssc * ql[i] + qq[i]);
        }

        e_new[i] = e_new[i]
                   + Real_t(0.5) * delvc[i]
                     * (Real_t(3.0) * (p_old[i] + q_old[i])
                        - Real_t(4.0) * (pHalfStep[i] + q_new[i]));
    }

    for (Index_t i = 0; i < length; ++i) {
        e_new[i] += Real_t(0.5) * work[i];
        if (FABS(e_new[i]) < e_cut) {
            e_new[i] = Real_t(0.);
        }
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }

    CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
                             pmin, p_cut, eosvmax, length);

    /* stage 3: full-step corrector with q_tilde */
    for (Index_t i = 0; i < length; ++i) {
        Real_t q_tilde;

        if (delvc[i] > Real_t(0.)) {
            q_tilde = Real_t(0.);
        } else {
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_tilde = (ssc * ql[i] + qq[i]);
        }

        e_new[i] = e_new[i]
                   - (Real_t(7.0) * (p_old[i] + q_old[i])
                      - Real_t(8.0) * (pHalfStep[i] + q_new[i])
                      + (p_new[i] + q_tilde)) * delvc[i] * sixth;

        if (FABS(e_new[i]) < e_cut) {
            e_new[i] = Real_t(0.);
        }
        if (e_new[i] < emin) {
            e_new[i] = emin;
        }
    }

    CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
                             pmin, p_cut, eosvmax, length);

    /* stage 4: final viscosity */
    for (Index_t i = 0; i < length; ++i) {
        if (delvc[i] <= Real_t(0.)) {
            Real_t ssc = (pbvc[i] * e_new[i]
                          + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0;
            if (ssc <= Real_t(0.)) {
                ssc = Real_t(.333333e-36);
            } else {
                ssc = SQRT(ssc);
            }
            q_new[i] = (ssc * ql[i] + qq[i]);
            if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.);
        }
    }

    Release(&pHalfStep);

    return;
}

/* Sound speed per material element, scattered through matElemlist into the
   mesh-wide ss array. */
__global__ void CalcSoundSpeedForElems_kernel(
    Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc,
    Real_t* bvc, Real_t ss4o3, Index_t nz, Index_t* matElemlist, Real_t* ss) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < nz) {
        Index_t iz = matElemlist[i];
        /* ssc^2 from EOS derivatives; floored to a tiny positive value so
           SQRT stays real */
        Real_t ssTmp = (pbvc[i] * enewc[i]
                        + vnewc[i] * vnewc[i] * bvc[i] * pnewc[i]) / rho0;
        if (ssTmp <= Real_t(1.111111e-36)) {
            ssTmp = Real_t(1.111111e-36);
        }
        ss[iz] = SQRT(ssTmp);
    }
}

/* GPU wrapper for the sound-speed kernel; writes meshGPU.m_ss.
   (ss4o3 is forwarded but unused by the kernel.) */
static inline void CalcSoundSpeedForElems_gpu(
    Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc,
    Real_t* bvc, Real_t ss4o3, Index_t nz, hipStream_t stream_app) {
    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(nz, dimBlock.x), 1, 1);
    hipLaunchKernelGGL(( CalcSoundSpeedForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        vnewc, rho0, enewc, pnewc, pbvc, bvc, ss4o3, nz,
        meshGPU.m_matElemlist, meshGPU.m_ss);
    CUDA_DEBUGSYNC;
}

/* Host reference implementation of the sound-speed update. */
static inline void CalcSoundSpeedForElems_cpu(
    Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc,
    Real_t* bvc, Real_t ss4o3, Index_t nz) {
    for (Index_t i = 0; i < nz; ++i) {
        Index_t iz = mesh.matElemlist(i);
        Real_t ssTmp = (pbvc[i] * enewc[i]
                        + vnewc[i] * vnewc[i] * bvc[i] * pnewc[i]) / rho0;
        if (ssTmp <= Real_t(1.111111e-36)) {
            ssTmp = Real_t(1.111111e-36);
        }
        mesh.ss(iz) = SQRT(ssTmp);
    }
}

/* EOS stage 1: gather per-material-element state into compacted arrays and
   compute full- and half-step compressions, with eosvmin/eosvmax clamping. */
__global__ void EvalEOSForElemsPart1_kernel(
    Index_t length, Real_t eosvmin, Real_t eosvmax, Index_t* matElemlist,
    Real_t* e, Real_t* delv, Real_t* p, Real_t* q, Real_t* qq, Real_t* ql,
    Real_t* vnewc, Real_t* e_old, Real_t* delvc, Real_t* p_old,
    Real_t* q_old, Real_t* compression, Real_t* compHalfStep,
    Real_t* qq_old, Real_t* ql_old, Real_t* work) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        Index_t zidx = matElemlist[i];

        /* gather */
        e_old[i] = e[zidx];
        delvc[i] = delv[zidx];
        p_old[i] = p[zidx];
        q_old[i] = q[zidx];

        /* compression = 1/v - 1, evaluated at full and half step */
        Real_t vchalf;
        compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
        vchalf = vnewc[i] - delvc[i] * Real_t(.5);
        compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);

        if (eosvmin != Real_t(0.)) {
            if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
                compHalfStep[i] = compression[i];
            }
        }
        if (eosvmax != Real_t(0.)) {
            if (vnewc[i] >= eosvmax) { /* impossible due to calling func?
                                        */
                p_old[i] = Real_t(0.);
                compression[i] = Real_t(0.);
                compHalfStep[i] = Real_t(0.);
            }
        }

        qq_old[i] = qq[zidx];
        ql_old[i] = ql[zidx];
        work[i] = Real_t(0.);
    }
}

/* EOS stage 2: scatter the updated p/e/q back to mesh-wide storage. */
__global__ void EvalEOSForElemsPart2_kernel(
    Index_t length, Index_t* matElemlist, Real_t* p_new, Real_t* e_new,
    Real_t* q_new, Real_t* p, Real_t* e, Real_t* q) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        Index_t zidx = matElemlist[i];
        p[zidx] = p_new[i];
        e[zidx] = e_new[i];
        q[zidx] = q_new[i];
    }
}

/* GPU EOS driver: gather -> energy/pressure solve -> scatter -> sound speed.
   All compacted work arrays are hipMalloc'ed per call (the frees below are
   commented out -- device memory leaks per call; see NOTE further down). */
static inline void EvalEOSForElems_gpu(Real_t* vnewc, Index_t length,
                                       hipStream_t stream_app) {
    Real_t e_cut = mesh.e_cut();
    Real_t p_cut = mesh.p_cut();
    Real_t ss4o3 = mesh.ss4o3();
    Real_t q_cut = mesh.q_cut();

    Real_t eosvmax = mesh.eosvmax();
    Real_t eosvmin = mesh.eosvmin();
    Real_t pmin = mesh.pmin();
    Real_t emin = mesh.emin();
    Real_t rho0 = mesh.refdens();

    Real_t *e_old, *delvc, *p_old, *q_old;
    Real_t *compression, *compHalfStep;
    Real_t *qq, *ql, *work, *p_new, *e_new, *q_new, *bvc, *pbvc;

    CUDA(hipMalloc(&e_old, sizeof(Real_t) * length));
    CUDA(hipMalloc(&delvc, sizeof(Real_t) * length));
    CUDA(hipMalloc(&p_old, sizeof(Real_t) * length));
    CUDA(hipMalloc(&q_old, sizeof(Real_t) * length));
    CUDA(hipMalloc(&compression, sizeof(Real_t) * length));
    CUDA(hipMalloc(&compHalfStep, sizeof(Real_t) * length));
    CUDA(hipMalloc(&qq, sizeof(Real_t) * length));
    CUDA(hipMalloc(&ql, sizeof(Real_t) * length));
    CUDA(hipMalloc(&work, sizeof(Real_t) * length));
    CUDA(hipMalloc(&p_new, sizeof(Real_t) * length));
    CUDA(hipMalloc(&e_new, sizeof(Real_t) * length));
    CUDA(hipMalloc(&q_new, sizeof(Real_t) * length));
    CUDA(hipMalloc(&bvc, sizeof(Real_t) * length));
    CUDA(hipMalloc(&pbvc, sizeof(Real_t) * length));

    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);
    hipLaunchKernelGGL(( EvalEOSForElemsPart1_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, eosvmin, eosvmax, meshGPU.m_matElemlist, meshGPU.m_e,
        meshGPU.m_delv, meshGPU.m_p, meshGPU.m_q, meshGPU.m_qq,
        meshGPU.m_ql, vnewc, e_old, delvc, p_old, q_old, compression,
        compHalfStep, qq, ql, work);
    CUDA_DEBUGSYNC;

    /* predictor/corrector energy + pressure solve on the compacted arrays */
    CalcEnergyForElems_gpu(p_new, e_new, q_new, bvc, pbvc, p_old, e_old,
                           q_old, compression, compHalfStep, vnewc, work,
                           delvc, pmin, p_cut, e_cut, q_cut, emin, qq, ql,
                           rho0, eosvmax, length, stream_app);

    /* scatter results back to mesh-wide arrays */
    hipLaunchKernelGGL(( EvalEOSForElemsPart2_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, meshGPU.m_matElemlist, p_new, e_new, q_new, meshGPU.m_p,
        meshGPU.m_e, meshGPU.m_q);
    CUDA_DEBUGSYNC;

    CalcSoundSpeedForElems_gpu(vnewc, rho0, e_new, p_new, pbvc, bvc, ss4o3,
                               length, stream_app);

    /* NOTE(review): all 14 temporaries above leak -- frees intentionally (?)
       disabled; confirm before re-enabling. */
    /*CUDA( hipFree(pbvc) );
    CUDA( hipFree(bvc) );
    CUDA( hipFree(q_new) );
    CUDA( hipFree(e_new) );
    CUDA( hipFree(p_new) );
    CUDA( hipFree(work) );
    CUDA( hipFree(ql) );
    CUDA( hipFree(qq) );
    CUDA( hipFree(compHalfStep) );
    CUDA( hipFree(compression) );
    CUDA( hipFree(q_old) );
    CUDA( hipFree(p_old) );
    CUDA( hipFree(delvc) );
    CUDA( hipFree(e_old) );*/
}

/* Host reference EOS driver; same gather/solve/scatter structure as the GPU
   path, one loop per gathered field. */
static inline void EvalEOSForElems_cpu(Real_t* vnewc, Index_t length) {
    Real_t e_cut = mesh.e_cut();
    Real_t p_cut = mesh.p_cut();
    Real_t ss4o3 = mesh.ss4o3();
    Real_t q_cut = mesh.q_cut();

    Real_t eosvmax = mesh.eosvmax();
    Real_t eosvmin = mesh.eosvmin();
    Real_t pmin = mesh.pmin();
    Real_t emin = mesh.emin();
    Real_t rho0 = mesh.refdens();

    Real_t* e_old = Allocate<Real_t>(length);
    Real_t* delvc = Allocate<Real_t>(length);
    Real_t* p_old = Allocate<Real_t>(length);
    Real_t* q_old = Allocate<Real_t>(length);
    Real_t* compression = Allocate<Real_t>(length);
    Real_t* compHalfStep = Allocate<Real_t>(length);
    Real_t* qq = Allocate<Real_t>(length);
    Real_t* ql = Allocate<Real_t>(length);
    Real_t* work = Allocate<Real_t>(length);
    Real_t* p_new = Allocate<Real_t>(length);
    Real_t* e_new = Allocate<Real_t>(length);
    Real_t* q_new = Allocate<Real_t>(length);
    Real_t* bvc = Allocate<Real_t>(length);
    Real_t* pbvc = Allocate<Real_t>(length);

    /* compress data, minimal set */
    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        e_old[i] = mesh.e(zidx);
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        delvc[i] = mesh.delv(zidx);
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        p_old[i] = mesh.p(zidx);
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        q_old[i] = mesh.q(zidx);
    }

    /* compression at full and half step */
    for (Index_t i = 0; i < length; ++i) {
        Real_t vchalf;
        compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
        vchalf = vnewc[i] - delvc[i] * Real_t(.5);
        compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);
    }

    /* Check for v > eosvmax or v < eosvmin */
    if (eosvmin != Real_t(0.)) {
        for (Index_t i = 0; i < length; ++i) {
            if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
                compHalfStep[i] = compression[i];
            }
        }
    }
    if (eosvmax != Real_t(0.)) {
        for (Index_t i = 0; i < length; ++i) {
            if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */
                p_old[i] = Real_t(0.);
                compression[i] = Real_t(0.);
                compHalfStep[i] = Real_t(0.);
            }
        }
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        qq[i] = mesh.qq(zidx);
        ql[i] = mesh.ql(zidx);
        work[i] = Real_t(0.);
    }

    CalcEnergyForElems_cpu(p_new, e_new, q_new, bvc, pbvc, p_old, e_old,
                           q_old, compression, compHalfStep, vnewc, work,
                           delvc, pmin, p_cut, e_cut, q_cut, emin, qq, ql,
                           rho0, eosvmax, length);

    /* scatter results back */
    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        mesh.p(zidx) = p_new[i];
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        mesh.e(zidx) = e_new[i];
    }

    for (Index_t i = 0; i < length; ++i) {
        Index_t zidx = mesh.matElemlist(i);
        mesh.q(zidx) = q_new[i];
    }

    CalcSoundSpeedForElems_cpu(vnewc, rho0, e_new, p_new, pbvc, bvc, ss4o3,
                               length);

    Release(&pbvc);
    Release(&bvc);
    Release(&q_new);
    Release(&e_new);
    Release(&p_new);
    Release(&work);
    Release(&ql);
    Release(&qq);
    Release(&compHalfStep);
    Release(&compression);
    Release(&q_old);
    Release(&p_old);
    Release(&delvc);
    Release(&e_old);
}

/* Gather the new relative volumes through matElemlist and clamp them into
   [eosvmin, eosvmax] (a clamp is skipped when its bound is zero). */
__global__ void ApplyMaterialPropertiesForElemsPart1_kernel( Index_t
    length, Real_t eosvmin, Real_t eosvmax, Index_t* matElemlist,
    Real_t* vnew, Real_t* vnewc) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        Index_t zn = matElemlist[i];
        vnewc[i] = vnew[zn];

        if (eosvmin != Real_t(0.)) {
            if (vnewc[i] < eosvmin) vnewc[i] = eosvmin;
        }
        if (eosvmax != Real_t(0.)) {
            if (vnewc[i] > eosvmax) vnewc[i] = eosvmax;
        }
    }
}

/* GPU path: clamp volumes then run the EOS.  vnewc is hipMalloc'ed here and
   never freed (see the commented-out hipFree) -- leaks per call.  The CPU
   path's negative-volume check is also disabled here (commented block). */
static inline void ApplyMaterialPropertiesForElems_gpu(
    hipStream_t stream_app) {
    Index_t length = mesh.numElem();

    if (length != 0) {
        /* Expose all of the variables needed for material evaluation */
        Real_t eosvmin = mesh.eosvmin();
        Real_t eosvmax = mesh.eosvmax();
        Real_t* vnewc;

        CUDA(hipMalloc(&vnewc, sizeof(Real_t) * length));

        dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
        dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);
        hipLaunchKernelGGL(( ApplyMaterialPropertiesForElemsPart1_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
            length, eosvmin, eosvmax, meshGPU.m_matElemlist, meshGPU.m_vnew,
            vnewc);
        CUDA_DEBUGSYNC;

        /*
        for (Index_t i=0; i<length; ++i) {
           Index_t zn = mesh.matElemlist(i) ;
           Real_t vc = mesh.v(zn) ;
           if (eosvmin != Real_t(0.)) {
              if (vc < eosvmin)
                 vc = eosvmin ;
           }
           if (eosvmax != Real_t(0.)) {
              if (vc > eosvmax)
                 vc = eosvmax ;
           }
           if (vc <= 0.) {
              exit(VolumeError) ;
           }
        }
        */

        EvalEOSForElems_gpu(vnewc, length, stream_app);

        // CUDA( hipFree(vnewc) );
    }
}

/* Host path: clamp volumes, abort on non-positive element volume, run EOS. */
static inline void ApplyMaterialPropertiesForElems_cpu() {
    Index_t length = mesh.numElem();

    if (length != 0) {
        /* Expose all of the variables needed for material evaluation */
        Real_t eosvmin = mesh.eosvmin();
        Real_t eosvmax = mesh.eosvmax();
        Real_t* vnewc = Allocate<Real_t>(length);

        for (Index_t i = 0; i < length; ++i) {
            Index_t zn = mesh.matElemlist(i);
            vnewc[i] = mesh.vnew(zn);
        }

        if (eosvmin != Real_t(0.)) {
            for (Index_t i = 0; i < length; ++i) {
                if (vnewc[i] < eosvmin) vnewc[i] = eosvmin;
            }
        }

        if (eosvmax != Real_t(0.)) {
            for (Index_t i = 0; i < length; ++i) {
                if (vnewc[i] > eosvmax) vnewc[i] = eosvmax;
            }
        }

        /* sanity check on the (clamped) reference volumes; bail out on a
           collapsed element */
        for (Index_t i = 0; i < length; ++i) {
            Index_t zn = mesh.matElemlist(i);
            Real_t vc = mesh.v(zn);
            if (eosvmin != Real_t(0.)) {
                if (vc < eosvmin) vc = eosvmin;
            }
            if (eosvmax != Real_t(0.)) {
                if (vc > eosvmax) vc = eosvmax;
            }
            if (vc <= 0.) {
                exit(VolumeError);
            }
        }

        EvalEOSForElems_cpu(vnewc, length);

        Release(&vnewc);
    }
}

/* CPU/GPU dispatcher for the material/EOS update; freshens inputs on the
   chosen side and marks outputs stale on the other (FC/FG/SG/SC macros). */
static inline void ApplyMaterialPropertiesForElems(int useCPU,
                                                   hipStream_t stream_app) {
    if (useCPU) {
        FC(matElemlist); FC(vnew); FC(v); FC(e); FC(delv); FC(p); FC(q);
        FC(qq); FC(ql);
        ApplyMaterialPropertiesForElems_cpu();
        SG(p); SG(e); SG(q); SG(ss);
    } else {
        FG(matElemlist); FG(vnew); FG(v); FG(e); FG(delv); FG(p); FG(q);
        FG(qq); FG(ql);
        ApplyMaterialPropertiesForElems_gpu(stream_app);
        SC(p); SC(e); SC(q); SC(ss);
    }
}

/* Copy vnew -> v, snapping values within v_cut of 1.0 to exactly 1.0. */
__global__ void UpdateVolumesForElems_kernel(Index_t numElem, Real_t v_cut,
                                             Real_t* vnew, Real_t* v) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElem) {
        Real_t tmpV;
        tmpV = vnew[i];
        if (FABS(tmpV - Real_t(1.0)) < v_cut) tmpV = Real_t(1.0);
        v[i] = tmpV;
    }
}

/* GPU wrapper for the volume copy-back. */
static inline void UpdateVolumesForElems_gpu(hipStream_t stream_app) {
    Index_t numElem = mesh.numElem();
    if (numElem != 0) {
        Real_t v_cut = mesh.v_cut();
        dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
        dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
        /* (continuation of UpdateVolumesForElems_gpu) launch the copy-back */
        hipLaunchKernelGGL(( UpdateVolumesForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
            numElem, v_cut, meshGPU.m_vnew, meshGPU.m_v);
    }
}

/* Host reference for the volume copy-back with the v_cut snap. */
static inline void UpdateVolumesForElems_cpu() {
    Index_t numElem = mesh.numElem();
    if (numElem != 0) {
        Real_t v_cut = mesh.v_cut();
        for (Index_t i = 0; i < numElem; ++i) {
            Real_t tmpV;
            tmpV = mesh.vnew(i);
            if (FABS(tmpV - Real_t(1.0)) < v_cut) tmpV = Real_t(1.0);
            mesh.v(i) = tmpV;
        }
    }
    return;
}

/* CPU/GPU dispatcher for the volume update. */
static inline void UpdateVolumesForElems(int useCPU, hipStream_t stream_app) {
    if (useCPU) {
        FC(vnew);
        UpdateVolumesForElems_cpu();
        SG(v);
    } else {
        FG(vnew);
        UpdateVolumesForElems_gpu(stream_app);
        SC(v);
    }
}

/* Element-phase driver: kinematics, artificial viscosity, EOS, then volume
   copy-back. */
static inline void LagrangeElements(int useCPU, hipStream_t stream_app) {
    const Real_t deltatime = mesh.deltatime();

    CalcLagrangeElements(deltatime, useCPU, stream_app);

    /* Calculate Q.  (Monotonic q option requires communication) */
    CalcQForElems(useCPU, stream_app);

    ApplyMaterialPropertiesForElems(useCPU, stream_app);

    UpdateVolumesForElems(useCPU, stream_app);
}

/* Per-block min-reduction of the Courant timestep; one partial min per block
   is written to mindtcourant[blockIdx.x] via the shared-memory reduceMin. */
__global__ void CalcCourantConstraintForElems_kernel(
    Index_t length, Real_t qqc2, Index_t* matElemlist, Real_t* ss,
    Real_t* vdov, Real_t* arealg, Real_t* mindtcourant) {
    __shared__ Real_t minArray[BLOCKSIZE];

    int i = blockDim.x * blockIdx.x + threadIdx.x;

    Real_t dtcourant = Real_t(1.0e+20);
    if (i < length) {
        Index_t indx = matElemlist[i];
        Real_t dtf = ss[indx] * ss[indx];
        if (vdov[indx] < Real_t(0.)) {
            dtf = dtf + qqc2 * arealg[indx] * arealg[indx] * vdov[indx]
                        * vdov[indx];
        }
        dtf = SQRT(dtf);
        dtf = arealg[indx] / dtf;

        /* determine minimum timestep with its corresponding elem */
        if (vdov[indx] != Real_t(0.)) {
            if (dtf < dtcourant) {
                dtcourant = dtf;
            }
        }
    }
    minArray[threadIdx.x] = dtcourant;
    reduceMin<Real_t, BLOCKSIZE>(minArray, threadIdx.x);
    if (threadIdx.x == 0) mindtcourant[blockIdx.x] = minArray[0];
}

/* GPU Courant constraint: kernel produces per-block minima; host finishes
   the reduction after an async D2H copy.  dev_mindtcourant is never freed
   (commented out) -- leaks per call.  mutexapp is released here so the
   caller's pipeline can proceed while we synchronize. */
static inline void CalcCourantConstraintForElems_gpu(
    hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
    Real_t qqc = mesh.qqc();
    Real_t qqc2 = Real_t(64.0) * qqc * qqc;
    Index_t length = mesh.numElem();

    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);

    Real_t* dev_mindtcourant;
    CUDA(hipMalloc(&dev_mindtcourant, sizeof(Real_t) * dimGrid.x));

    hipLaunchKernelGGL(( CalcCourantConstraintForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, qqc2, meshGPU.m_matElemlist, meshGPU.m_ss, meshGPU.m_vdov,
        meshGPU.m_arealg, dev_mindtcourant);
    CUDA_DEBUGSYNC;
    pthread_mutex_unlock(mutexapp);
    if (flag) cutilSafeCall(hipStreamSynchronize(stream_app));

    Real_t* mindtcourant = (Real_t*)malloc(sizeof(Real_t) * dimGrid.x);
    CUDA(hipMemcpyAsync(mindtcourant, dev_mindtcourant,
                        sizeof(Real_t) * dimGrid.x, hipMemcpyDeviceToHost,
                        stream_app));
    // CUDA( hipFree(dev_mindtcourant) );

    // finish the MIN computation over the thread blocks
    Real_t dtcourant;
    dtcourant = mindtcourant[0];
    for (int i = 1; i < dimGrid.x; i++) {
        MINEQ(dtcourant, mindtcourant[i]);
    }
    if (flag) cutilSafeCall(hipStreamSynchronize(stream_app));
    free(mindtcourant);

    if (dtcourant < Real_t(1.0e+20)) mesh.dtcourant() = dtcourant;
}

/* Host reference Courant constraint; also tracks the constraining element. */
static inline void CalcCourantConstraintForElems_cpu() {
    Real_t dtcourant = Real_t(1.0e+20);
    Index_t courant_elem = -1;
    Real_t qqc = mesh.qqc();
    Index_t length = mesh.numElem();

    Real_t qqc2 = Real_t(64.0) * qqc * qqc;

    for (Index_t i = 0; i < length; ++i) {
        Index_t indx = mesh.matElemlist(i);

        Real_t dtf = mesh.ss(indx) * mesh.ss(indx);

        if (mesh.vdov(indx) < Real_t(0.)) {
            dtf = dtf + qqc2 * mesh.arealg(indx) * mesh.arealg(indx)
                        * mesh.vdov(indx) * mesh.vdov(indx);
        }

        dtf = SQRT(dtf);

        dtf = mesh.arealg(indx) / dtf;

        /* determine minimum timestep with its corresponding elem */
        if (mesh.vdov(indx) != Real_t(0.)) {
            if (dtf < dtcourant) {
                dtcourant = dtf;
                courant_elem = indx;
            }
        }
    }

    /* Don't try to register a time constraint if none of the elements
     * were active */
    if (courant_elem != -1) {
        mesh.dtcourant() = dtcourant;
    }

    return;
}

static inline void CalcCourantConstraintForElems(int useCPU,
                                                 /* (signature continues from
                                                    the previous line) */
                                                 hipStream_t stream_app,
                                                 pthread_mutex_t* mutexapp,
                                                 bool flag) {
    /* CPU/GPU dispatcher for the Courant timestep constraint */
    if (useCPU) {
        FC(matElemlist); FC(ss); FC(vdov); FC(arealg);
        CalcCourantConstraintForElems_cpu();
    } else {
        FG(matElemlist); FG(ss); FG(vdov); FG(arealg);
        CalcCourantConstraintForElems_gpu(stream_app, mutexapp, flag);
    }
}

/* Per-block min-reduction of the hydro (volume-change) timestep limit. */
__global__ void CalcHydroConstraintForElems_kernel(
    Index_t length, Real_t dvovmax, Index_t* matElemlist, Real_t* vdov,
    Real_t* mindthydro) {
    __shared__ Real_t minArray[BLOCKSIZE];

    int i = blockDim.x * blockIdx.x + threadIdx.x;

    Real_t dthydro = Real_t(1.0e+20);
    if (i < length) {
        Index_t indx = matElemlist[i];
        if (vdov[indx] != Real_t(0.)) {
            /* 1e-20 guards the divide for vanishing vdov */
            Real_t dtdvov = dvovmax / (FABS(vdov[indx]) + Real_t(1.e-20));
            if (dthydro > dtdvov) {
                dthydro = dtdvov;
            }
        }
    }
    minArray[threadIdx.x] = dthydro;
    reduceMin<Real_t, BLOCKSIZE>(minArray, threadIdx.x);
    if (threadIdx.x == 0) mindthydro[blockIdx.x] = minArray[0];
}

/* GPU hydro constraint: per-block minima reduced on the host, same pattern
   (and same un-freed device buffer) as the Courant version. */
static inline void CalcHydroConstraintForElems_gpu(hipStream_t stream_app,
                                                   pthread_mutex_t* mutexapp,
                                                   bool flag) {
    Real_t dvovmax = mesh.dvovmax();
    Index_t length = mesh.numElem();

    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);

    Real_t* dev_mindthydro;
    CUDA(hipMalloc(&dev_mindthydro, sizeof(Real_t) * dimGrid.x));

    hipLaunchKernelGGL(( CalcHydroConstraintForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream_app,
        length, dvovmax, meshGPU.m_matElemlist, meshGPU.m_vdov,
        dev_mindthydro);
    CUDA_DEBUGSYNC;
    pthread_mutex_unlock(mutexapp);
    if (flag) cutilSafeCall(hipStreamSynchronize(stream_app));

    Real_t* mindthydro = (Real_t*)malloc(sizeof(Real_t) * dimGrid.x);
    CUDA(hipMemcpyAsync(mindthydro, dev_mindthydro,
                        sizeof(Real_t) * dimGrid.x, hipMemcpyDeviceToHost,
                        stream_app));
    // CUDA( hipFree(dev_mindthydro) );

    // finish the MIN computation over the thread blocks
    Real_t dthydro = mindthydro[0];
    for (int i = 1; i < dimGrid.x; i++) {
        MINEQ(dthydro, mindthydro[i]);
    }
    if (flag) cutilSafeCall(hipStreamSynchronize(stream_app));
    free(mindthydro);

    if (dthydro < Real_t(1.0e+20)) mesh.dthydro() = dthydro;
}

/* Host reference hydro constraint; also tracks the constraining element. */
static inline void CalcHydroConstraintForElems_cpu() {
    Real_t dthydro = Real_t(1.0e+20);
    Index_t hydro_elem = -1;
    Real_t dvovmax = mesh.dvovmax();
    Index_t length = mesh.numElem();

    for (Index_t i = 0; i < length; ++i) {
        Index_t indx = mesh.matElemlist(i);

        if (mesh.vdov(indx) != Real_t(0.)) {
            Real_t dtdvov = dvovmax / (FABS(mesh.vdov(indx)) + Real_t(1.e-20));
            if (dthydro > dtdvov) {
                dthydro = dtdvov;
                hydro_elem = indx;
            }
        }
    }

    if (hydro_elem != -1) {
        mesh.dthydro() = dthydro;
    }

    return;
}

/* CPU/GPU dispatcher for the hydro timestep constraint. */
static inline void CalcHydroConstraintForElems(int useCPU,
                                               hipStream_t stream_app,
                                               pthread_mutex_t* mutexapp,
                                               bool flag) {
    if (useCPU) {
        FC(matElemlist); FC(vdov);
        CalcHydroConstraintForElems_cpu();
    } else {
        FG(matElemlist); FG(vdov);
        CalcHydroConstraintForElems_gpu(stream_app, mutexapp, flag);
    }
}

/* Evaluate both timestep constraints.  Each *_gpu helper unlocks mutexapp,
   so it is re-acquired between/after the calls. */
static inline void CalcTimeConstraintsForElems(int useCPU,
                                               hipStream_t stream_app,
                                               pthread_mutex_t* mutexapp,
                                               bool flag) {
    /* evaluate time constraint */
    CalcCourantConstraintForElems(useCPU, stream_app, mutexapp, flag);
    pthread_mutex_lock(mutexapp);

    /* check hydro constraint */
    CalcHydroConstraintForElems(useCPU, stream_app, mutexapp, flag);
    pthread_mutex_lock(mutexapp);
}

/* One leapfrog timestep: nodal phase, element phase, then the next-step
   time constraints. */
static inline void LagrangeLeapFrog(int useCPU, hipStream_t stream_app,
                                    pthread_mutex_t* mutexapp, bool flag) {
    /* calculate nodal forces, accelerations, velocities, positions, with
     * applied boundary conditions and slide surface considerations */
    LagrangeNodal(useCPU, stream_app);

    /* calculate element quantities (i.e.
       velocity gradient & q), and update
     * material states */
    LagrangeElements(useCPU, stream_app);

    CalcTimeConstraintsForElems(useCPU, stream_app, mutexapp, flag);

    // LagrangeRelease() ;  Creation/destruction of temps may be important to
    // capture
}

/* Entry point: build the Sedov problem on a 45^3-element uniform box,
   initialize material state and connectivity, then timestep to stoptime.
   The main loop currently forces the GPU path (useCPU=0). */
int main_lulesh(hipStream_t stream_app, pthread_mutex_t* mutexapp,
                bool flag) {
    Index_t edgeElems = 45;
    Index_t edgeNodes = edgeElems + 1;
    // Real_t ds = Real_t(1.125)/Real_t(edgeElems) ; /* may accumulate
    // roundoff */
    Real_t tx, ty, tz;
    Index_t nidx, zidx;
    Index_t meshElems;

    streamApp = stream_app;
    /* get run options to measure various metrics */

    /* ... */
    cuda_init();

    /****************************/
    /*   Initialize Sedov Mesh  */
    /****************************/

    /* construct a uniform box for this processor */

    mesh.sizeX() = edgeElems;
    mesh.sizeY() = edgeElems;
    mesh.sizeZ() = edgeElems;
    mesh.numElem() = edgeElems * edgeElems * edgeElems;
    mesh.numNode() = edgeNodes * edgeNodes * edgeNodes;

    meshElems = mesh.numElem();

    /* allocate field memory */

    mesh.AllocateElemPersistent(mesh.numElem());
    mesh.AllocateElemTemporary(mesh.numElem());

    mesh.AllocateNodalPersistent(mesh.numNode());
    mesh.AllocateNodesets(edgeNodes * edgeNodes);

    /* initialize nodal coordinates: node spacing is 1.125/edgeElems,
       recomputed from the index each step to avoid accumulating roundoff */

    nidx = 0;
    tz = Real_t(0.);
    for (Index_t plane = 0; plane < edgeNodes; ++plane) {
        ty = Real_t(0.);
        for (Index_t row = 0; row < edgeNodes; ++row) {
            tx = Real_t(0.);
            for (Index_t col = 0; col < edgeNodes; ++col) {
                mesh.x(nidx) = tx;
                mesh.y(nidx) = ty;
                mesh.z(nidx) = tz;
                ++nidx;
                // tx += ds ; /* may accumulate roundoff... */
                tx = Real_t(1.125) * Real_t(col + 1) / Real_t(edgeElems);
            }
            // ty += ds ; /* may accumulate roundoff... */
            ty = Real_t(1.125) * Real_t(row + 1) / Real_t(edgeElems);
        }
        // tz += ds ; /* may accumulate roundoff... */
        tz = Real_t(1.125) * Real_t(plane + 1) / Real_t(edgeElems);
    }

    /* embed hexehedral elements in nodal point lattice */

    nidx = 0;
    zidx = 0;
    for (Index_t plane = 0; plane < edgeElems; ++plane) {
        for (Index_t row = 0; row < edgeElems; ++row) {
            for (Index_t col = 0; col < edgeElems; ++col) {
                /* standard hex node ordering: bottom face then top face */
                mesh.nodelist(zidx, 0) = nidx;
                mesh.nodelist(zidx, 1) = nidx + 1;
                mesh.nodelist(zidx, 2) = nidx + edgeNodes + 1;
                mesh.nodelist(zidx, 3) = nidx + edgeNodes;
                mesh.nodelist(zidx, 4) = nidx + edgeNodes * edgeNodes;
                mesh.nodelist(zidx, 5) = nidx + edgeNodes * edgeNodes + 1;
                mesh.nodelist(zidx, 6) = nidx + edgeNodes * edgeNodes
                                         + edgeNodes + 1;
                mesh.nodelist(zidx, 7) = nidx + edgeNodes * edgeNodes
                                         + edgeNodes;
                ++zidx;
                ++nidx;
            }
            ++nidx;
        }
        nidx += edgeNodes;
    }

    /* Create a material IndexSet (entire mesh same material for now) */
    for (Index_t i = 0; i < meshElems; ++i) {
        mesh.matElemlist(i) = i;
    }

    /* initialize material parameters */
    mesh.dtfixed() = Real_t(-1.0e-7);
    mesh.deltatime() = Real_t(1.0e-7);
    mesh.deltatimemultlb() = Real_t(1.1);
    mesh.deltatimemultub() = Real_t(1.2);
    mesh.stoptime() = Real_t(1.0e-2);
    mesh.dtcourant() = Real_t(1.0e+20);
    mesh.dthydro() = Real_t(1.0e+20);
    mesh.dtmax() = Real_t(1.0e-2);
    mesh.time() = Real_t(0.);
    mesh.cycle() = 0;

    mesh.e_cut() = Real_t(1.0e-7);
    mesh.p_cut() = Real_t(1.0e-7);
    mesh.q_cut() = Real_t(1.0e-7);
    mesh.u_cut() = Real_t(1.0e-7);
    mesh.v_cut() = Real_t(1.0e-10);

    mesh.hgcoef() = Real_t(3.0);
    mesh.ss4o3() = Real_t(4.0) / Real_t(3.0);

    mesh.qstop() = Real_t(1.0e+12);
    mesh.monoq_max_slope() = Real_t(1.0);
    mesh.monoq_limiter_mult() = Real_t(2.0);
    mesh.qlc_monoq() = Real_t(0.5);
    mesh.qqc_monoq() = Real_t(2.0) / Real_t(3.0);
    mesh.qqc() = Real_t(2.0);

    mesh.pmin() = Real_t(0.);
    mesh.emin() = Real_t(-1.0e+15);

    mesh.dvovmax() = Real_t(0.1);

    mesh.eosvmax() = Real_t(1.0e+9);
    mesh.eosvmin() = Real_t(1.0e-9);

    mesh.refdens() = Real_t(1.0);

    /* initialize field data */
    for (Index_t i = 0; i < meshElems; ++i) {
        Real_t x_local[8], y_local[8], z_local[8];
        for (Index_t
lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(i, lnode); x_local[lnode] = mesh.x(gnode); y_local[lnode] = mesh.y(gnode); z_local[lnode] = mesh.z(gnode); } // volume calculations Real_t volume = CalcElemVolume(x_local, y_local, z_local); mesh.volo(i) = volume; mesh.elemMass(i) = volume; for (Index_t j = 0; j < 8; ++j) { Index_t idx = mesh.nodelist(i, j); mesh.nodalMass(idx) += volume / Real_t(8.0); } } /* deposit energy */ mesh.e(0) = Real_t(3.948746e+7); /* set up symmetry nodesets */ nidx = 0; for (Index_t i = 0; i < edgeNodes; ++i) { Index_t planeInc = i * edgeNodes * edgeNodes; Index_t rowInc = i * edgeNodes; for (Index_t j = 0; j < edgeNodes; ++j) { mesh.symmX(nidx) = planeInc + j * edgeNodes; mesh.symmY(nidx) = planeInc + j; mesh.symmZ(nidx) = rowInc + j; ++nidx; } } /* set up elemement connectivity information */ mesh.lxim(0) = 0; for (Index_t i = 1; i < meshElems; ++i) { mesh.lxim(i) = i - 1; mesh.lxip(i - 1) = i; } mesh.lxip(meshElems - 1) = meshElems - 1; for (Index_t i = 0; i < edgeElems; ++i) { mesh.letam(i) = i; mesh.letap(meshElems - edgeElems + i) = meshElems - edgeElems + i; } for (Index_t i = edgeElems; i < meshElems; ++i) { mesh.letam(i) = i - edgeElems; mesh.letap(i - edgeElems) = i; } for (Index_t i = 0; i < edgeElems * edgeElems; ++i) { mesh.lzetam(i) = i; mesh.lzetap(meshElems - edgeElems * edgeElems + i) = meshElems - edgeElems * edgeElems + i; } for (Index_t i = edgeElems * edgeElems; i < meshElems; ++i) { mesh.lzetam(i) = i - edgeElems * edgeElems; mesh.lzetap(i - edgeElems * edgeElems) = i; } /* set up boundary condition information */ for (Index_t i = 0; i < meshElems; ++i) { mesh.elemBC(i) = 0; /* clear BCs by default */ } /* faces on "external" boundaries will be */ /* symmetry plane or free surface BCs */ for (Index_t i = 0; i < edgeElems; ++i) { Index_t planeInc = i * edgeElems * edgeElems; Index_t rowInc = i * edgeElems; for (Index_t j = 0; j < edgeElems; ++j) { mesh.elemBC(planeInc + j * edgeElems) |= XI_M_SYMM; 
mesh.elemBC(planeInc + j * edgeElems + edgeElems - 1) |= XI_P_FREE; mesh.elemBC(planeInc + j) |= ETA_M_SYMM; mesh.elemBC(planeInc + j + edgeElems * edgeElems - edgeElems) |= ETA_P_FREE; mesh.elemBC(rowInc + j) |= ZETA_M_SYMM; mesh.elemBC(rowInc + j + meshElems - edgeElems * edgeElems) |= ZETA_P_FREE; } } mesh.AllocateNodeElemIndexes(); /* initialize meshGPU */ meshGPU.init(&mesh); meshGPU.freshenGPU(); /* timestep to solution */ int its = 0; #if 0 while (its<50) { #else while (mesh.time() < mesh.stoptime()) { #endif TimeIncrement(); LagrangeLeapFrog(0, stream_app, mutexapp, flag); its++; /* problem->commNodes->Transfer(CommNodes::syncposvel) ; */ #if LULESH_SHOW_PROGRESS printf("time = %e, dt=%e\n", double(mesh.time()), double(mesh.deltatime())); #endif } printf("iterations: %d\n", its); // FC(x); // FILE *fp = fopen("x.asc","wb"); // for (Index_t i=0; i<mesh.numElem(); i++) // fprintf(fp,"%.6f\n",mesh.x(i)); // fclose(fp); return 0; }
7c20f6f270259cb127997d2633c0e5cf971d0c57.cu
/* Copyright (c) 2010. Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-461231 All rights reserved. This file is part of LULESH, Version 1.0. Please also read this link -- http://www.opensource.org/licenses/index.php Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Additional BSD Notice 1. This notice is required to be provided under our contract with the U.S. Department of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE. 2. 
Neither the United States Government nor Lawrence Livermore National Security, LLC nor any of their employees, makes any warranty, express or implied, or assumes any liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately-owned rights. 3. Also, reference herein to any specific commercial products, process, or services by trade name, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes. */ #include <cuda.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include "../benchmark_common.h" #define LULESH_SHOW_PROGRESS 1 enum { VolumeError = -1, QStopError = -2 }; cudaStream_t streamApp; /****************************************************/ /* Allow flexibility for arithmetic representations */ /****************************************************/ /* Could also support fixed point and interval arithmetic types */ typedef float real4; typedef double real8; typedef long double real10; /* 10 bytes on x86 */ typedef int Index_t; /* array subscript and loop index */ typedef real8 Real_t; /* floating point representation */ typedef int Int_t; /* integer representation */ __host__ __device__ inline real4 SQRT(real4 arg) { return sqrtf(arg); } __host__ __device__ inline real8 SQRT(real8 arg) { return sqrt(arg); } __host__ inline real10 SQRT(real10 arg) { return sqrtl(arg); } __host__ __device__ inline real4 CBRT(real4 arg) { return cbrtf(arg); } __host__ __device__ inline real8 CBRT(real8 arg) { return cbrt(arg); } __host__ inline 
real10 CBRT(real10 arg) { return cbrtl(arg); } __host__ __device__ inline real4 FABS(real4 arg) { return fabsf(arg); } __host__ __device__ inline real8 FABS(real8 arg) { return fabs(arg); } __host__ inline real10 FABS(real10 arg) { return fabsl(arg); } __host__ __device__ inline real4 FMAX(real4 arg1, real4 arg2) { return fmaxf(arg1, arg2); } __host__ __device__ inline real8 FMAX(real8 arg1, real8 arg2) { return fmax(arg1, arg2); } __host__ inline real10 FMAX(real10 arg1, real10 arg2) { return fmaxl(arg1, arg2); } /*#define CUDA_SAFE_CALL( call) do { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) */ #define CUDA(call) CUDA_SAFE_CALL(call) #ifdef CUDA_SYNC_ALL #define CUDA_DEBUGSYNC CUDA(cudaThreadSynchronize()) #else #define CUDA_DEBUGSYNC #endif #define BLOCKSIZE 256 /* Given a number of bytes, nbytes, and a byte alignment, align, (e.g., 2, * 4, 8, or 16), return the smallest integer that is larger than nbytes and * a multiple of align. */ #define PAD_DIV(nbytes, align) (((nbytes) + (align)-1) / (align)) #define PAD(nbytes, align) (PAD_DIV((nbytes), (align)) * (align)) /* More general version of reduceInPlacePOT (this works for arbitrary * numThreadsPerBlock <= 1024). Again, conditionals on * numThreadsPerBlock are evaluated at compile time. */ template <class T, int numThreadsPerBlock> __device__ void reduceSum(T* sresult, const int threadID) { /* If number of threads is not a power of two, first add the ones after the last power of two into the beginning. At most one of these conditionals will be true for a given NPOT block size. 
*/ if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024) { __syncthreads(); if (threadID < numThreadsPerBlock - 512) sresult[threadID] += sresult[threadID + 512]; } if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512) { __syncthreads(); if (threadID < numThreadsPerBlock - 256) sresult[threadID] += sresult[threadID + 256]; } if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256) { __syncthreads(); if (threadID < numThreadsPerBlock - 128) sresult[threadID] += sresult[threadID + 128]; } if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128) { __syncthreads(); if (threadID < numThreadsPerBlock - 64) sresult[threadID] += sresult[threadID + 64]; } if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64) { __syncthreads(); if (threadID < numThreadsPerBlock - 32) sresult[threadID] += sresult[threadID + 32]; } if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32) { __syncthreads(); if (threadID < numThreadsPerBlock - 16) sresult[threadID] += sresult[threadID + 16]; } if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16) { __syncthreads(); if (threadID < numThreadsPerBlock - 8) sresult[threadID] += sresult[threadID + 8]; } if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8) { __syncthreads(); if (threadID < numThreadsPerBlock - 4) sresult[threadID] += sresult[threadID + 4]; } if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4) { __syncthreads(); if (threadID < numThreadsPerBlock - 2) sresult[threadID] += sresult[threadID + 2]; } if (numThreadsPerBlock >= 512) { __syncthreads(); if (threadID < 256) sresult[threadID] += sresult[threadID + 256]; } if (numThreadsPerBlock >= 256) { __syncthreads(); if (threadID < 128) sresult[threadID] += sresult[threadID + 128]; } if (numThreadsPerBlock >= 128) { __syncthreads(); if (threadID < 64) sresult[threadID] += sresult[threadID + 64]; } __syncthreads(); #ifdef _DEVICEEMU if (numThreadsPerBlock >= 64) { __syncthreads(); if (threadID < 32) sresult[threadID] += sresult[threadID + 32]; } if (numThreadsPerBlock >= 32) { 
__syncthreads(); if (threadID < 16) sresult[threadID] += sresult[threadID + 16]; } if (numThreadsPerBlock >= 16) { __syncthreads(); if (threadID < 8) sresult[threadID] += sresult[threadID + 8]; } if (numThreadsPerBlock >= 8) { __syncthreads(); if (threadID < 4) sresult[threadID] += sresult[threadID + 4]; } if (numThreadsPerBlock >= 4) { __syncthreads(); if (threadID < 2) sresult[threadID] += sresult[threadID + 2]; } if (numThreadsPerBlock >= 2) { __syncthreads(); if (threadID < 1) sresult[threadID] += sresult[threadID + 1]; } #else if (threadID < 32) { volatile T* vol = sresult; if (numThreadsPerBlock >= 64) vol[threadID] += vol[threadID + 32]; if (numThreadsPerBlock >= 32) vol[threadID] += vol[threadID + 16]; if (numThreadsPerBlock >= 16) vol[threadID] += vol[threadID + 8]; if (numThreadsPerBlock >= 8) vol[threadID] += vol[threadID + 4]; if (numThreadsPerBlock >= 4) vol[threadID] += vol[threadID + 2]; if (numThreadsPerBlock >= 2) vol[threadID] += vol[threadID + 1]; } #endif __syncthreads(); } #define MINEQ(a, b) (a) = (((a) < (b)) ? (a) : (b)) template <class T, int numThreadsPerBlock> __device__ void reduceMin(T* sresult, const int threadID) { /* If number of threads is not a power of two, first add the ones after the last power of two into the beginning. At most one of these conditionals will be true for a given NPOT block size. 
*/ if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024) { __syncthreads(); if (threadID < numThreadsPerBlock - 512) MINEQ(sresult[threadID], sresult[threadID + 512]); } if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512) { __syncthreads(); if (threadID < numThreadsPerBlock - 256) MINEQ(sresult[threadID], sresult[threadID + 256]); } if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256) { __syncthreads(); if (threadID < numThreadsPerBlock - 128) MINEQ(sresult[threadID], sresult[threadID + 128]); } if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128) { __syncthreads(); if (threadID < numThreadsPerBlock - 64) MINEQ(sresult[threadID], sresult[threadID + 64]); } if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64) { __syncthreads(); if (threadID < numThreadsPerBlock - 32) MINEQ(sresult[threadID], sresult[threadID + 32]); } if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32) { __syncthreads(); if (threadID < numThreadsPerBlock - 16) MINEQ(sresult[threadID], sresult[threadID + 16]); } if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16) { __syncthreads(); if (threadID < numThreadsPerBlock - 8) MINEQ(sresult[threadID], sresult[threadID + 8]); } if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8) { __syncthreads(); if (threadID < numThreadsPerBlock - 4) MINEQ(sresult[threadID], sresult[threadID + 4]); } if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4) { __syncthreads(); if (threadID < numThreadsPerBlock - 2) MINEQ(sresult[threadID], sresult[threadID + 2]); } if (numThreadsPerBlock >= 512) { __syncthreads(); if (threadID < 256) MINEQ(sresult[threadID], sresult[threadID + 256]); } if (numThreadsPerBlock >= 256) { __syncthreads(); if (threadID < 128) MINEQ(sresult[threadID], sresult[threadID + 128]); } if (numThreadsPerBlock >= 128) { __syncthreads(); if (threadID < 64) MINEQ(sresult[threadID], sresult[threadID + 64]); } __syncthreads(); #ifdef _DEVICEEMU if (numThreadsPerBlock >= 64) { __syncthreads(); if (threadID < 32) 
MINEQ(sresult[threadID], sresult[threadID + 32]);
}
/* (tail of reduceMin<T, numThreadsPerBlock>; the function begins on an
   earlier line) remaining emulation-mode reduction stages, each guarded by
   a block-wide barrier. */
if (numThreadsPerBlock >= 32) {
  __syncthreads();
  if (threadID < 16)
    MINEQ(sresult[threadID], sresult[threadID + 16]);
}
if (numThreadsPerBlock >= 16) {
  __syncthreads();
  if (threadID < 8)
    MINEQ(sresult[threadID], sresult[threadID + 8]);
}
if (numThreadsPerBlock >= 8) {
  __syncthreads();
  if (threadID < 4)
    MINEQ(sresult[threadID], sresult[threadID + 4]);
}
if (numThreadsPerBlock >= 4) {
  __syncthreads();
  if (threadID < 2)
    MINEQ(sresult[threadID], sresult[threadID + 2]);
}
if (numThreadsPerBlock >= 2) {
  __syncthreads();
  if (threadID < 1)
    MINEQ(sresult[threadID], sresult[threadID + 1]);
}
#else
/* Non-emulation path: the last 32 partial results fit in one warp, so the
   remaining stages run without barriers; `volatile` stops the compiler from
   caching shared-memory reads in registers.
   NOTE(review): this relies on implicit warp synchrony, which is not
   guaranteed on Volta+ (independent thread scheduling) -- confirm the
   target architecture. */
if (threadID < 32) {
  volatile T* vol = sresult;
  if (numThreadsPerBlock >= 64) MINEQ(vol[threadID], vol[threadID + 32]);
  if (numThreadsPerBlock >= 32) MINEQ(vol[threadID], vol[threadID + 16]);
  if (numThreadsPerBlock >= 16) MINEQ(vol[threadID], vol[threadID + 8]);
  if (numThreadsPerBlock >= 8) MINEQ(vol[threadID], vol[threadID + 4]);
  if (numThreadsPerBlock >= 4) MINEQ(vol[threadID], vol[threadID + 2]);
  if (numThreadsPerBlock >= 2) MINEQ(vol[threadID], vol[threadID + 1]);
}
#endif
__syncthreads();
}

/* cuda_init: select and initialize the CUDA device.
 * Honors the CUDA_DEVICE environment variable (defaults to device 0),
 * validates the index against the device count, rejects devices reporting
 * compute capability major < 1, and makes the device current.
 * Exits the process on any failure. */
void cuda_init() {
  int deviceCount, dev;
  cudaDeviceProp cuda_deviceProp;
  char* s;
  CUDA(cudaGetDeviceCount(&deviceCount));
  if (deviceCount == 0) {
    fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
    exit(1);
  }
  /* intentional assignment-in-condition: s is non-NULL iff the env var is set */
  if (s = getenv("CUDA_DEVICE"))
    dev = atoi(s);
  else
    dev = 0;
  if ((dev < 0) || (dev > deviceCount - 1)) {
    fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
            dev, 0, deviceCount - 1);
    exit(1);
  }
  CUDA(cudaGetDeviceProperties(&cuda_deviceProp, dev));
  if (cuda_deviceProp.major < 1) {
    fprintf(stderr, "cuda_init(): device %d does not support CUDA.\n", dev);
    exit(1);
  }
  fprintf(stderr, "setting CUDA device %d\n", dev);
  CUDA(cudaSetDevice(dev));
}

/************************************************************/
/* Allow for flexible data layout experiments by separating */
/* array interface from underlying
implementation. */ /************************************************************/ struct Mesh { /* This first implementation allows for runnable code */ /* and is not meant to be optimal. Final implementation */ /* should separate declaration and allocation phases */ /* so that allocation can be scheduled in a cache conscious */ /* manner. */ friend struct MeshGPU; public: /**************/ /* Allocation */ /**************/ void AllocateNodalPersistent(size_t size) { m_x.resize(size); m_y.resize(size); m_z.resize(size); m_xd.resize(size, Real_t(0.)); m_yd.resize(size, Real_t(0.)); m_zd.resize(size, Real_t(0.)); m_xdd.resize(size, Real_t(0.)); m_ydd.resize(size, Real_t(0.)); m_zdd.resize(size, Real_t(0.)); m_fx.resize(size); m_fy.resize(size); m_fz.resize(size); m_nodalMass.resize(size, Real_t(0.)); } void AllocateElemPersistent(size_t size) { m_matElemlist.resize(size); m_nodelist.resize(8 * size); m_lxim.resize(size); m_lxip.resize(size); m_letam.resize(size); m_letap.resize(size); m_lzetam.resize(size); m_lzetap.resize(size); m_elemBC.resize(size); m_e.resize(size, Real_t(0.)); m_p.resize(size, Real_t(0.)); m_q.resize(size); m_ql.resize(size); m_qq.resize(size); m_v.resize(size, 1.0); m_volo.resize(size); m_delv.resize(size); m_vdov.resize(size); m_arealg.resize(size); m_ss.resize(size); m_elemMass.resize(size); } /* Temporaries should not be initialized in bulk but */ /* this is a runnable placeholder for now */ void AllocateElemTemporary(size_t size) { m_dxx.resize(size); m_dyy.resize(size); m_dzz.resize(size); m_delv_xi.resize(size); m_delv_eta.resize(size); m_delv_zeta.resize(size); m_delx_xi.resize(size); m_delx_eta.resize(size); m_delx_zeta.resize(size); m_vnew.resize(size); } void AllocateNodesets(size_t size) { m_symmX.resize(size); m_symmY.resize(size); m_symmZ.resize(size); } void AllocateNodeElemIndexes() { Index_t i, j, nidx; /* set up node-centered indexing of elements */ m_nodeElemCount.resize(m_numNode); for (i = 0; i < m_numNode; i++) 
m_nodeElemCount[i] = 0; m_nodeElemCornerList.resize(m_numNode * 8); for (i = 0; i < m_numElem; i++) { for (j = 0; j < 8; j++) { nidx = nodelist(i, j); m_nodeElemCornerList[nidx + m_numNode * m_nodeElemCount[nidx]++] = i + m_numElem * j; if (m_nodeElemCount[nidx] > 8) { fprintf(stderr, "Node degree is higher than 8!\n"); exit(1); } } } } /**********/ /* Access */ /**********/ /* Node-centered */ Real_t& x(Index_t idx) { return m_x[idx]; } Real_t& y(Index_t idx) { return m_y[idx]; } Real_t& z(Index_t idx) { return m_z[idx]; } Real_t& xd(Index_t idx) { return m_xd[idx]; } Real_t& yd(Index_t idx) { return m_yd[idx]; } Real_t& zd(Index_t idx) { return m_zd[idx]; } Real_t& xdd(Index_t idx) { return m_xdd[idx]; } Real_t& ydd(Index_t idx) { return m_ydd[idx]; } Real_t& zdd(Index_t idx) { return m_zdd[idx]; } Real_t& fx(Index_t idx) { return m_fx[idx]; } Real_t& fy(Index_t idx) { return m_fy[idx]; } Real_t& fz(Index_t idx) { return m_fz[idx]; } Real_t& nodalMass(Index_t idx) { return m_nodalMass[idx]; } Index_t& symmX(Index_t idx) { return m_symmX[idx]; } Index_t& symmY(Index_t idx) { return m_symmY[idx]; } Index_t& symmZ(Index_t idx) { return m_symmZ[idx]; } /* Element-centered */ Index_t& matElemlist(Index_t idx) { return m_matElemlist[idx]; } Index_t& nodelist(Index_t idx, Index_t nidx) { return m_nodelist[idx + nidx * m_numElem]; } Index_t& lxim(Index_t idx) { return m_lxim[idx]; } Index_t& lxip(Index_t idx) { return m_lxip[idx]; } Index_t& letam(Index_t idx) { return m_letam[idx]; } Index_t& letap(Index_t idx) { return m_letap[idx]; } Index_t& lzetam(Index_t idx) { return m_lzetam[idx]; } Index_t& lzetap(Index_t idx) { return m_lzetap[idx]; } Int_t& elemBC(Index_t idx) { return m_elemBC[idx]; } Real_t& dxx(Index_t idx) { return m_dxx[idx]; } Real_t& dyy(Index_t idx) { return m_dyy[idx]; } Real_t& dzz(Index_t idx) { return m_dzz[idx]; } Real_t& delv_xi(Index_t idx) { return m_delv_xi[idx]; } Real_t& delv_eta(Index_t idx) { return m_delv_eta[idx]; } Real_t& 
delv_zeta(Index_t idx) { return m_delv_zeta[idx]; } Real_t& delx_xi(Index_t idx) { return m_delx_xi[idx]; } Real_t& delx_eta(Index_t idx) { return m_delx_eta[idx]; } Real_t& delx_zeta(Index_t idx) { return m_delx_zeta[idx]; } Real_t& e(Index_t idx) { return m_e[idx]; } Real_t& p(Index_t idx) { return m_p[idx]; } Real_t& q(Index_t idx) { return m_q[idx]; } Real_t& ql(Index_t idx) { return m_ql[idx]; } Real_t& qq(Index_t idx) { return m_qq[idx]; } Real_t& v(Index_t idx) { return m_v[idx]; } Real_t& volo(Index_t idx) { return m_volo[idx]; } Real_t& vnew(Index_t idx) { return m_vnew[idx]; } Real_t& delv(Index_t idx) { return m_delv[idx]; } Real_t& vdov(Index_t idx) { return m_vdov[idx]; } Real_t& arealg(Index_t idx) { return m_arealg[idx]; } Real_t& ss(Index_t idx) { return m_ss[idx]; } Real_t& elemMass(Index_t idx) { return m_elemMass[idx]; } /* Params */ Real_t& dtfixed() { return m_dtfixed; } Real_t& time() { return m_time; } Real_t& deltatime() { return m_deltatime; } Real_t& deltatimemultlb() { return m_deltatimemultlb; } Real_t& deltatimemultub() { return m_deltatimemultub; } Real_t& stoptime() { return m_stoptime; } Real_t& u_cut() { return m_u_cut; } Real_t& hgcoef() { return m_hgcoef; } Real_t& qstop() { return m_qstop; } Real_t& monoq_max_slope() { return m_monoq_max_slope; } Real_t& monoq_limiter_mult() { return m_monoq_limiter_mult; } Real_t& e_cut() { return m_e_cut; } Real_t& p_cut() { return m_p_cut; } Real_t& ss4o3() { return m_ss4o3; } Real_t& q_cut() { return m_q_cut; } Real_t& v_cut() { return m_v_cut; } Real_t& qlc_monoq() { return m_qlc_monoq; } Real_t& qqc_monoq() { return m_qqc_monoq; } Real_t& qqc() { return m_qqc; } Real_t& eosvmax() { return m_eosvmax; } Real_t& eosvmin() { return m_eosvmin; } Real_t& pmin() { return m_pmin; } Real_t& emin() { return m_emin; } Real_t& dvovmax() { return m_dvovmax; } Real_t& refdens() { return m_refdens; } Real_t& dtcourant() { return m_dtcourant; } Real_t& dthydro() { return m_dthydro; } Real_t& dtmax() { 
return m_dtmax; } Int_t& cycle() { return m_cycle; } Index_t& sizeX() { return m_sizeX; } Index_t& sizeY() { return m_sizeY; } Index_t& sizeZ() { return m_sizeZ; } Index_t& numElem() { return m_numElem; } Index_t& numNode() { return m_numNode; } // private: /******************/ /* Implementation */ /******************/ /* Node-centered */ std::vector<Real_t> m_x; /* coordinates */ std::vector<Real_t> m_y; std::vector<Real_t> m_z; std::vector<Real_t> m_xd; /* velocities */ std::vector<Real_t> m_yd; std::vector<Real_t> m_zd; std::vector<Real_t> m_xdd; /* accelerations */ std::vector<Real_t> m_ydd; std::vector<Real_t> m_zdd; std::vector<Real_t> m_fx; /* forces */ std::vector<Real_t> m_fy; std::vector<Real_t> m_fz; std::vector<Real_t> m_nodalMass; /* mass */ std::vector<Index_t> m_symmX; /* symmetry plane nodesets */ std::vector<Index_t> m_symmY; std::vector<Index_t> m_symmZ; std::vector<Int_t> m_nodeElemCount; std::vector<Index_t> m_nodeElemCornerList; /* Element-centered */ std::vector<Index_t> m_matElemlist; /* material indexset */ std::vector<Index_t> m_nodelist; /* elemToNode connectivity */ std::vector<Index_t> m_lxim; /* element connectivity across each face */ std::vector<Index_t> m_lxip; std::vector<Index_t> m_letam; std::vector<Index_t> m_letap; std::vector<Index_t> m_lzetam; std::vector<Index_t> m_lzetap; std::vector<Int_t> m_elemBC; /* symmetry/-surface flags for each elem face */ std::vector<Real_t> m_dxx; /* principal strains -- temporary */ std::vector<Real_t> m_dyy; std::vector<Real_t> m_dzz; std::vector<Real_t> m_delv_xi; /* velocity gradient -- temporary */ std::vector<Real_t> m_delv_eta; std::vector<Real_t> m_delv_zeta; std::vector<Real_t> m_delx_xi; /* coordinate gradient -- temporary */ std::vector<Real_t> m_delx_eta; std::vector<Real_t> m_delx_zeta; std::vector<Real_t> m_e; /* energy */ std::vector<Real_t> m_p; /* pressure */ std::vector<Real_t> m_q; /* q */ std::vector<Real_t> m_ql; /* linear term for q */ std::vector<Real_t> m_qq; /* quadratic 
term for q */ std::vector<Real_t> m_v; /* relative volume */ std::vector<Real_t> m_volo; /* reference volume */ std::vector<Real_t> m_vnew; /* new relative volume -- temporary */ std::vector<Real_t> m_delv; /* m_vnew - m_v */ std::vector<Real_t> m_vdov; /* volume derivative over volume */ std::vector<Real_t> m_arealg; /* characteristic length of an element */ std::vector<Real_t> m_ss; /* "sound speed" */ std::vector<Real_t> m_elemMass; /* mass */ /* Parameters */ Real_t m_dtfixed; /* fixed time increment */ Real_t m_time; /* current time */ Real_t m_deltatime; /* variable time increment */ Real_t m_deltatimemultlb; Real_t m_deltatimemultub; Real_t m_stoptime; /* end time for simulation */ Real_t m_u_cut; /* velocity tolerance */ Real_t m_hgcoef; /* hourglass control */ Real_t m_qstop; /* excessive q indicator */ Real_t m_monoq_max_slope; Real_t m_monoq_limiter_mult; Real_t m_e_cut; /* energy tolerance */ Real_t m_p_cut; /* pressure tolerance */ Real_t m_ss4o3; Real_t m_q_cut; /* q tolerance */ Real_t m_v_cut; /* relative volume tolerance */ Real_t m_qlc_monoq; /* linear term coef for q */ Real_t m_qqc_monoq; /* quadratic term coef for q */ Real_t m_qqc; Real_t m_eosvmax; Real_t m_eosvmin; Real_t m_pmin; /* pressure floor */ Real_t m_emin; /* energy floor */ Real_t m_dvovmax; /* maximum allowable volume change */ Real_t m_refdens; /* reference density */ Real_t m_dtcourant; /* courant constraint */ Real_t m_dthydro; /* volume change constraint */ Real_t m_dtmax; /* maximum allowable time increment */ Int_t m_cycle; /* iteration count for simulation */ Index_t m_sizeX; /* X,Y,Z extent of this block */ Index_t m_sizeY; Index_t m_sizeZ; Index_t m_numElem; /* Elements/Nodes in this domain */ Index_t m_numNode; } mesh; template <typename T> T* Allocate(size_t size) { return static_cast<T*>(malloc(sizeof(T) * size)); } template <typename T> void Release(T** ptr) { if (*ptr != NULL) { free(*ptr); *ptr = NULL; } } #define GPU_STALE 0 #define CPU_STALE 1 #define ALL_FRESH 2 
/* freshenGPU: lazily mirror a host-side std::vector onto the device.
 * No-op unless the GPU copy is marked stale; allocates the device buffer
 * on first use, then async-copies host -> device on streamApp and marks
 * both copies fresh (ALL_FRESH).
 * NOTE(review): the device buffer is sized from cpu.size() at first
 * allocation and never re-grown -- assumes the vector's size is fixed
 * after the first transfer; confirm against callers. */
template <typename T>
void freshenGPU(std::vector<T>& cpu, T** gpu, int& stale) {
  if (stale != GPU_STALE)
    return;
  if (!(*gpu)) {
    CUDA(cudaMalloc(gpu, sizeof(T) * cpu.size()));
  }
  CUDA(cudaMemcpyAsync(*gpu, &cpu[0], sizeof(T) * cpu.size(),
                       cudaMemcpyHostToDevice, streamApp));
  stale = ALL_FRESH;
}

/* freshenCPU: device -> host counterpart of freshenGPU. The GPU buffer
 * must already exist (freshening a CPU copy that was never uploaded is a
 * fatal error). The copy is asynchronous on streamApp; callers that read
 * the host data must synchronize that stream first. */
template <typename T>
void freshenCPU(std::vector<T>& cpu, T* gpu, int& stale) {
  if (stale != CPU_STALE)
    return;
  if (!gpu) {
    fprintf(stderr, "freshenCPU(): NULL GPU data!\n");
    exit(1);
  }
  CUDA(cudaMemcpyAsync(&cpu[0], gpu, sizeof(T) * cpu.size(),
                       cudaMemcpyDeviceToHost, streamApp));
  stale = ALL_FRESH;
}
// freshen helpers
#define FC(var) \
  freshenCPU(mesh.m_##var, meshGPU.m_##var, \
             meshGPU.m_##var##_stale);  // freshen CPU
#define FG(var) \
  freshenGPU(mesh.m_##var, &meshGPU.m_##var, \
             meshGPU.m_##var##_stale);  // freshen GPU
// stale helpers
#define SC(var) meshGPU.m_##var##_stale = CPU_STALE;  // stale CPU
#define SG(var) meshGPU.m_##var##_stale = GPU_STALE;  // stale GPU

/* MeshGPU: device-side mirror of Mesh. One raw device pointer per Mesh
 * array, each paired with a staleness flag (GPU_STALE / CPU_STALE /
 * ALL_FRESH) that drives the lazy freshenGPU/freshenCPU transfers above. */
struct MeshGPU {
  Mesh* m_mesh;
  /******************/
  /* Implementation */
  /******************/
  /* Node-centered */
  Real_t* m_x; /* coordinates */
  Real_t* m_y;
  Real_t* m_z;
  Real_t* m_xd; /* velocities */
  Real_t* m_yd;
  Real_t* m_zd;
  Real_t* m_xdd; /* accelerations */
  Real_t* m_ydd;
  Real_t* m_zdd;
  Real_t* m_fx; /* forces */
  Real_t* m_fy;
  Real_t* m_fz;
  Real_t* m_nodalMass;  /* mass */
  Index_t* m_symmX;     /* symmetry plane nodesets */
  Index_t* m_symmY;
  Index_t* m_symmZ;
  Int_t* m_nodeElemCount;
  Index_t* m_nodeElemCornerList;
  /* Element-centered */
  Index_t* m_matElemlist; /* material indexset */
  Index_t* m_nodelist;    /* elemToNode connectivity */
  Index_t* m_lxim;        /* element connectivity across each face */
  Index_t* m_lxip;
  Index_t* m_letam;
  Index_t* m_letap;
  Index_t* m_lzetam;
  Index_t* m_lzetap;
  Int_t* m_elemBC;  /* symmetry/free-surface flags for each elem face */
  Real_t* m_dxx;    /* principal strains -- temporary */
  Real_t* m_dyy;
  Real_t* m_dzz;
  Real_t* m_delv_xi; /* velocity gradient -- temporary */
  Real_t* m_delv_eta;
Real_t* m_delv_zeta; Real_t* m_delx_xi; /* coordinate gradient -- temporary */ Real_t* m_delx_eta; Real_t* m_delx_zeta; Real_t* m_e; /* energy */ Real_t* m_p; /* pressure */ Real_t* m_q; /* q */ Real_t* m_ql; /* linear term for q */ Real_t* m_qq; /* quadratic term for q */ Real_t* m_v; /* relative volume */ Real_t* m_volo; /* reference volume */ Real_t* m_vnew; /* new relative volume -- temporary */ Real_t* m_delv; /* m_vnew - m_v */ Real_t* m_vdov; /* volume derivative over volume */ Real_t* m_arealg; /* characteristic length of an element */ Real_t* m_ss; /* "sound speed" */ Real_t* m_elemMass; /* mass */ /* Stale flags */ int m_x_stale, m_y_stale, m_z_stale; int m_xd_stale, m_yd_stale, m_zd_stale; int m_xdd_stale, m_ydd_stale, m_zdd_stale; int m_fx_stale, m_fy_stale, m_fz_stale; int m_nodalMass_stale; int m_symmX_stale, m_symmY_stale, m_symmZ_stale; int m_nodeElemCount_stale, m_nodeElemCornerList_stale; int m_matElemlist_stale, m_nodelist_stale; int m_lxim_stale, m_lxip_stale, m_letam_stale, m_letap_stale, m_lzetam_stale, m_lzetap_stale; int m_elemBC_stale; int m_dxx_stale, m_dyy_stale, m_dzz_stale; int m_delv_xi_stale, m_delv_eta_stale, m_delv_zeta_stale; int m_delx_xi_stale, m_delx_eta_stale, m_delx_zeta_stale; int m_e_stale; int m_p_stale, m_q_stale, m_ql_stale, m_qq_stale; int m_v_stale, m_volo_stale, m_vnew_stale, m_delv_stale, m_vdov_stale; int m_arealg_stale; int m_ss_stale; int m_elemMass_stale; void init(Mesh* mesh) { m_mesh = mesh; m_x = m_y = m_z = NULL; m_xd = m_yd = m_zd = NULL; m_xdd = m_ydd = m_zdd = NULL; m_fx = m_fy = m_fz = NULL; m_nodalMass = NULL; m_symmX = m_symmY = m_symmZ = NULL; m_nodeElemCount = m_nodeElemCornerList = NULL; m_matElemlist = m_nodelist = NULL; m_lxim = m_lxip = m_letam = m_letap = m_lzetam = m_lzetap = NULL; m_elemBC = NULL; m_dxx = m_dyy = m_dzz = NULL; m_delv_xi = m_delv_eta = m_delv_zeta = NULL; m_delx_xi = m_delx_eta = m_delx_zeta = NULL; m_e = NULL; m_p = m_q = m_ql = m_qq = NULL; m_v = m_volo = m_vnew = m_delv = 
m_vdov = NULL; m_arealg = NULL; m_ss = NULL; m_elemMass = NULL; m_x_stale = m_y_stale = m_z_stale = m_xd_stale = m_yd_stale = m_zd_stale = m_xdd_stale = m_ydd_stale = m_zdd_stale = m_fx_stale = m_fy_stale = m_fz_stale = m_nodalMass_stale = m_symmX_stale = m_symmY_stale = m_symmZ_stale = m_nodeElemCount_stale = m_nodeElemCornerList_stale = m_matElemlist_stale = m_nodelist_stale = m_lxim_stale = m_lxip_stale = m_letam_stale = m_letap_stale = m_lzetam_stale = m_lzetap_stale = m_elemBC_stale = m_dxx_stale = m_dyy_stale = m_dzz_stale = m_delv_xi_stale = m_delv_eta_stale = m_delv_zeta_stale = m_delx_xi_stale = m_delx_eta_stale = m_delx_zeta_stale = m_e_stale = m_p_stale = m_q_stale = m_ql_stale = m_qq_stale = m_v_stale = m_volo_stale = m_vnew_stale = m_delv_stale = m_vdov_stale = m_arealg_stale = m_ss_stale = m_elemMass_stale = GPU_STALE; } void freshenGPU() { #define F(var) ::freshenGPU(m_mesh->m_##var, &m_##var, m_##var##_stale); F(x); F(y); F(z); F(xd); F(yd); F(zd); F(xdd); F(ydd); F(zdd); F(fx); F(fy); F(fz); F(nodalMass); F(symmX); F(symmY); F(symmZ); F(nodeElemCount); F(nodeElemCornerList); F(matElemlist); F(nodelist); F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap); F(elemBC); F(dxx); F(dyy); F(dzz); F(delv_xi); F(delv_eta); F(delv_zeta); F(delx_xi); F(delx_eta); F(delx_zeta); F(e); F(p); F(q); F(ql); F(qq); F(v); F(volo); F(vnew); F(delv); F(vdov); F(arealg); F(ss); F(elemMass); #undef F } void freshenCPU() { #define F(var) ::freshenCPU(m_mesh->m_##var, m_##var, m_##var##_stale); F(x); F(y); F(z); F(xd); F(yd); F(zd); F(xdd); F(ydd); F(zdd); F(fx); F(fy); F(fz); F(nodalMass); F(symmX); F(symmY); F(symmZ); F(nodeElemCount); F(nodeElemCornerList); F(matElemlist); F(nodelist); F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap); F(elemBC); F(dxx); F(dyy); F(dzz); F(delv_xi); F(delv_eta); F(delv_zeta); F(delx_xi); F(delx_eta); F(delx_zeta); F(e); F(p); F(q); F(ql); F(qq); F(v); F(volo); F(vnew); F(delv); F(vdov); F(arealg); F(ss); F(elemMass); 
#undef F } } meshGPU; /* Stuff needed for boundary conditions */ /* 2 BCs on each of 6 hexahedral faces (12 bits) */ #define XI_M 0x003 #define XI_M_SYMM 0x001 #define XI_M_FREE 0x002 #define XI_P 0x00c #define XI_P_SYMM 0x004 #define XI_P_FREE 0x008 #define ETA_M 0x030 #define ETA_M_SYMM 0x010 #define ETA_M_FREE 0x020 #define ETA_P 0x0c0 #define ETA_P_SYMM 0x040 #define ETA_P_FREE 0x080 #define ZETA_M 0x300 #define ZETA_M_SYMM 0x100 #define ZETA_M_FREE 0x200 #define ZETA_P 0xc00 #define ZETA_P_SYMM 0x400 #define ZETA_P_FREE 0x800 static inline void TimeIncrement() { Real_t targetdt = mesh.stoptime() - mesh.time(); if ((mesh.dtfixed() <= Real_t(0.0)) && (mesh.cycle() != Int_t(0))) { Real_t ratio; Real_t olddt = mesh.deltatime(); /* This will require a reduction in parallel */ Real_t newdt = Real_t(1.0e+20); if (mesh.dtcourant() < newdt) { newdt = mesh.dtcourant() / Real_t(2.0); } if (mesh.dthydro() < newdt) { newdt = mesh.dthydro() * Real_t(2.0) / Real_t(3.0); } ratio = newdt / olddt; if (ratio >= Real_t(1.0)) { if (ratio < mesh.deltatimemultlb()) { newdt = olddt; } else if (ratio > mesh.deltatimemultub()) { newdt = olddt * mesh.deltatimemultub(); } } if (newdt > mesh.dtmax()) { newdt = mesh.dtmax(); } mesh.deltatime() = newdt; } /* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */ if ((targetdt > mesh.deltatime()) && (targetdt < (Real_t(4.0) * mesh.deltatime() / Real_t(3.0)))) { targetdt = Real_t(2.0) * mesh.deltatime() / Real_t(3.0); } if (targetdt < mesh.deltatime()) { mesh.deltatime() = targetdt; } mesh.time() += mesh.deltatime(); ++mesh.cycle(); } __global__ void InitStressTermsForElems_kernel(int numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* p, Real_t* q) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElem) sigxx[i] = sigyy[i] = sigzz[i] = -p[i] - q[i]; } static inline void InitStressTermsForElems_gpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, cudaStream_t stream_app) { dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 
dimGrid(PAD_DIV(numElem, dimBlock.x), 1, 1); // cudaFuncSetCacheConfig(InitStressTermsForElems_kernel,cudaFuncCachePreferL1); // // set as default for all kernels after this one InitStressTermsForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numElem, sigxx, sigyy, sigzz, meshGPU.m_p, meshGPU.m_q); CUDA_DEBUGSYNC; } static inline void InitStressTermsForElems_cpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz) { // // pull in the stresses appropriate to the hydro integration // for (Index_t i = 0; i < numElem; ++i) { sigxx[i] = sigyy[i] = sigzz[i] = -mesh.p(i) - mesh.q(i); } } static inline void InitStressTermsForElems(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(p); FC(q); InitStressTermsForElems_cpu(numElem, sigxx, sigyy, sigzz); } else { FG(p); FG(q); InitStressTermsForElems_gpu(numElem, sigxx, sigyy, sigzz, stream_app); } } __host__ __device__ static inline void CalcElemShapeFunctionDerivatives( const Real_t* const x, const Real_t* const y, const Real_t* const z, Real_t b[][8], Real_t* const volume) { const Real_t x0 = x[0]; const Real_t x1 = x[1]; const Real_t x2 = x[2]; const Real_t x3 = x[3]; const Real_t x4 = x[4]; const Real_t x5 = x[5]; const Real_t x6 = x[6]; const Real_t x7 = x[7]; const Real_t y0 = y[0]; const Real_t y1 = y[1]; const Real_t y2 = y[2]; const Real_t y3 = y[3]; const Real_t y4 = y[4]; const Real_t y5 = y[5]; const Real_t y6 = y[6]; const Real_t y7 = y[7]; const Real_t z0 = z[0]; const Real_t z1 = z[1]; const Real_t z2 = z[2]; const Real_t z3 = z[3]; const Real_t z4 = z[4]; const Real_t z5 = z[5]; const Real_t z6 = z[6]; const Real_t z7 = z[7]; Real_t fjxxi, fjxet, fjxze; Real_t fjyxi, fjyet, fjyze; Real_t fjzxi, fjzet, fjzze; Real_t cjxxi, cjxet, cjxze; Real_t cjyxi, cjyet, cjyze; Real_t cjzxi, cjzet, cjzze; fjxxi = Real_t(.125) * ((x6 - x0) + (x5 - x3) - (x7 - x1) - (x4 - x2)); fjxet = Real_t(.125) * ((x6 - x0) - (x5 - x3) + (x7 - x1) - (x4 - x2)); 
    fjxze = Real_t(.125) * ((x6 - x0) + (x5 - x3) + (x7 - x1) + (x4 - x2));
    fjyxi = Real_t(.125) * ((y6 - y0) + (y5 - y3) - (y7 - y1) - (y4 - y2));
    fjyet = Real_t(.125) * ((y6 - y0) - (y5 - y3) + (y7 - y1) - (y4 - y2));
    fjyze = Real_t(.125) * ((y6 - y0) + (y5 - y3) + (y7 - y1) + (y4 - y2));
    fjzxi = Real_t(.125) * ((z6 - z0) + (z5 - z3) - (z7 - z1) - (z4 - z2));
    fjzet = Real_t(.125) * ((z6 - z0) - (z5 - z3) + (z7 - z1) - (z4 - z2));
    fjzze = Real_t(.125) * ((z6 - z0) + (z5 - z3) + (z7 - z1) + (z4 - z2));
    /* compute cofactors */
    cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
    cjxet = -(fjyxi * fjzze) + (fjzxi * fjyze);
    cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
    cjyxi = -(fjxet * fjzze) + (fjzet * fjxze);
    cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
    cjyze = -(fjxxi * fjzet) + (fjzxi * fjxet);
    cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
    cjzet = -(fjxxi * fjyze) + (fjyxi * fjxze);
    cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
    /* calculate partials :
       this need only be done for l = 0,1,2,3 since , by symmetry ,
       (6,7,4,5) = - (0,1,2,3) . */
    b[0][0] = -cjxxi - cjxet - cjxze;
    b[0][1] = cjxxi - cjxet - cjxze;
    b[0][2] = cjxxi + cjxet - cjxze;
    b[0][3] = -cjxxi + cjxet - cjxze;
    b[0][4] = -b[0][2];
    b[0][5] = -b[0][3];
    b[0][6] = -b[0][0];
    b[0][7] = -b[0][1];
    b[1][0] = -cjyxi - cjyet - cjyze;
    b[1][1] = cjyxi - cjyet - cjyze;
    b[1][2] = cjyxi + cjyet - cjyze;
    b[1][3] = -cjyxi + cjyet - cjyze;
    b[1][4] = -b[1][2];
    b[1][5] = -b[1][3];
    b[1][6] = -b[1][0];
    b[1][7] = -b[1][1];
    b[2][0] = -cjzxi - cjzet - cjzze;
    b[2][1] = cjzxi - cjzet - cjzze;
    b[2][2] = cjzxi + cjzet - cjzze;
    b[2][3] = -cjzxi + cjzet - cjzze;
    b[2][4] = -b[2][2];
    b[2][5] = -b[2][3];
    b[2][6] = -b[2][0];
    b[2][7] = -b[2][1];
    /* calculate jacobian determinant (volume) */
    *volume = Real_t(8.) * (fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}

/* Accumulate one quadrilateral face's (quarter) area normal into the four
   nodes making up the face: the normal is 0.25 * cross product of the two
   face bisectors. */
__host__ __device__ static inline void SumElemFaceNormal(
    Real_t* normalX0, Real_t* normalY0, Real_t* normalZ0, Real_t* normalX1,
    Real_t* normalY1, Real_t* normalZ1, Real_t* normalX2, Real_t* normalY2,
    Real_t* normalZ2, Real_t* normalX3, Real_t* normalY3, Real_t* normalZ3,
    const Real_t x0, const Real_t y0, const Real_t z0, const Real_t x1,
    const Real_t y1, const Real_t z1, const Real_t x2, const Real_t y2,
    const Real_t z2, const Real_t x3, const Real_t y3, const Real_t z3) {
    Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
    Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
    Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
    Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
    Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
    Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
    Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
    Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
    Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
    *normalX0 += areaX;
    *normalX1 += areaX;
    *normalX2 += areaX;
    *normalX3 += areaX;
    *normalY0 += areaY;
    *normalY1 += areaY;
    *normalY2 += areaY;
    *normalY3 += areaY;
    *normalZ0 += areaZ;
    *normalZ1 += areaZ;
    *normalZ2 += areaZ;
    *normalZ3 += areaZ;
}

/* Build per-node normals for one hex element by summing the area normals
   of its six faces.  (Continues on the next source line.) */
__host__ __device__ static inline void CalcElemNodeNormals(Real_t pfx[8],
                                                           Real_t pfy[8],
                                                           Real_t pfz[8],
                                                           const Real_t x[8],
                                                           const Real_t y[8],
                                                           const Real_t z[8]) {
    for (Index_t i = 0; i < 8; ++i) {
        pfx[i] = Real_t(0.0);
        pfy[i] = Real_t(0.0);
        pfz[i] = Real_t(0.0);
    }
    /* evaluate face one: nodes 0, 1, 2, 3 */
    SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0], &pfx[1], &pfy[1], &pfz[1],
                      &pfx[2], &pfy[2], &pfz[2], &pfx[3], &pfy[3], &pfz[3],
                      x[0], y[0], z[0], x[1], y[1], z[1], x[2], y[2], z[2],
                      x[3], y[3], z[3]);
    /* evaluate face two: nodes 0, 4, 5, 1 */
    SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0], &pfx[4], &pfy[4], &pfz[4],
                      &pfx[5], &pfy[5], &pfz[5], &pfx[1], &pfy[1], &pfz[1],
                      x[0],
y[0], z[0], x[4], y[4], z[4], x[5], y[5], z[5], x[1], y[1], z[1]); /* evaluate face three: nodes 1, 5, 6, 2 */ SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1], &pfx[5], &pfy[5], &pfz[5], &pfx[6], &pfy[6], &pfz[6], &pfx[2], &pfy[2], &pfz[2], x[1], y[1], z[1], x[5], y[5], z[5], x[6], y[6], z[6], x[2], y[2], z[2]); /* evaluate face four: nodes 2, 6, 7, 3 */ SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2], &pfx[6], &pfy[6], &pfz[6], &pfx[7], &pfy[7], &pfz[7], &pfx[3], &pfy[3], &pfz[3], x[2], y[2], z[2], x[6], y[6], z[6], x[7], y[7], z[7], x[3], y[3], z[3]); /* evaluate face five: nodes 3, 7, 4, 0 */ SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3], &pfx[7], &pfy[7], &pfz[7], &pfx[4], &pfy[4], &pfz[4], &pfx[0], &pfy[0], &pfz[0], x[3], y[3], z[3], x[7], y[7], z[7], x[4], y[4], z[4], x[0], y[0], z[0]); /* evaluate face six: nodes 4, 7, 6, 5 */ SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4], &pfx[7], &pfy[7], &pfz[7], &pfx[6], &pfy[6], &pfz[6], &pfx[5], &pfy[5], &pfz[5], x[4], y[4], z[4], x[7], y[7], z[7], x[6], y[6], z[6], x[5], y[5], z[5]); } __host__ __device__ static inline void SumElemStressesToNodeForces( const Real_t B[][8], const Real_t stress_xx, const Real_t stress_yy, const Real_t stress_zz, Real_t* const fx, Real_t* const fy, Real_t* const fz, int stride) { Real_t pfx0 = B[0][0]; Real_t pfx1 = B[0][1]; Real_t pfx2 = B[0][2]; Real_t pfx3 = B[0][3]; Real_t pfx4 = B[0][4]; Real_t pfx5 = B[0][5]; Real_t pfx6 = B[0][6]; Real_t pfx7 = B[0][7]; Real_t pfy0 = B[1][0]; Real_t pfy1 = B[1][1]; Real_t pfy2 = B[1][2]; Real_t pfy3 = B[1][3]; Real_t pfy4 = B[1][4]; Real_t pfy5 = B[1][5]; Real_t pfy6 = B[1][6]; Real_t pfy7 = B[1][7]; Real_t pfz0 = B[2][0]; Real_t pfz1 = B[2][1]; Real_t pfz2 = B[2][2]; Real_t pfz3 = B[2][3]; Real_t pfz4 = B[2][4]; Real_t pfz5 = B[2][5]; Real_t pfz6 = B[2][6]; Real_t pfz7 = B[2][7]; fx[0 * stride] = -(stress_xx * pfx0); fx[1 * stride] = -(stress_xx * pfx1); fx[2 * stride] = -(stress_xx * pfx2); fx[3 * stride] = -(stress_xx * pfx3); fx[4 * stride] = -(stress_xx 
                      * pfx4);
    fx[5 * stride] = -(stress_xx * pfx5);
    fx[6 * stride] = -(stress_xx * pfx6);
    fx[7 * stride] = -(stress_xx * pfx7);
    fy[0 * stride] = -(stress_yy * pfy0);
    fy[1 * stride] = -(stress_yy * pfy1);
    fy[2 * stride] = -(stress_yy * pfy2);
    fy[3 * stride] = -(stress_yy * pfy3);
    fy[4 * stride] = -(stress_yy * pfy4);
    fy[5 * stride] = -(stress_yy * pfy5);
    fy[6 * stride] = -(stress_yy * pfy6);
    fy[7 * stride] = -(stress_yy * pfy7);
    fz[0 * stride] = -(stress_zz * pfz0);
    fz[1 * stride] = -(stress_zz * pfz1);
    fz[2 * stride] = -(stress_zz * pfz2);
    fz[3 * stride] = -(stress_zz * pfz3);
    fz[4 * stride] = -(stress_zz * pfz4);
    fz[5 * stride] = -(stress_zz * pfz5);
    fz[6 * stride] = -(stress_zz * pfz6);
    fz[7 * stride] = -(stress_zz * pfz7);
}

/* One thread per element: gather the element's 8 nodal coordinates
   (nodelist is stored node-major, stride numElem), compute shape-function
   derivatives and volume, build node normals, and scatter the stress
   forces into the element-strided scratch arrays fx/fy/fz_elem. */
__global__ void IntegrateStressForElems_kernel(Index_t numElem,
                                               Index_t* nodelist, Real_t* x,
                                               Real_t* y, Real_t* z,
                                               Real_t* fx_elem, Real_t* fy_elem,
                                               Real_t* fz_elem, Real_t* sigxx,
                                               Real_t* sigyy, Real_t* sigzz,
                                               Real_t* determ) {
    Real_t B[3][8]; // shape function derivatives
    Real_t x_local[8];
    Real_t y_local[8];
    Real_t z_local[8];
    int k = blockDim.x * blockIdx.x + threadIdx.x;
    if (k < numElem) {
        // get nodal coordinates from global arrays and copy into local arrays.
        for (Index_t lnode = 0; lnode < 8; ++lnode) {
            Index_t gnode = nodelist[k + lnode * numElem];
            x_local[lnode] = x[gnode];
            y_local[lnode] = y[gnode];
            z_local[lnode] = z[gnode];
        }
        /* Volume calculation involves extra work for numerical consistency. */
        CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B,
                                         &determ[k]);
        CalcElemNodeNormals(B[0], B[1], B[2], x_local, y_local, z_local);
        SumElemStressesToNodeForces(B, sigxx[k], sigyy[k], sigzz[k],
                                    &fx_elem[k], &fy_elem[k], &fz_elem[k],
                                    numElem);
    }
}

/* One thread per node: sum this node's per-corner contributions from the
   element-strided scratch arrays and OVERWRITE the nodal force. */
__global__ void AddNodeForcesFromElems_kernel(
    Index_t numNode, Int_t* nodeElemCount, Index_t* nodeElemCornerList,
    Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem, Real_t* fx_node,
    Real_t* fy_node, Real_t* fz_node) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numNode) {
        Int_t count = nodeElemCount[i];
        Real_t fx, fy, fz;
        fx = fy = fz = Real_t(0.0);
        for (int j = 0; j < count; j++) {
            Index_t elem = nodeElemCornerList[i + numNode * j];
            fx += fx_elem[elem];
            fy += fy_elem[elem];
            fz += fz_elem[elem];
        }
        fx_node[i] = fx;
        fy_node[i] = fy;
        fz_node[i] = fz;
    }
}

/* Same gather as above but ACCUMULATES into the nodal force (+=) instead
   of overwriting it. */
__global__ void AddNodeForcesFromElems2_kernel(
    Index_t numNode, Int_t* nodeElemCount, Index_t* nodeElemCornerList,
    Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem, Real_t* fx_node,
    Real_t* fy_node, Real_t* fz_node) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numNode) {
        Int_t count = nodeElemCount[i];
        Real_t fx, fy, fz;
        fx = fy = fz = Real_t(0.0);
        for (int j = 0; j < count; j++) {
            Index_t elem = nodeElemCornerList[i + numNode * j];
            fx += fx_elem[elem];
            fy += fy_elem[elem];
            fz += fz_elem[elem];
        }
        fx_node[i] += fx;
        fy_node[i] += fy;
        fz_node[i] += fz;
    }
}

/* GPU path: per-element force kernel into 8*numElem scratch buffers, then
   a per-node gather into meshGPU.m_f{x,y,z}.  (Continues on the
   following source lines.) */
static inline void IntegrateStressForElems_gpu(Index_t numElem, Real_t* sigxx,
                                               Real_t* sigyy, Real_t* sigzz,
                                               Real_t* determ, int& badvol,
                                               cudaStream_t stream_app) {
    Real_t *fx_elem, *fy_elem, *fz_elem;
    CUDA(cudaMalloc(&fx_elem, numElem * 8 * sizeof(Real_t)));
    CUDA(cudaMalloc(&fy_elem, numElem * 8 * sizeof(Real_t)));
    CUDA(cudaMalloc(&fz_elem, numElem * 8 * sizeof(Real_t)));
    dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
    dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
    IntegrateStressForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>(
        numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y,
meshGPU.m_z, fx_elem, fy_elem, fz_elem, sigxx, sigyy, sigzz, determ); CUDA_DEBUGSYNC; dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); AddNodeForcesFromElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( mesh.numNode(), meshGPU.m_nodeElemCount, meshGPU.m_nodeElemCornerList, fx_elem, fy_elem, fz_elem, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz); CUDA_DEBUGSYNC; // CUDA( cudaFree(fx_elem) ); // CUDA( cudaFree(fy_elem) ); // CUDA( cudaFree(fz_elem) ); // JDC -- need a reduction step to check for non-positive element volumes badvol = 0; } static inline void IntegrateStressForElems_cpu(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ, int& badvol) { Real_t B[3][8]; // shape function derivatives Real_t x_local[8]; Real_t y_local[8]; Real_t z_local[8]; Real_t fx_local[8]; Real_t fy_local[8]; Real_t fz_local[8]; // loop over all elements for (Index_t k = 0; k < numElem; ++k) { // get nodal coordinates from global arrays and copy into local arrays. for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); x_local[lnode] = mesh.x(gnode); y_local[lnode] = mesh.y(gnode); z_local[lnode] = mesh.z(gnode); } /* Volume calculation involves extra work for numerical consistency. */ CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &determ[k]); CalcElemNodeNormals(B[0], B[1], B[2], x_local, y_local, z_local); SumElemStressesToNodeForces(B, sigxx[k], sigyy[k], sigzz[k], fx_local, fy_local, fz_local, 1); // copy nodal force contributions to global force arrray. 
for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); mesh.fx(gnode) += fx_local[lnode]; mesh.fy(gnode) += fy_local[lnode]; mesh.fz(gnode) += fz_local[lnode]; } } badvol = 0; for (Index_t k = 0; k < numElem; ++k) { if (determ[k] <= Real_t(0.0)) { badvol = 1; } } } static inline void IntegrateStressForElems(Index_t numElem, Real_t* sigxx, Real_t* sigyy, Real_t* sigzz, Real_t* determ, int& badvol, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(nodelist); FC(x); FC(y); FC(z); IntegrateStressForElems_cpu(numElem, sigxx, sigyy, sigzz, determ, badvol); SG(fx); SG(fy); SG(fz); } else { FG(nodelist); FG(nodeElemCount); FG(nodeElemCornerList); FG(x); FG(y); FG(z); IntegrateStressForElems_gpu(numElem, sigxx, sigyy, sigzz, determ, badvol, stream_app); SC(fx); SC(fy); SC(fz); } } static inline void CollectDomainNodesToElemNodes(const Index_t elemNum, Real_t elemX[8], Real_t elemY[8], Real_t elemZ[8]) { Index_t nd0i = mesh.nodelist(elemNum, 0); Index_t nd1i = mesh.nodelist(elemNum, 1); Index_t nd2i = mesh.nodelist(elemNum, 2); Index_t nd3i = mesh.nodelist(elemNum, 3); Index_t nd4i = mesh.nodelist(elemNum, 4); Index_t nd5i = mesh.nodelist(elemNum, 5); Index_t nd6i = mesh.nodelist(elemNum, 6); Index_t nd7i = mesh.nodelist(elemNum, 7); elemX[0] = mesh.x(nd0i); elemX[1] = mesh.x(nd1i); elemX[2] = mesh.x(nd2i); elemX[3] = mesh.x(nd3i); elemX[4] = mesh.x(nd4i); elemX[5] = mesh.x(nd5i); elemX[6] = mesh.x(nd6i); elemX[7] = mesh.x(nd7i); elemY[0] = mesh.y(nd0i); elemY[1] = mesh.y(nd1i); elemY[2] = mesh.y(nd2i); elemY[3] = mesh.y(nd3i); elemY[4] = mesh.y(nd4i); elemY[5] = mesh.y(nd5i); elemY[6] = mesh.y(nd6i); elemY[7] = mesh.y(nd7i); elemZ[0] = mesh.z(nd0i); elemZ[1] = mesh.z(nd1i); elemZ[2] = mesh.z(nd2i); elemZ[3] = mesh.z(nd3i); elemZ[4] = mesh.z(nd4i); elemZ[5] = mesh.z(nd5i); elemZ[6] = mesh.z(nd6i); elemZ[7] = mesh.z(nd7i); } __host__ static inline void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3, const 
                                    Real_t x4, const Real_t x5,
                                    const Real_t y0, const Real_t y1,
                                    const Real_t y2, const Real_t y3,
                                    const Real_t y4, const Real_t y5,
                                    const Real_t z0, const Real_t z1,
                                    const Real_t z2, const Real_t z3,
                                    const Real_t z4, const Real_t z5,
                                    Real_t* dvdx, Real_t* dvdy, Real_t* dvdz) {
    const Real_t twelfth = Real_t(1.0) / Real_t(12.0);
    *dvdx = (y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
            (y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
            (y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
    *dvdy = -(x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
            (x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
            (x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
    *dvdz = -(y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
            (y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
            (y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
    *dvdx *= twelfth;
    *dvdy *= twelfth;
    *dvdz *= twelfth;
}

#if 0
/* Disabled: function form of the device volume-derivative helper;
   superseded by the macro below. */
__device__ static inline void VOLUDER(const Real_t a0, const Real_t a1,
                                      const Real_t a2, const Real_t a3,
                                      const Real_t a4, const Real_t a5,
                                      const Real_t b0, const Real_t b1,
                                      const Real_t b2, const Real_t b3,
                                      const Real_t b4, const Real_t b5,
                                      Real_t& dvdc) {
    const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;

    dvdc= (a1 + a2) * (b0 + b1) - (a0 + a1) * (b1 + b2) +
        (a0 + a4) * (b3 + b4) - (a3 + a4) * (b0 + b4) -
        (a2 + a5) * (b3 + b5) + (a3 + a5) * (b2 + b5);
    dvdc *= twelfth;
}
#else
// Even though the above version is inlined, it seems to prohibit some kind of
// compiler optimization.
// This macro version uses many fewer registers and avoids spill-over into
// local memory.
#define VOLUDER(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4, b5, dvdc)          \
    {                                                                          \
        const Real_t twelfth = Real_t(1.0) / Real_t(12.0);                     \
                                                                               \
        dvdc = ((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
               ((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
               ((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5));  \
        dvdc *= twelfth;                                                       \
    }
#endif

/* All eight per-node volume derivatives of one hex element, via eight
   coordinate-permuted VoluDer calls (host version). */
__host__ static inline void CalcElemVolumeDerivative(Real_t dvdx[8],
                                                     Real_t dvdy[8],
                                                     Real_t dvdz[8],
                                                     const Real_t x[8],
                                                     const Real_t y[8],
                                                     const Real_t z[8]) {
    VoluDer(x[1], x[2], x[3], x[4], x[5], x[7], y[1], y[2], y[3], y[4], y[5],
            y[7], z[1], z[2], z[3], z[4], z[5], z[7], &dvdx[0], &dvdy[0],
            &dvdz[0]);
    VoluDer(x[0], x[1], x[2], x[7], x[4], x[6], y[0], y[1], y[2], y[7], y[4],
            y[6], z[0], z[1], z[2], z[7], z[4], z[6], &dvdx[3], &dvdy[3],
            &dvdz[3]);
    VoluDer(x[3], x[0], x[1], x[6], x[7], x[5], y[3], y[0], y[1], y[6], y[7],
            y[5], z[3], z[0], z[1], z[6], z[7], z[5], &dvdx[2], &dvdy[2],
            &dvdz[2]);
    VoluDer(x[2], x[3], x[0], x[5], x[6], x[4], y[2], y[3], y[0], y[5], y[6],
            y[4], z[2], z[3], z[0], z[5], z[6], z[4], &dvdx[1], &dvdy[1],
            &dvdz[1]);
    VoluDer(x[7], x[6], x[5], x[0], x[3], x[1], y[7], y[6], y[5], y[0], y[3],
            y[1], z[7], z[6], z[5], z[0], z[3], z[1], &dvdx[4], &dvdy[4],
            &dvdz[4]);
    VoluDer(x[4], x[7], x[6], x[1], x[0], x[2], y[4], y[7], y[6], y[1], y[0],
            y[2], z[4], z[7], z[6], z[1], z[0], z[2], &dvdx[5], &dvdy[5],
            &dvdz[5]);
    VoluDer(x[5], x[4], x[7], x[2], x[1], x[3], y[5], y[4], y[7], y[2], y[1],
            y[3], z[5], z[4], z[7], z[2], z[1], z[3], &dvdx[6], &dvdy[6],
            &dvdz[6]);
    VoluDer(x[6], x[5], x[4], x[3], x[2], x[0], y[6], y[5], y[4], y[3], y[2],
            y[0], z[6], z[5], z[4], z[3], z[2], z[0], &dvdx[7], &dvdy[7],
            &dvdz[7]);
}

/* Device version: cooperative over a 256-thread block mapped as 8 nodes x
   32 elements.  Each thread holds one node's coordinates; they are staged
   through shared memory so each thread can read the six permuted
   neighbours required by VOLUDER.  (Continues on the next source line.) */
__device__ static inline void CalcElemVolumeDerivative(
    Real_t& dvdx, Real_t& dvdy, Real_t& dvdz, const Real_t x, const Real_t y,
    const Real_t z, unsigned int node) {
    __shared__ Real_t array1[256], array2[256];
    volatile Real_t* va1;
    volatile Real_t* va2;
    unsigned int idx, elem;
    unsigned int ind0,
ind1, ind2, ind3, ind4, ind5; switch (node) { case 0: { ind0 = 1; ind1 = 2; ind2 = 3; ind3 = 4; ind4 = 5; ind5 = 7; break; } case 1: { ind0 = 2; ind1 = 3; ind2 = 0; ind3 = 5; ind4 = 6; ind5 = 4; break; } case 2: { ind0 = 3; ind1 = 0; ind2 = 1; ind3 = 6; ind4 = 7; ind5 = 5; break; } case 3: { ind0 = 0; ind1 = 1; ind2 = 2; ind3 = 7; ind4 = 4; ind5 = 6; break; } case 4: { ind0 = 7; ind1 = 6; ind2 = 5; ind3 = 0; ind4 = 3; ind5 = 1; break; } case 5: { ind0 = 4; ind1 = 7; ind2 = 6; ind3 = 1; ind4 = 0; ind5 = 2; break; } case 6: { ind0 = 5; ind1 = 4; ind2 = 7; ind3 = 2; ind4 = 1; ind5 = 3; break; } case 7: { ind0 = 6; ind1 = 5; ind2 = 4; ind3 = 3; ind4 = 2; ind5 = 0; break; } default: { ind0 = ind1 = ind2 = ind3 = ind4 = ind5 = 0xFFFFFFFF; break; } } idx = threadIdx.x; elem = idx /*& 0x1F*/ - node * 32; va1 = &array1[0]; va2 = &array2[0]; // load y and z __syncthreads(); va1[idx] = y; va2[idx] = z; __syncthreads(); VOLUDER(va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], dvdx); // load x __syncthreads(); va1[idx] = x; __syncthreads(); VOLUDER(va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], dvdy); __syncthreads(); // load y __syncthreads(); va2[idx] = y; __syncthreads(); VOLUDER(va1[ind0 * 32 + elem], va1[ind1 * 32 + elem], va1[ind2 * 32 + elem], va1[ind3 * 32 + elem], va1[ind4 * 32 + elem], va1[ind5 * 32 + elem], va2[ind0 * 32 + elem], va2[ind1 * 32 + elem], va2[ind2 * 32 + elem], va2[ind3 * 32 + elem], va2[ind4 * 32 + elem], va2[ind5 * 32 + elem], dvdz); __syncthreads(); } __host__ static inline void 
/* Flanagan-Belytschko anti-hourglass force for one element (host form):
   for each velocity component, project the 8 nodal velocities onto the
   four hourglass mode vectors (h00..h03), then scatter the modal
   amplitudes back through the same vectors scaled by 'coefficient'. */
CalcElemFBHourglassForce(Real_t* xd, Real_t* yd, Real_t* zd, Real_t* hourgam0,
                         Real_t* hourgam1, Real_t* hourgam2, Real_t* hourgam3,
                         Real_t* hourgam4, Real_t* hourgam5, Real_t* hourgam6,
                         Real_t* hourgam7, Real_t coefficient, Real_t* hgfx,
                         Real_t* hgfy, Real_t* hgfz) {
    Index_t i00 = 0;
    Index_t i01 = 1;
    Index_t i02 = 2;
    Index_t i03 = 3;
    /* x-component: modal amplitudes h00..h03 */
    Real_t h00 = hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
                 hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
                 hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
                 hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
    Real_t h01 = hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
                 hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
                 hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
                 hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
    Real_t h02 = hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1] +
                 hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3] +
                 hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5] +
                 hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
    Real_t h03 = hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
                 hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
                 hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
                 hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
    hgfx[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
                             hourgam0[i02] * h02 + hourgam0[i03] * h03);
    hgfx[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
                             hourgam1[i02] * h02 + hourgam1[i03] * h03);
    hgfx[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
                             hourgam2[i02] * h02 + hourgam2[i03] * h03);
    hgfx[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 +
                             hourgam3[i02] * h02 + hourgam3[i03] * h03);
    hgfx[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
                             hourgam4[i02] * h02 + hourgam4[i03] * h03);
    hgfx[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
                             hourgam5[i02] * h02 + hourgam5[i03] * h03);
    hgfx[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
                             hourgam6[i02] * h02 + hourgam6[i03] * h03);
    hgfx[7] = coefficient * (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
                             hourgam7[i02] * h02 + hourgam7[i03] * h03);
    /* y-component */
    h00 = hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
          hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
          hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
          hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
    h01 = hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
          hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
          hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
          hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
    h02 = hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1] +
          hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3] +
          hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5] +
          hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
    h03 = hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
          hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
          hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
          hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
    hgfy[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
                             hourgam0[i02] * h02 + hourgam0[i03] * h03);
    hgfy[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
                             hourgam1[i02] * h02 + hourgam1[i03] * h03);
    hgfy[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
                             hourgam2[i02] * h02 + hourgam2[i03] * h03);
    hgfy[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 +
                             hourgam3[i02] * h02 + hourgam3[i03] * h03);
    hgfy[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
                             hourgam4[i02] * h02 + hourgam4[i03] * h03);
    hgfy[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
                             hourgam5[i02] * h02 + hourgam5[i03] * h03);
    hgfy[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
                             hourgam6[i02] * h02 + hourgam6[i03] * h03);
    hgfy[7] = coefficient * (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
                             hourgam7[i02] * h02 + hourgam7[i03] * h03);
    /* z-component */
    h00 = hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
          hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
          hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
          hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
    h01 = hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
          hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
          hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
          hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
    h02 = hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1] +
          hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3] +
          hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5] +
          hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
    h03 = hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
          hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
          hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
          hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
    hgfz[0] = coefficient * (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
                             hourgam0[i02] * h02 + hourgam0[i03] * h03);
    hgfz[1] = coefficient * (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
                             hourgam1[i02] * h02 + hourgam1[i03] * h03);
    hgfz[2] = coefficient * (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
                             hourgam2[i02] * h02 + hourgam2[i03] * h03);
    hgfz[3] = coefficient * (hourgam3[i00] * h00 + hourgam3[i01] * h01 +
                             hourgam3[i02] * h02 + hourgam3[i03] * h03);
    hgfz[4] = coefficient * (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
                             hourgam4[i02] * h02 + hourgam4[i03] * h03);
    hgfz[5] = coefficient * (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
                             hourgam5[i02] * h02 + hourgam5[i03] * h03);
    hgfz[6] = coefficient * (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
                             hourgam6[i02] * h02 + hourgam6[i03] * h03);
    hgfz[7] = coefficient * (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
                             hourgam7[i02] * h02 + hourgam7[i03] * h03);
}

/* Shared scratch for SumOverNodes: one slot per thread of a 256-thread
   (32 elements x 8 nodes) block. */
__shared__ Real_t shm_array[32 * 8];

/* Sum 'val' over the 8 node-threads of each element in a 256-thread
   block; every participating thread receives its element's total. */
__device__ static inline Real_t SumOverNodes(Real_t val) {
    // Sum up 8 node values for each element
    // Assumes 256 threads: 32 elements, 8 nodes per element.
    // NOTE: we could probably avoid some of the __syncthreads() if we map
    // 8 nodes of an element to the same warp.
    unsigned int tid = threadIdx.x;
#if 1
#if 0
    unsigned int node=tid>>5;
    unsigned int elem=tid-(node<<5);
#elif 1
    unsigned int node = tid / 32;
    unsigned int elem = tid - (node * 32);
#else
    unsigned int elem = tid & 0x1F;
#endif
    __syncthreads();
    shm_array[tid] = val;
    __syncthreads();
    /* tree reduction across the 8 node groups (stride 128, 64, 32) */
    if (tid < 128) shm_array[tid] += shm_array[tid + 128];
    __syncthreads();
    if (tid < 64) shm_array[tid] += shm_array[tid + 64];
    __syncthreads();
    if (tid < 32) shm_array[tid] += shm_array[tid + 32];
    __syncthreads();
    Real_t ret = shm_array[elem];
    __syncthreads();
    return ret;
#else
#if 0
    unsigned int node=tid>>5;
    unsigned int elem=tid-(node<<5);
#else
    unsigned int node = tid / 32;
    unsigned int elem = tid - (node * 32);
#endif
    unsigned int idx = elem * 8 + node;
    __syncthreads();
    shm_array[idx] = val;
    __syncthreads();
    if (node < 4) shm_array[idx] += shm_array[idx + 4];
    if (node < 2) shm_array[idx] += shm_array[idx + 2];
    if (node < 1) shm_array[idx] += shm_array[idx + 1];
    __syncthreads();
    return shm_array[elem * 8];
#endif
}

/* Device variant: each of the 8 node-threads holds one node's velocity
   components; the four mode dot-products are formed cooperatively via
   SumOverNodes. */
__device__ static inline void CalcElemFBHourglassForce(
    Real_t xd, Real_t yd, Real_t zd, Real_t* hourgam, Real_t coefficient,
    Real_t& hgfx, Real_t& hgfy, Real_t& hgfz) {
    hgfx = 0;
    for (int i = 0; i < 4; i++) {
        Real_t h;
        h = hourgam[i] * xd;
        h = SumOverNodes(h);
        hgfx += hourgam[i] * h;
    }
    hgfx *= coefficient;
    hgfy = 0;
    for (int i = 0; i < 4; i++) {
        Real_t h;
        h = hourgam[i] * yd;
        h = SumOverNodes(h);
        hgfy += hourgam[i] * h;
    }
    hgfy *= coefficient;
    hgfz = 0;
    for (int i = 0; i < 4; i++) {
        Real_t h;
        h = hourgam[i] * zd;
        h = SumOverNodes(h);
        hgfz += hourgam[i] * h;
    }
    hgfz *= coefficient;
}

__global__ void CalcFBHourglassForceForElems_kernel(
    Real_t* determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx,
    Real_t* dvdy, Real_t* dvdz, Real_t hourg, Index_t numElem,
    Index_t* nodelist, Real_t* ss, Real_t* elemMass, Real_t* xd, Real_t* yd,
    Real_t* zd, Real_t* fx_elem, Real_t* fy_elem, Real_t* fz_elem) {
    /*************************************************
     *
     *     FUNCTION: Calculates the
Flanagan-Belytschko anti-hourglass * force. * *************************************************/
  Real_t hgfx, hgfy, hgfz;
  Real_t coefficient;
  Real_t hourgam[4];              /* this node's value of each of the 4 hourglass modes */
  Real_t xd1, yd1, zd1;
  /*************************************************/
  /* compute the hourglass modes */
  const Real_t posf = Real_t(1.);
  const Real_t negf = Real_t(-1.);
  // Assume we will launch 256 threads, which we map to 32 elements, each
  // with 8 per-node threads. Organize so each warp of 32 consecutive
  // threads operates on the same node of different elements.
  // THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!!
  // NOTE(review): in the disabled variants, `bid<<5 + (...)` parses as
  // `bid << (5 + ...)` because '+' binds tighter than '<<' in C++ — that
  // alone explains the differing answers. Only the active '#elif 1' form
  // is correct.
  unsigned int tid = threadIdx.x;
  unsigned int bid = blockIdx.x;
#if 0
  unsigned int node=tid>>5;
  unsigned int elem=bid<<5 + (tid - (node<<5));
#elif 1
  unsigned int node = tid / 32;
  unsigned int elem = bid * 32 + (tid - node * 32);
#elif 0
  unsigned int node = tid / 32;
  ;
  unsigned int elem = bid * 32 + (tid & 0x1F);
#elif 0
  unsigned int node = tid / 32;
  unsigned int elem = bid << 5 + (tid & 0x1F);
#elif 0
  unsigned int node = tid >> 5;
  unsigned int elem = bid * 32 + (tid & 0x1F);
#else
  unsigned int node = tid >> 5;
  unsigned int elem = bid << 5 + (tid & 0x1F);
#endif
  /* Clamp rather than return: every thread must reach the __syncthreads()
     inside SumOverNodes. Out-of-range threads redo element numElem-1. */
  if (elem >= numElem) elem = numElem - 1; // don't return -- need thread to participate in sync operations
  // if (elem<0) elem=0; // debugging test
  Real_t volinv = Real_t(1.0) / determ[elem];
  Real_t ss1, mass1, volume13;
  Real_t xn, yn, zn, dvdxn, dvdyn, dvdzn;
  Real_t hourmodx, hourmody, hourmodz;
#if 1
  /* Node-major layout: entry for (elem, node) is at elem + numElem*node. */
  xn = x8n[elem + numElem * node];
  yn = y8n[elem + numElem * node];
  zn = z8n[elem + numElem * node];
  dvdxn = dvdx[elem + numElem * node];
  dvdyn = dvdy[elem + numElem * node];
  dvdzn = dvdz[elem + numElem * node];
#else
  xn = yn = zn = posf;
  dvdxn = dvdyn = dvdzn = negf;
#endif
#if 1
  /* For each hourglass mode: apply the node's +/-1 gamma sign, sum over the
     element's 8 nodes, then orthogonalize against the volume derivative.
     Mode 0 is negative on nodes 2,3,4,5. */
  hourmodx = xn;
  hourmody = yn;
  hourmodz = zn;
  if (node == 2 || node == 3 || node == 4 || node == 5) {
    hourmodx *= negf;
    hourmody *= negf;
    hourmodz *= negf;
    hourgam[0] = negf;
  } else
    hourgam[0] = posf;
  hourmodx = SumOverNodes(hourmodx);
  hourmody = SumOverNodes(hourmody);
  hourmodz = SumOverNodes(hourmodz);
  hourgam[0] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz);
  /* Mode 1: negative on nodes 1,2,4,7. */
  hourmodx = xn;
  hourmody = yn;
  hourmodz = zn;
  if (node == 1 || node == 2 || node == 4 || node == 7) {
    hourmodx *= negf;
    hourmody *= negf;
    hourmodz *= negf;
    hourgam[1] = negf;
  } else
    hourgam[1] = posf;
  hourmodx = SumOverNodes(hourmodx);
  hourmody = SumOverNodes(hourmody);
  hourmodz = SumOverNodes(hourmodz);
  hourgam[1] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz);
  /* Mode 2: negative on nodes 1,3,5,7. */
  hourmodx = xn;
  hourmody = yn;
  hourmodz = zn;
  if (node == 1 || node == 3 || node == 5 || node == 7) {
    hourmodx *= negf;
    hourmody *= negf;
    hourmodz *= negf;
    hourgam[2] = negf;
  } else
    hourgam[2] = posf;
  hourmodx = SumOverNodes(hourmodx);
  hourmody = SumOverNodes(hourmody);
  hourmodz = SumOverNodes(hourmodz);
  hourgam[2] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz);
  /* Mode 3: negative on nodes 0,2,5,7. */
  hourmodx = xn;
  hourmody = yn;
  hourmodz = zn;
  if (node == 0 || node == 2 || node == 5 || node == 7) {
    hourmodx *= negf;
    hourmody *= negf;
    hourmodz *= negf;
    hourgam[3] = negf;
  } else
    hourgam[3] = posf;
  hourmodx = SumOverNodes(hourmodx);
  hourmody = SumOverNodes(hourmody);
  hourmodz = SumOverNodes(hourmodz);
  hourgam[3] -= volinv * (dvdxn * hourmodx + dvdyn * hourmody + dvdzn * hourmodz);
  /* compute forces */
  /* store forces into h arrays (force arrays) */
  ss1 = ss[elem];
  mass1 = elemMass[elem];
  volume13 = CBRT(determ[elem]);          /* volume^(1/3) */
  Index_t ni = nodelist[elem + numElem * node];
  xd1 = xd[ni];
  yd1 = yd[ni];
  zd1 = zd[ni];
  coefficient = -hourg * Real_t(0.01) * ss1 * mass1 / volume13;
  CalcElemFBHourglassForce(xd1, yd1, zd1, hourgam, coefficient, hgfx, hgfy, hgfz);
#else
  hgfx = xn + dvdxn;
  hgfy = yn + dvdyn;
  hgfz = zn + dvdzn;
#endif
#if 1
  fx_elem[elem + numElem * node] = hgfx;
  fy_elem[elem + numElem * node] = hgfy;
  fz_elem[elem + numElem * node] = hgfz;
#else
  fx_elem[0] = hgfx;
  fy_elem[0] = hgfy;
  fz_elem[0] = hgfz;
#endif
}

/* Host (reference) implementation of the FB hourglass force, looping over
   all elements serially and accumulating directly into mesh.fx/fy/fz. */
static inline void CalcFBHourglassForceForElems_cpu(Real_t*
determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t hourg) {
  /************************************************* * * FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass * force. * *************************************************/
  Index_t numElem = mesh.numElem();
  Real_t hgfx[8], hgfy[8], hgfz[8];
  Real_t coefficient;
  /* gamma[mode][node]: the four +/-1 hourglass base vectors of a hexahedron. */
  Real_t gamma[4][8];
  Real_t hourgam0[4], hourgam1[4], hourgam2[4], hourgam3[4];
  Real_t hourgam4[4], hourgam5[4], hourgam6[4], hourgam7[4];
  Real_t xd1[8], yd1[8], zd1[8];
  gamma[0][0] = Real_t(1.);
  gamma[0][1] = Real_t(1.);
  gamma[0][2] = Real_t(-1.);
  gamma[0][3] = Real_t(-1.);
  gamma[0][4] = Real_t(-1.);
  gamma[0][5] = Real_t(-1.);
  gamma[0][6] = Real_t(1.);
  gamma[0][7] = Real_t(1.);
  gamma[1][0] = Real_t(1.);
  gamma[1][1] = Real_t(-1.);
  gamma[1][2] = Real_t(-1.);
  gamma[1][3] = Real_t(1.);
  gamma[1][4] = Real_t(-1.);
  gamma[1][5] = Real_t(1.);
  gamma[1][6] = Real_t(1.);
  gamma[1][7] = Real_t(-1.);
  gamma[2][0] = Real_t(1.);
  gamma[2][1] = Real_t(-1.);
  gamma[2][2] = Real_t(1.);
  gamma[2][3] = Real_t(-1.);
  gamma[2][4] = Real_t(1.);
  gamma[2][5] = Real_t(-1.);
  gamma[2][6] = Real_t(1.);
  gamma[2][7] = Real_t(-1.);
  gamma[3][0] = Real_t(-1.);
  gamma[3][1] = Real_t(1.);
  gamma[3][2] = Real_t(-1.);
  gamma[3][3] = Real_t(1.);
  gamma[3][4] = Real_t(1.);
  gamma[3][5] = Real_t(-1.);
  gamma[3][6] = Real_t(1.);
  gamma[3][7] = Real_t(-1.);
  /*************************************************/
  /* compute the hourglass modes */
  for (Index_t i2 = 0; i2 < numElem; ++i2) {
    Index_t i3 = 8 * i2;                      /* element-major base index: 8 nodes per element */
    Real_t volinv = Real_t(1.0) / determ[i2];
    Real_t ss1, mass1, volume13;
    for (Index_t i1 = 0; i1 < 4; ++i1) {
      /* Projection of nodal coordinates onto mode i1... */
      Real_t hourmodx = x8n[i3] * gamma[i1][0] + x8n[i3 + 1] * gamma[i1][1] + x8n[i3 + 2] * gamma[i1][2] + x8n[i3 + 3] * gamma[i1][3] + x8n[i3 + 4] * gamma[i1][4] + x8n[i3 + 5] * gamma[i1][5] + x8n[i3 + 6] * gamma[i1][6] + x8n[i3 + 7] * gamma[i1][7];
      Real_t hourmody = y8n[i3] * gamma[i1][0] + y8n[i3 + 1] * gamma[i1][1] + y8n[i3 + 2] * gamma[i1][2] + y8n[i3 + 3] * gamma[i1][3] + y8n[i3 + 4] * gamma[i1][4] + y8n[i3 + 5] * gamma[i1][5] + y8n[i3 + 6] * gamma[i1][6] + y8n[i3 + 7] * gamma[i1][7];
      Real_t hourmodz = z8n[i3] * gamma[i1][0] + z8n[i3 + 1] * gamma[i1][1] + z8n[i3 + 2] * gamma[i1][2] + z8n[i3 + 3] * gamma[i1][3] + z8n[i3 + 4] * gamma[i1][4] + z8n[i3 + 5] * gamma[i1][5] + z8n[i3 + 6] * gamma[i1][6] + z8n[i3 + 7] * gamma[i1][7];
      /* ...then orthogonalize each node's gamma against the volume derivatives. */
      hourgam0[i1] = gamma[i1][0] - volinv * (dvdx[i3] * hourmodx + dvdy[i3] * hourmody + dvdz[i3] * hourmodz);
      hourgam1[i1] = gamma[i1][1] - volinv * (dvdx[i3 + 1] * hourmodx + dvdy[i3 + 1] * hourmody + dvdz[i3 + 1] * hourmodz);
      hourgam2[i1] = gamma[i1][2] - volinv * (dvdx[i3 + 2] * hourmodx + dvdy[i3 + 2] * hourmody + dvdz[i3 + 2] * hourmodz);
      hourgam3[i1] = gamma[i1][3] - volinv * (dvdx[i3 + 3] * hourmodx + dvdy[i3 + 3] * hourmody + dvdz[i3 + 3] * hourmodz);
      hourgam4[i1] = gamma[i1][4] - volinv * (dvdx[i3 + 4] * hourmodx + dvdy[i3 + 4] * hourmody + dvdz[i3 + 4] * hourmodz);
      hourgam5[i1] = gamma[i1][5] - volinv * (dvdx[i3 + 5] * hourmodx + dvdy[i3 + 5] * hourmody + dvdz[i3 + 5] * hourmodz);
      hourgam6[i1] = gamma[i1][6] - volinv * (dvdx[i3 + 6] * hourmodx + dvdy[i3 + 6] * hourmody + dvdz[i3 + 6] * hourmodz);
      hourgam7[i1] = gamma[i1][7] - volinv * (dvdx[i3 + 7] * hourmodx + dvdy[i3 + 7] * hourmody + dvdz[i3 + 7] * hourmodz);
    }
    /* compute forces */
    /* store forces into h arrays (force arrays) */
    ss1 = mesh.ss(i2);
    mass1 = mesh.elemMass(i2);
    volume13 = CBRT(determ[i2]);
    Index_t n0si2 = mesh.nodelist(i2, 0);
    Index_t n1si2 = mesh.nodelist(i2, 1);
    Index_t n2si2 = mesh.nodelist(i2, 2);
    Index_t n3si2 = mesh.nodelist(i2, 3);
    Index_t n4si2 = mesh.nodelist(i2, 4);
    Index_t n5si2 = mesh.nodelist(i2, 5);
    Index_t n6si2 = mesh.nodelist(i2, 6);
    Index_t n7si2 = mesh.nodelist(i2, 7);
    xd1[0] = mesh.xd(n0si2);
    xd1[1] = mesh.xd(n1si2);
    xd1[2] = mesh.xd(n2si2);
    xd1[3] = mesh.xd(n3si2);
    xd1[4] = mesh.xd(n4si2);
    xd1[5] = mesh.xd(n5si2);
    xd1[6] = mesh.xd(n6si2);
    xd1[7] = mesh.xd(n7si2);
    yd1[0] = mesh.yd(n0si2);
    yd1[1] =
mesh.yd(n1si2); yd1[2] = mesh.yd(n2si2); yd1[3] = mesh.yd(n3si2); yd1[4] = mesh.yd(n4si2); yd1[5] = mesh.yd(n5si2); yd1[6] = mesh.yd(n6si2); yd1[7] = mesh.yd(n7si2); zd1[0] = mesh.zd(n0si2); zd1[1] = mesh.zd(n1si2); zd1[2] = mesh.zd(n2si2); zd1[3] = mesh.zd(n3si2); zd1[4] = mesh.zd(n4si2); zd1[5] = mesh.zd(n5si2); zd1[6] = mesh.zd(n6si2); zd1[7] = mesh.zd(n7si2); coefficient = -hourg * Real_t(0.01) * ss1 * mass1 / volume13; CalcElemFBHourglassForce(xd1, yd1, zd1, hourgam0, hourgam1, hourgam2, hourgam3, hourgam4, hourgam5, hourgam6, hourgam7, coefficient, hgfx, hgfy, hgfz); mesh.fx(n0si2) += hgfx[0]; mesh.fy(n0si2) += hgfy[0]; mesh.fz(n0si2) += hgfz[0]; mesh.fx(n1si2) += hgfx[1]; mesh.fy(n1si2) += hgfy[1]; mesh.fz(n1si2) += hgfz[1]; mesh.fx(n2si2) += hgfx[2]; mesh.fy(n2si2) += hgfy[2]; mesh.fz(n2si2) += hgfz[2]; mesh.fx(n3si2) += hgfx[3]; mesh.fy(n3si2) += hgfy[3]; mesh.fz(n3si2) += hgfz[3]; mesh.fx(n4si2) += hgfx[4]; mesh.fy(n4si2) += hgfy[4]; mesh.fz(n4si2) += hgfz[4]; mesh.fx(n5si2) += hgfx[5]; mesh.fy(n5si2) += hgfy[5]; mesh.fz(n5si2) += hgfz[5]; mesh.fx(n6si2) += hgfx[6]; mesh.fy(n6si2) += hgfy[6]; mesh.fz(n6si2) += hgfz[6]; mesh.fx(n7si2) += hgfx[7]; mesh.fy(n7si2) += hgfy[7]; mesh.fz(n7si2) += hgfz[7]; } } static inline void CalcFBHourglassForceForElems_gpu(Real_t* determ, Real_t* x8n, Real_t* y8n, Real_t* z8n, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t hourg, cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); Real_t *fx_elem, *fy_elem, *fz_elem; CUDA(cudaMalloc(&fx_elem, numElem * 8 * sizeof(Real_t))); CUDA(cudaMalloc(&fy_elem, numElem * 8 * sizeof(Real_t))); CUDA(cudaMalloc(&fz_elem, numElem * 8 * sizeof(Real_t))); dim3 dimBlock = dim3(256, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem * 8, dimBlock.x), 1, 1); CalcFBHourglassForceForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hourg, numElem, meshGPU.m_nodelist, meshGPU.m_ss, meshGPU.m_elemMass, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd, fx_elem, 
fy_elem, fz_elem); CUDA_DEBUGSYNC; dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); AddNodeForcesFromElems2_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( mesh.numNode(), meshGPU.m_nodeElemCount, meshGPU.m_nodeElemCornerList, fx_elem, fy_elem, fz_elem, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz); CUDA_DEBUGSYNC; // CUDA( cudaFree(fx_elem) ); // CUDA( cudaFree(fy_elem) ); // CUDA( cudaFree(fz_elem) ); } __global__ void CalcHourglassControlForElems_kernel(Int_t numElem, Index_t* nodelist, Real_t* x, Real_t* y, Real_t* z, Real_t* determ, Real_t* volo, Real_t* v, Real_t* dvdx, Real_t* dvdy, Real_t* dvdz, Real_t* x8n, Real_t* y8n, Real_t* z8n) { Real_t x1, y1, z1; Real_t pfx, pfy, pfz; // THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!! unsigned int tid = threadIdx.x; unsigned int bid = blockIdx.x; #if 0 unsigned int node=tid>>5; unsigned int elem=bid<<5 + (tid - (node<<5)); #elif 1 unsigned int node = tid / 32; unsigned int elem = bid * 32 + (tid - node * 32); #elif 0 unsigned int node = tid / 32; ; unsigned int elem = bid * 32 + (tid & 0x1F); #elif 0 unsigned int node = tid / 32; unsigned int elem = bid << 5 + (tid & 0x1F); #elif 0 unsigned int node = tid >> 5; unsigned int elem = bid * 32 + (tid & 0x1F); #else unsigned int node = tid >> 5; unsigned int elem = bid << 5 + (tid & 0x1F); #endif if (elem >= numElem) elem = numElem - 1; // don't return -- need thread to participate in sync operations Index_t idx = elem + numElem * node; Index_t ni = nodelist[idx]; x1 = x[ni]; y1 = y[ni]; z1 = z[ni]; CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1, node); /* load into temporary storage for FB Hour Glass control */ dvdx[idx] = pfx; dvdy[idx] = pfy; dvdz[idx] = pfz; x8n[idx] = x1; y8n[idx] = y1; z8n[idx] = z1; // if (node==0) determ[elem] = volo[elem] * v[elem]; #if 0 // JDC /* Do a check for negative volumes */ if ( mesh.v(i) <= Real_t(0.0) ) { exit(VolumeError) ; } #endif } static inline void CalcHourglassControlForElems_gpu(Real_t determ[], Real_t 
hgcoef, cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); Index_t numElem8 = numElem * 8; Real_t *dvdx, *dvdy, *dvdz; Real_t *x8n, *y8n, *z8n; CUDA(cudaMalloc(&dvdx, sizeof(Real_t) * numElem8)); CUDA(cudaMalloc(&dvdy, sizeof(Real_t) * numElem8)); CUDA(cudaMalloc(&dvdz, sizeof(Real_t) * numElem8)); CUDA(cudaMalloc(&x8n, sizeof(Real_t) * numElem8)); CUDA(cudaMalloc(&y8n, sizeof(Real_t) * numElem8)); CUDA(cudaMalloc(&z8n, sizeof(Real_t) * numElem8)); dim3 dimBlock = dim3(256, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem * 8, dimBlock.x), 1, 1); CalcHourglassControlForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, determ, meshGPU.m_volo, meshGPU.m_v, dvdx, dvdy, dvdz, x8n, y8n, z8n); CUDA_DEBUGSYNC; // JDC -- need a reduction to check for negative volumes if (hgcoef > Real_t(0.)) { CalcFBHourglassForceForElems_gpu(determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hgcoef, stream_app); // kernel exec } // CUDA( cudaFree(dvdx) ); // CUDA( cudaFree(dvdy) ); // CUDA( cudaFree(dvdz) ); // CUDA( cudaFree(x8n) ); // CUDA( cudaFree(y8n) ); // CUDA( cudaFree(z8n) ); return; } static inline void CalcHourglassControlForElems_cpu(Real_t determ[], Real_t hgcoef) { Index_t i, ii, jj; Real_t x1[8], y1[8], z1[8]; Real_t pfx[8], pfy[8], pfz[8]; Index_t numElem = mesh.numElem(); Index_t numElem8 = numElem * 8; Real_t* dvdx = Allocate<Real_t>(numElem8); Real_t* dvdy = Allocate<Real_t>(numElem8); Real_t* dvdz = Allocate<Real_t>(numElem8); Real_t* x8n = Allocate<Real_t>(numElem8); Real_t* y8n = Allocate<Real_t>(numElem8); Real_t* z8n = Allocate<Real_t>(numElem8); /* start loop over elements */ for (i = 0; i < numElem; ++i) { CollectDomainNodesToElemNodes(i, x1, y1, z1); CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1); /* load into temporary storage for FB Hour Glass control */ for (ii = 0; ii < 8; ++ii) { jj = 8 * i + ii; dvdx[jj] = pfx[ii]; dvdy[jj] = pfy[ii]; dvdz[jj] = pfz[ii]; x8n[jj] = x1[ii]; y8n[jj] = 
y1[ii]; z8n[jj] = z1[ii]; } determ[i] = mesh.volo(i) * mesh.v(i); /* Do a check for negative volumes */ if (mesh.v(i) <= Real_t(0.0)) { exit(VolumeError); } } if (hgcoef > Real_t(0.)) { CalcFBHourglassForceForElems_cpu(determ, x8n, y8n, z8n, dvdx, dvdy, dvdz, hgcoef); } Release(&z8n); Release(&y8n); Release(&x8n); Release(&dvdz); Release(&dvdy); Release(&dvdx); return; } static inline void CalcHourglassControlForElems(Real_t determ[], Real_t hgcoef, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(nodelist); FC(ss); FC(elemMass); FC(xd); FC(yd); FC(zd); FC(fx); FC(fy); FC(fz); CalcHourglassControlForElems_cpu(determ, hgcoef); SG(fx); SG(fy); SG(fz); } else { FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(nodelist); FG(ss); FG(elemMass); FG(xd); FG(yd); FG(zd); FG(fx); FG(fy); FG(fz); CalcHourglassControlForElems_gpu(determ, hgcoef, stream_app); // kernel exec SC(fx); SC(fy); SC(fz); } } static inline void CalcVolumeForceForElems_gpu(cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t hgcoef = mesh.hgcoef(); Real_t *sigxx, *sigyy, *sigzz, *determ; int badvol; CUDA(cudaMalloc(&sigxx, numElem * sizeof(Real_t))); CUDA(cudaMalloc(&sigyy, numElem * sizeof(Real_t))); CUDA(cudaMalloc(&sigzz, numElem * sizeof(Real_t))); CUDA(cudaMalloc(&determ, numElem * sizeof(Real_t))); /* Sum contributions to total stress tensor */ InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 0, stream_app); // call elemlib stress integration loop to produce nodal forces from // material stresses. 
IntegrateStressForElems(numElem, sigxx, sigyy, sigzz, determ, badvol, 0, stream_app); // CUDA( cudaFree(sigxx) ); // CUDA( cudaFree(sigyy) ); // CUDA( cudaFree(sigzz) ); // check for negative element volume if (badvol) exit(VolumeError); CalcHourglassControlForElems(determ, hgcoef, 0, stream_app); // CUDA( cudaFree(determ) ); } } static inline void CalcVolumeForceForElems_cpu(cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t hgcoef = mesh.hgcoef(); Real_t* sigxx = Allocate<Real_t>(numElem); Real_t* sigyy = Allocate<Real_t>(numElem); Real_t* sigzz = Allocate<Real_t>(numElem); Real_t* determ = Allocate<Real_t>(numElem); int badvol; /* Sum contributions to total stress tensor */ InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 1, stream_app); // call elemlib stress integration loop to produce nodal forces from // material stresses. IntegrateStressForElems(numElem, sigxx, sigyy, sigzz, determ, badvol, 1, stream_app); Release(&sigzz); Release(&sigyy); Release(&sigxx); // check for negative element volume if (badvol) exit(VolumeError); #if 0 for ( Index_t k=0 ; k<numElem ; ++k ) { if (determ[k] <= Real_t(0.0)) { exit(VolumeError) ; } } #endif CalcHourglassControlForElems(determ, hgcoef, 1, stream_app); Release(&determ); } } static inline void CalcForceForNodes_gpu(cudaStream_t stream_app) { /* Calcforce calls partial, force, hourq */ CalcVolumeForceForElems_gpu(stream_app); /* Calculate Nodal Forces at domain boundaries */ /* problem->commSBN->Transfer(CommSBN::forces); */ } static inline void CalcForceForNodes_cpu(cudaStream_t stream_app) { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { mesh.fx(i) = Real_t(0.0); mesh.fy(i) = Real_t(0.0); mesh.fz(i) = Real_t(0.0); } /* Calcforce calls partial, force, hourq */ CalcVolumeForceForElems_cpu(stream_app); /* Calculate Nodal Forces at domain boundaries */ /* problem->commSBN->Transfer(CommSBN::forces); */ } static inline void CalcForceForNodes(int useCPU, 
cudaStream_t stream_app) { if (useCPU) { CalcForceForNodes_cpu(stream_app); } else { CalcForceForNodes_gpu(stream_app); } } __global__ void CalcAccelerationForNodes_kernel(int numNode, Real_t* xdd, Real_t* ydd, Real_t* zdd, Real_t* fx, Real_t* fy, Real_t* fz, Real_t* nodalMass) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { xdd[i] = fx[i] / nodalMass[i]; ydd[i] = fy[i] / nodalMass[i]; zdd[i] = fz[i] / nodalMass[i]; } } static inline void CalcAccelerationForNodes_gpu(cudaStream_t stream_app) { dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); CalcAccelerationForNodes_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( mesh.numNode(), meshGPU.m_xdd, meshGPU.m_ydd, meshGPU.m_zdd, meshGPU.m_fx, meshGPU.m_fy, meshGPU.m_fz, meshGPU.m_nodalMass); CUDA_DEBUGSYNC; } static inline void CalcAccelerationForNodes_cpu() { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { mesh.xdd(i) = mesh.fx(i) / mesh.nodalMass(i); mesh.ydd(i) = mesh.fy(i) / mesh.nodalMass(i); mesh.zdd(i) = mesh.fz(i) / mesh.nodalMass(i); } } static inline void CalcAccelerationForNodes(int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(fx); FC(fy); FC(fz); FC(nodalMass); CalcAccelerationForNodes_cpu(); SG(xdd); SG(ydd); SG(zdd); } else { FG(fx); FG(fy); FG(fz); FG(nodalMass); CalcAccelerationForNodes_gpu(stream_app); SC(xdd); SC(ydd); SC(zdd); } } __global__ void ApplyAccelerationBoundaryConditionsForNodes_kernel( int numNodeBC, Real_t* xdd, Real_t* ydd, Real_t* zdd, Index_t* symmX, Index_t* symmY, Index_t* symmZ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNodeBC) { xdd[symmX[i]] = Real_t(0.0); ydd[symmY[i]] = Real_t(0.0); zdd[symmZ[i]] = Real_t(0.0); } } static inline void ApplyAccelerationBoundaryConditionsForNodes_gpu( cudaStream_t stream_app) { Index_t numNodeBC = (mesh.sizeX() + 1) * (mesh.sizeX() + 1); dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 dimGrid(PAD_DIV(numNodeBC, dimBlock.x), 1, 1); 
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numNodeBC, meshGPU.m_xdd, meshGPU.m_ydd, meshGPU.m_zdd, meshGPU.m_symmX, meshGPU.m_symmY, meshGPU.m_symmZ); CUDA_DEBUGSYNC; } static inline void ApplyAccelerationBoundaryConditionsForNodes_cpu() { Index_t numNodeBC = (mesh.sizeX() + 1) * (mesh.sizeX() + 1); for (Index_t i = 0; i < numNodeBC; ++i) mesh.xdd(mesh.symmX(i)) = Real_t(0.0); for (Index_t i = 0; i < numNodeBC; ++i) mesh.ydd(mesh.symmY(i)) = Real_t(0.0); for (Index_t i = 0; i < numNodeBC; ++i) mesh.zdd(mesh.symmZ(i)) = Real_t(0.0); } static inline void ApplyAccelerationBoundaryConditionsForNodes( int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(xdd); FC(ydd); FC(zdd); FC(symmX); FC(symmY); FC(symmZ); ApplyAccelerationBoundaryConditionsForNodes_cpu(); SG(xdd); SG(ydd); SG(zdd); } else { FG(xdd); FG(ydd); FG(zdd); FG(symmX); FG(symmY); FG(symmZ); ApplyAccelerationBoundaryConditionsForNodes_gpu(stream_app); SC(xdd); SC(ydd); SC(zdd); } } __global__ void CalcVelocityForNodes_kernel(int numNode, const Real_t dt, const Real_t u_cut, Real_t* xd, Real_t* yd, Real_t* zd, Real_t* xdd, Real_t* ydd, Real_t* zdd) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { Real_t xdtmp, ydtmp, zdtmp; xdtmp = xd[i] + xdd[i] * dt; if (FABS(xdtmp) < u_cut) xdtmp = 0.0; // Real_t(0.0); xd[i] = xdtmp; ydtmp = yd[i] + ydd[i] * dt; if (FABS(ydtmp) < u_cut) ydtmp = Real_t(0.0); yd[i] = ydtmp; zdtmp = zd[i] + zdd[i] * dt; if (FABS(zdtmp) < u_cut) zdtmp = Real_t(0.0); zd[i] = zdtmp; } } static inline void CalcVelocityForNodes_gpu(const Real_t dt, const Real_t u_cut, cudaStream_t stream_app) { dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 dimGrid(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); CalcVelocityForNodes_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( mesh.numNode(), dt, u_cut, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd, meshGPU.m_xdd, meshGPU.m_ydd, meshGPU.m_zdd); CUDA_DEBUGSYNC; } static inline void CalcVelocityForNodes_cpu(const 
Real_t dt, const Real_t u_cut) { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { Real_t xdtmp, ydtmp, zdtmp; xdtmp = mesh.xd(i) + mesh.xdd(i) * dt; if (FABS(xdtmp) < u_cut) xdtmp = Real_t(0.0); mesh.xd(i) = xdtmp; ydtmp = mesh.yd(i) + mesh.ydd(i) * dt; if (FABS(ydtmp) < u_cut) ydtmp = Real_t(0.0); mesh.yd(i) = ydtmp; zdtmp = mesh.zd(i) + mesh.zdd(i) * dt; if (FABS(zdtmp) < u_cut) zdtmp = Real_t(0.0); mesh.zd(i) = zdtmp; } } static inline void CalcVelocityForNodes(const Real_t dt, const Real_t u_cut, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(xd); FC(yd); FC(zd); FC(xdd); FC(ydd); FC(zdd); CalcVelocityForNodes_cpu(dt, u_cut); SG(xd); SG(yd); SG(zd); } else { FG(xd); FG(yd); FG(zd); FG(xdd); FG(ydd); FG(zdd); CalcVelocityForNodes_gpu(dt, u_cut, stream_app); SC(xd); SC(yd); SC(zd); } } __global__ void CalcPositionForNodes_kernel(int numNode, Real_t dt, Real_t* x, Real_t* y, Real_t* z, Real_t* xd, Real_t* yd, Real_t* zd) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numNode) { x[i] += xd[i] * dt; y[i] += yd[i] * dt; z[i] += zd[i] * dt; } } static inline void CalcPositionForNodes_gpu(const Real_t dt, cudaStream_t stream_app) { dim3 dimBlock(BLOCKSIZE, 1, 1); dim3 dimGrid(PAD_DIV(mesh.numNode(), dimBlock.x), 1, 1); CalcPositionForNodes_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( mesh.numNode(), dt, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd); CUDA_DEBUGSYNC; } static inline void CalcPositionForNodes_cpu(const Real_t dt) { Index_t numNode = mesh.numNode(); for (Index_t i = 0; i < numNode; ++i) { mesh.x(i) += mesh.xd(i) * dt; mesh.y(i) += mesh.yd(i) * dt; mesh.z(i) += mesh.zd(i) * dt; } } static inline void CalcPositionForNodes(const Real_t dt, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); CalcPositionForNodes_cpu(dt); SG(x); SG(y); SG(z); } else { FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); CalcPositionForNodes_gpu(dt, stream_app); 
SC(x);
    SC(y);
    SC(z);
  }
}

/* One Lagrange step for the nodal quantities: forces -> accelerations ->
   acceleration BCs -> velocities -> positions, all routed through the
   CPU/GPU dispatchers. */
static inline void LagrangeNodal(int useCPU, cudaStream_t stream_app) {
  const Real_t delt = mesh.deltatime();
  Real_t u_cut = mesh.u_cut();
  /* time of boundary condition evaluation is beginning of step for force and
   * acceleration boundary conditions. */
  CalcForceForNodes(/*0*/ useCPU, stream_app);
  CalcAccelerationForNodes(useCPU, stream_app);
  ApplyAccelerationBoundaryConditionsForNodes(useCPU, stream_app);
  CalcVelocityForNodes(delt, u_cut, useCPU, stream_app);
  CalcPositionForNodes(delt, useCPU, stream_app);
  return;
}

/* Volume of a hexahedron from its 8 corner coordinates: sum of three
   scalar triple products of edge/diagonal differences, times 1/12. */
__host__ __device__ static inline Real_t CalcElemVolume(const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3, const Real_t x4, const Real_t x5, const Real_t x6, const Real_t x7, const Real_t y0, const Real_t y1, const Real_t y2, const Real_t y3, const Real_t y4, const Real_t y5, const Real_t y6, const Real_t y7, const Real_t z0, const Real_t z1, const Real_t z2, const Real_t z3, const Real_t z4, const Real_t z5, const Real_t z6, const Real_t z7) {
  Real_t twelveth = Real_t(1.0) / Real_t(12.0);
  Real_t dx61 = x6 - x1;
  Real_t dy61 = y6 - y1;
  Real_t dz61 = z6 - z1;
  Real_t dx70 = x7 - x0;
  Real_t dy70 = y7 - y0;
  Real_t dz70 = z7 - z0;
  Real_t dx63 = x6 - x3;
  Real_t dy63 = y6 - y3;
  Real_t dz63 = z6 - z3;
  Real_t dx20 = x2 - x0;
  Real_t dy20 = y2 - y0;
  Real_t dz20 = z2 - z0;
  Real_t dx50 = x5 - x0;
  Real_t dy50 = y5 - y0;
  Real_t dz50 = z5 - z0;
  Real_t dx64 = x6 - x4;
  Real_t dy64 = y6 - y4;
  Real_t dz64 = z6 - z4;
  Real_t dx31 = x3 - x1;
  Real_t dy31 = y3 - y1;
  Real_t dz31 = z3 - z1;
  Real_t dx72 = x7 - x2;
  Real_t dy72 = y7 - y2;
  Real_t dz72 = z7 - z2;
  Real_t dx43 = x4 - x3;
  Real_t dy43 = y4 - y3;
  Real_t dz43 = z4 - z3;
  Real_t dx57 = x5 - x7;
  Real_t dy57 = y5 - y7;
  Real_t dz57 = z5 - z7;
  Real_t dx14 = x1 - x4;
  Real_t dy14 = y1 - y4;
  Real_t dz14 = z1 - z4;
  Real_t dx25 = x2 - x5;
  Real_t dy25 = y2 - y5;
  Real_t dz25 = z2 - z5;
/* Scalar triple product a . (b x c) of three vectors given componentwise. */
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
  ((x1) * ((y2) * (z3) - (z2) * (y3)) + (x2) * ((z1) * (y3) - (y1) * (z3)) + \
   (x3) * ((y1) * (z2) - (z1) * (y2)))
  Real_t volume = TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20, dy31 + dy72, dy63, dy20, dz31 + dz72, dz63, dz20) + TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70, dy43 + dy57, dy64, dy70, dz43 + dz57, dz64, dz70) + TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50, dy14 + dy25, dy61, dy50, dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
  volume *= twelveth;
  return volume;
}

/* Array-form convenience overload. */
__host__ __device__ static inline Real_t CalcElemVolume(const Real_t x[8], const Real_t y[8], const Real_t z[8]) {
  return CalcElemVolume(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7], z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}

/* Returns |f|^2|g|^2 - (f.g)^2 = |f x g|^2 for the two face diagonals, i.e.
   a quantity proportional to the SQUARED face area (caller takes SQRT). */
__host__ __device__ static inline Real_t AreaFace(const Real_t x0, const Real_t x1, const Real_t x2, const Real_t x3, const Real_t y0, const Real_t y1, const Real_t y2, const Real_t y3, const Real_t z0, const Real_t z1, const Real_t z2, const Real_t z3) {
  Real_t fx = (x2 - x0) - (x3 - x1);
  Real_t fy = (y2 - y0) - (y3 - y1);
  Real_t fz = (z2 - z0) - (z3 - z1);
  Real_t gx = (x2 - x0) + (x3 - x1);
  Real_t gy = (y2 - y0) + (y3 - y1);
  Real_t gz = (z2 - z0) + (z3 - z1);
  Real_t area = (fx * fx + fy * fy + fz * fz) * (gx * gx + gy * gy + gz * gz) - (fx * gx + fy * gy + fz * gz) * (fx * gx + fy * gy + fz * gz);
  return area;
}

/* Characteristic length of a hexahedron: 4*volume / sqrt(max squared face
   area) over the element's six faces. */
__host__ __device__ static inline Real_t CalcElemCharacteristicLength( const Real_t x[8], const Real_t y[8], const Real_t z[8], const Real_t volume) {
  Real_t a, charLength = Real_t(0.0);
  a = AreaFace(x[0], x[1], x[2], x[3], y[0], y[1], y[2], y[3], z[0], z[1], z[2], z[3]);
  charLength = FMAX(a, charLength);
  a = AreaFace(x[4], x[5], x[6], x[7], y[4], y[5], y[6], y[7], z[4], z[5], z[6], z[7]);
  charLength = FMAX(a, charLength);
  a = AreaFace(x[0], x[1], x[5], x[4], y[0], y[1], y[5], y[4], z[0], z[1], z[5], z[4]);
  charLength = FMAX(a, charLength);
  a = AreaFace(x[1], x[2], x[6], x[5], y[1], y[2], y[6], y[5], z[1], z[2], z[6], z[5]);
  charLength = FMAX(a, charLength);
  a =
AreaFace(x[2], x[3], x[7], x[6], y[2], y[3], y[7], y[6], z[2], z[3], z[7], z[6]); charLength = FMAX(a, charLength); a = AreaFace(x[3], x[0], x[4], x[7], y[3], y[0], y[4], y[7], z[3], z[0], z[4], z[7]); charLength = FMAX(a, charLength); charLength = Real_t(4.0) * volume / SQRT(charLength); return charLength; } __host__ __device__ static inline void CalcElemVelocityGradient( const Real_t* const xvel, const Real_t* const yvel, const Real_t* const zvel, const Real_t b[][8], const Real_t detJ, Real_t* const d) { const Real_t inv_detJ = Real_t(1.0) / detJ; Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz; const Real_t* const pfx = b[0]; const Real_t* const pfy = b[1]; const Real_t* const pfz = b[2]; d[0] = inv_detJ * (pfx[0] * (xvel[0] - xvel[6]) + pfx[1] * (xvel[1] - xvel[7]) + pfx[2] * (xvel[2] - xvel[4]) + pfx[3] * (xvel[3] - xvel[5])); d[1] = inv_detJ * (pfy[0] * (yvel[0] - yvel[6]) + pfy[1] * (yvel[1] - yvel[7]) + pfy[2] * (yvel[2] - yvel[4]) + pfy[3] * (yvel[3] - yvel[5])); d[2] = inv_detJ * (pfz[0] * (zvel[0] - zvel[6]) + pfz[1] * (zvel[1] - zvel[7]) + pfz[2] * (zvel[2] - zvel[4]) + pfz[3] * (zvel[3] - zvel[5])); dyddx = inv_detJ * (pfx[0] * (yvel[0] - yvel[6]) + pfx[1] * (yvel[1] - yvel[7]) + pfx[2] * (yvel[2] - yvel[4]) + pfx[3] * (yvel[3] - yvel[5])); dxddy = inv_detJ * (pfy[0] * (xvel[0] - xvel[6]) + pfy[1] * (xvel[1] - xvel[7]) + pfy[2] * (xvel[2] - xvel[4]) + pfy[3] * (xvel[3] - xvel[5])); dzddx = inv_detJ * (pfx[0] * (zvel[0] - zvel[6]) + pfx[1] * (zvel[1] - zvel[7]) + pfx[2] * (zvel[2] - zvel[4]) + pfx[3] * (zvel[3] - zvel[5])); dxddz = inv_detJ * (pfz[0] * (xvel[0] - xvel[6]) + pfz[1] * (xvel[1] - xvel[7]) + pfz[2] * (xvel[2] - xvel[4]) + pfz[3] * (xvel[3] - xvel[5])); dzddy = inv_detJ * (pfy[0] * (zvel[0] - zvel[6]) + pfy[1] * (zvel[1] - zvel[7]) + pfy[2] * (zvel[2] - zvel[4]) + pfy[3] * (zvel[3] - zvel[5])); dyddz = inv_detJ * (pfz[0] * (yvel[0] - yvel[6]) + pfz[1] * (yvel[1] - yvel[7]) + pfz[2] * (yvel[2] - yvel[4]) + pfz[3] * (yvel[3] - yvel[5])); d[5] 
= Real_t(.5) * (dxddy + dyddx); d[4] = Real_t(.5) * (dxddz + dzddx); d[3] = Real_t(.5) * (dzddy + dyddz); } __global__ void CalcKinematicsForElems_kernel(Index_t numElem, Real_t dt, Index_t* nodelist, Real_t* volo, Real_t* v, Real_t* x, Real_t* y, Real_t* z, Real_t* xd, Real_t* yd, Real_t* zd, Real_t* vnew, Real_t* delv, Real_t* arealg, Real_t* dxx, Real_t* dyy, Real_t* dzz) { Real_t B[3][8]; /** shape function derivatives */ Real_t D[6]; Real_t x_local[8]; Real_t y_local[8]; Real_t z_local[8]; Real_t xd_local[8]; Real_t yd_local[8]; Real_t zd_local[8]; Real_t detJ = Real_t(0.0); int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < numElem) { Real_t volume; Real_t relativeVolume; // get nodal coordinates from global arrays and copy into local arrays. for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = nodelist[k + lnode * numElem]; x_local[lnode] = x[gnode]; y_local[lnode] = y[gnode]; z_local[lnode] = z[gnode]; } // volume calculations volume = CalcElemVolume(x_local, y_local, z_local); relativeVolume = volume / volo[k]; vnew[k] = relativeVolume; delv[k] = relativeVolume - v[k]; // set characteristic length arealg[k] = CalcElemCharacteristicLength(x_local, y_local, z_local, volume); // get nodal velocities from global array and copy into local arrays. for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = nodelist[k + lnode * numElem]; xd_local[lnode] = xd[gnode]; yd_local[lnode] = yd[gnode]; zd_local[lnode] = zd[gnode]; } Real_t dt2 = Real_t(0.5) * dt; for (Index_t j = 0; j < 8; ++j) { x_local[j] -= dt2 * xd_local[j]; y_local[j] -= dt2 * yd_local[j]; z_local[j] -= dt2 * zd_local[j]; } CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &detJ); CalcElemVelocityGradient(xd_local, yd_local, zd_local, B, detJ, D); // put velocity gradient quantities into their global arrays. 
dxx[k] = D[0]; dyy[k] = D[1]; dzz[k] = D[2]; } } static inline void CalcKinematicsForElems_gpu(Index_t numElem, Real_t dt, cudaStream_t stream_app) { dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1); CalcKinematicsForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numElem, dt, meshGPU.m_nodelist, meshGPU.m_volo, meshGPU.m_v, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd, meshGPU.m_vnew, meshGPU.m_delv, meshGPU.m_arealg, meshGPU.m_dxx, meshGPU.m_dyy, meshGPU.m_dzz); CUDA_DEBUGSYNC; } static inline void CalcKinematicsForElems_cpu(Index_t numElem, Real_t dt) { Real_t B[3][8]; /** shape function derivatives */ Real_t D[6]; Real_t x_local[8]; Real_t y_local[8]; Real_t z_local[8]; Real_t xd_local[8]; Real_t yd_local[8]; Real_t zd_local[8]; Real_t detJ = Real_t(0.0); // loop over all elements for (Index_t k = 0; k < numElem; ++k) { Real_t volume; Real_t relativeVolume; // get nodal coordinates from global arrays and copy into local arrays. for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); x_local[lnode] = mesh.x(gnode); y_local[lnode] = mesh.y(gnode); z_local[lnode] = mesh.z(gnode); } // volume calculations volume = CalcElemVolume(x_local, y_local, z_local); relativeVolume = volume / mesh.volo(k); mesh.vnew(k) = relativeVolume; mesh.delv(k) = relativeVolume - mesh.v(k); // set characteristic length mesh.arealg(k) = CalcElemCharacteristicLength(x_local, y_local, z_local, volume); // get nodal velocities from global array and copy into local arrays. 
for (Index_t lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(k, lnode); xd_local[lnode] = mesh.xd(gnode); yd_local[lnode] = mesh.yd(gnode); zd_local[lnode] = mesh.zd(gnode); } Real_t dt2 = Real_t(0.5) * dt; for (Index_t j = 0; j < 8; ++j) { x_local[j] -= dt2 * xd_local[j]; y_local[j] -= dt2 * yd_local[j]; z_local[j] -= dt2 * zd_local[j]; } CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &detJ); CalcElemVelocityGradient(xd_local, yd_local, zd_local, B, detJ, D); // put velocity gradient quantities into their global arrays. mesh.dxx(k) = D[0]; mesh.dyy(k) = D[1]; mesh.dzz(k) = D[2]; } } static inline void CalcKinematicsForElems(Index_t numElem, Real_t dt, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(nodelist); FC(volo); FC(v); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); CalcKinematicsForElems_cpu(numElem, dt); SG(vnew); SG(delv); SG(arealg); SG(dxx); SG(dyy); SG(dzz); } else { FG(nodelist); FG(volo); FG(v); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); CalcKinematicsForElems_gpu(numElem, dt, stream_app); SC(vnew); SC(delv); SC(arealg); SC(dxx); SC(dyy); SC(dzz); } } __global__ void CalcLagrangeElementsPart2_kernel(Index_t numElem, Real_t* dxx, Real_t* dyy, Real_t* dzz, Real_t* vdov) { int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < numElem) { // calc strain rate and apply as constraint (only done in FB element) Real_t vdovNew = dxx[k] + dyy[k] + dzz[k]; Real_t vdovthird = vdovNew / Real_t(3.0); // make the rate of deformation tensor deviatoric vdov[k] = vdovNew; dxx[k] -= vdovthird; dyy[k] -= vdovthird; dzz[k] -= vdovthird; // See if any volumes are negative, and take appropriate action. 
// NOTE(review): the negative-volume check is disabled in this GPU kernel
// (device code cannot call exit()), but the CPU twin below,
// CalcLagrangeElementsPart2_cpu, still aborts with VolumeError -- the two
// paths diverge on degenerate elements; confirm this is intentional.
// if (mesh.vnew(k) <= Real_t(0.0))
//{
// exit(VolumeError) ;
//}
  }
}

// Launch CalcLagrangeElementsPart2_kernel on stream_app: one thread per
// element, 1-D grid padded up to BLOCKSIZE via PAD_DIV.  The kernel reads
// meshGPU.m_dxx/m_dyy/m_dzz, writes m_vdov = dxx+dyy+dzz, and subtracts
// vdov/3 from each diagonal component in place (deviatoric strain rate).
static inline void CalcLagrangeElementsPart2_gpu(cudaStream_t stream_app) {
  Index_t numElem = mesh.numElem();
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1);
  CalcLagrangeElementsPart2_kernel<<<dimGrid, dimBlock, 0, stream_app>>>(
      numElem, meshGPU.m_dxx, meshGPU.m_dyy, meshGPU.m_dzz, meshGPU.m_vdov);
  CUDA_DEBUGSYNC;
}

// CPU twin of the kernel above: per element, store the volumetric strain
// rate vdov = tr(D) and make the rate-of-deformation tensor deviatoric.
static inline void CalcLagrangeElementsPart2_cpu() {
  Index_t numElem = mesh.numElem();
  // element loop to do some stuff not included in the elemlib function.
  for (Index_t k = 0; k < numElem; ++k) {
    // calc strain rate and apply as constraint (only done in FB element)
    Real_t vdov = mesh.dxx(k) + mesh.dyy(k) + mesh.dzz(k);
    Real_t vdovthird = vdov / Real_t(3.0);
    // make the rate of deformation tensor deviatoric
    mesh.vdov(k) = vdov;
    mesh.dxx(k) -= vdovthird;
    mesh.dyy(k) -= vdovthird;
    mesh.dzz(k) -= vdovthird;
    // See if any volumes are negative, and take appropriate action.
if (mesh.vnew(k) <= Real_t(0.0)) { exit(VolumeError); } } } static inline void CalcLagrangeElementsPart2(int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(dxx); FC(dyy); FC(dzz); CalcLagrangeElementsPart2_cpu(); SG(vdov); SG(dxx); SG(dyy); SG(dzz); } else { FG(dxx); FG(dyy); FG(dzz); CalcLagrangeElementsPart2_gpu(stream_app); SC(vdov); SC(dxx); SC(dyy); SC(dzz); } } static inline void CalcLagrangeElements(Real_t deltatime, int useCPU, cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem > 0) { CalcKinematicsForElems(numElem, deltatime, useCPU, stream_app); CalcLagrangeElementsPart2(useCPU, stream_app); } } __global__ void CalcMonotonicQGradientsForElems_kernel(Index_t numElem, Index_t* nodelist, Real_t* x, Real_t* y, Real_t* z, Real_t* xd, Real_t* yd, Real_t* zd, Real_t* volo, Real_t* vnew, Real_t* delx_zeta, Real_t* delv_zeta, Real_t* delx_xi, Real_t* delv_xi, Real_t* delx_eta, Real_t* delv_eta) { #define SUM4(a, b, c, d) (a + b + c + d) const Real_t ptiny = Real_t(1.e-36); int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElem) { Real_t ax, ay, az; Real_t dxv, dyv, dzv; Index_t n0 = nodelist[i + 0 * numElem]; Index_t n1 = nodelist[i + 1 * numElem]; Index_t n2 = nodelist[i + 2 * numElem]; Index_t n3 = nodelist[i + 3 * numElem]; Index_t n4 = nodelist[i + 4 * numElem]; Index_t n5 = nodelist[i + 5 * numElem]; Index_t n6 = nodelist[i + 6 * numElem]; Index_t n7 = nodelist[i + 7 * numElem]; Real_t x0 = x[n0]; Real_t x1 = x[n1]; Real_t x2 = x[n2]; Real_t x3 = x[n3]; Real_t x4 = x[n4]; Real_t x5 = x[n5]; Real_t x6 = x[n6]; Real_t x7 = x[n7]; Real_t y0 = y[n0]; Real_t y1 = y[n1]; Real_t y2 = y[n2]; Real_t y3 = y[n3]; Real_t y4 = y[n4]; Real_t y5 = y[n5]; Real_t y6 = y[n6]; Real_t y7 = y[n7]; Real_t z0 = z[n0]; Real_t z1 = z[n1]; Real_t z2 = z[n2]; Real_t z3 = z[n3]; Real_t z4 = z[n4]; Real_t z5 = z[n5]; Real_t z6 = z[n6]; Real_t z7 = z[n7]; Real_t xv0 = xd[n0]; Real_t xv1 = xd[n1]; Real_t xv2 = xd[n2]; Real_t xv3 = xd[n3]; Real_t xv4 = 
xd[n4]; Real_t xv5 = xd[n5]; Real_t xv6 = xd[n6]; Real_t xv7 = xd[n7]; Real_t yv0 = yd[n0]; Real_t yv1 = yd[n1]; Real_t yv2 = yd[n2]; Real_t yv3 = yd[n3]; Real_t yv4 = yd[n4]; Real_t yv5 = yd[n5]; Real_t yv6 = yd[n6]; Real_t yv7 = yd[n7]; Real_t zv0 = zd[n0]; Real_t zv1 = zd[n1]; Real_t zv2 = zd[n2]; Real_t zv3 = zd[n3]; Real_t zv4 = zd[n4]; Real_t zv5 = zd[n5]; Real_t zv6 = zd[n6]; Real_t zv7 = zd[n7]; Real_t vol = volo[i] * vnew[i]; Real_t norm = Real_t(1.0) / (vol + ptiny); Real_t dxj = Real_t(-0.25) * (SUM4(x0, x1, x5, x4) - SUM4(x3, x2, x6, x7)); Real_t dyj = Real_t(-0.25) * (SUM4(y0, y1, y5, y4) - SUM4(y3, y2, y6, y7)); Real_t dzj = Real_t(-0.25) * (SUM4(z0, z1, z5, z4) - SUM4(z3, z2, z6, z7)); Real_t dxi = Real_t(0.25) * (SUM4(x1, x2, x6, x5) - SUM4(x0, x3, x7, x4)); Real_t dyi = Real_t(0.25) * (SUM4(y1, y2, y6, y5) - SUM4(y0, y3, y7, y4)); Real_t dzi = Real_t(0.25) * (SUM4(z1, z2, z6, z5) - SUM4(z0, z3, z7, z4)); Real_t dxk = Real_t(0.25) * (SUM4(x4, x5, x6, x7) - SUM4(x0, x1, x2, x3)); Real_t dyk = Real_t(0.25) * (SUM4(y4, y5, y6, y7) - SUM4(y0, y1, y2, y3)); Real_t dzk = Real_t(0.25) * (SUM4(z4, z5, z6, z7) - SUM4(z0, z1, z2, z3)); /* find delvk and delxk ( i cross j ) */ ax = dyi * dzj - dzi * dyj; ay = dzi * dxj - dxi * dzj; az = dxi * dyj - dyi * dxj; delx_zeta[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(0.25) * (SUM4(xv4, xv5, xv6, xv7) - SUM4(xv0, xv1, xv2, xv3)); dyv = Real_t(0.25) * (SUM4(yv4, yv5, yv6, yv7) - SUM4(yv0, yv1, yv2, yv3)); dzv = Real_t(0.25) * (SUM4(zv4, zv5, zv6, zv7) - SUM4(zv0, zv1, zv2, zv3)); delv_zeta[i] = ax * dxv + ay * dyv + az * dzv; /* find delxi and delvi ( j cross k ) */ ax = dyj * dzk - dzj * dyk; ay = dzj * dxk - dxj * dzk; az = dxj * dyk - dyj * dxk; delx_xi[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(0.25) * (SUM4(xv1, xv2, xv6, xv5) - SUM4(xv0, xv3, xv7, xv4)); dyv = Real_t(0.25) * (SUM4(yv1, yv2, 
yv6, yv5) - SUM4(yv0, yv3, yv7, yv4)); dzv = Real_t(0.25) * (SUM4(zv1, zv2, zv6, zv5) - SUM4(zv0, zv3, zv7, zv4)); delv_xi[i] = ax * dxv + ay * dyv + az * dzv; /* find delxj and delvj ( k cross i ) */ ax = dyk * dzi - dzk * dyi; ay = dzk * dxi - dxk * dzi; az = dxk * dyi - dyk * dxi; delx_eta[i] = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(-0.25) * (SUM4(xv0, xv1, xv5, xv4) - SUM4(xv3, xv2, xv6, xv7)); dyv = Real_t(-0.25) * (SUM4(yv0, yv1, yv5, yv4) - SUM4(yv3, yv2, yv6, yv7)); dzv = Real_t(-0.25) * (SUM4(zv0, zv1, zv5, zv4) - SUM4(zv3, zv2, zv6, zv7)); delv_eta[i] = ax * dxv + ay * dyv + az * dzv; } #undef SUM4 } static inline void CalcMonotonicQGradientsForElems_gpu( cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1); CalcMonotonicQGradientsForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z, meshGPU.m_xd, meshGPU.m_yd, meshGPU.m_zd, meshGPU.m_volo, meshGPU.m_vnew, meshGPU.m_delx_zeta, meshGPU.m_delv_zeta, meshGPU.m_delx_xi, meshGPU.m_delv_xi, meshGPU.m_delx_eta, meshGPU.m_delv_eta); CUDA_DEBUGSYNC; } static inline void CalcMonotonicQGradientsForElems_cpu() { #define SUM4(a, b, c, d) (a + b + c + d) Index_t numElem = mesh.numElem(); const Real_t ptiny = Real_t(1.e-36); for (Index_t i = 0; i < numElem; ++i) { Real_t ax, ay, az; Real_t dxv, dyv, dzv; Index_t n0 = mesh.nodelist(i, 0); Index_t n1 = mesh.nodelist(i, 1); Index_t n2 = mesh.nodelist(i, 2); Index_t n3 = mesh.nodelist(i, 3); Index_t n4 = mesh.nodelist(i, 4); Index_t n5 = mesh.nodelist(i, 5); Index_t n6 = mesh.nodelist(i, 6); Index_t n7 = mesh.nodelist(i, 7); Real_t x0 = mesh.x(n0); Real_t x1 = mesh.x(n1); Real_t x2 = mesh.x(n2); Real_t x3 = mesh.x(n3); Real_t x4 = mesh.x(n4); Real_t x5 = mesh.x(n5); Real_t x6 = mesh.x(n6); Real_t x7 = mesh.x(n7); Real_t y0 = mesh.y(n0); Real_t y1 
= mesh.y(n1); Real_t y2 = mesh.y(n2); Real_t y3 = mesh.y(n3); Real_t y4 = mesh.y(n4); Real_t y5 = mesh.y(n5); Real_t y6 = mesh.y(n6); Real_t y7 = mesh.y(n7); Real_t z0 = mesh.z(n0); Real_t z1 = mesh.z(n1); Real_t z2 = mesh.z(n2); Real_t z3 = mesh.z(n3); Real_t z4 = mesh.z(n4); Real_t z5 = mesh.z(n5); Real_t z6 = mesh.z(n6); Real_t z7 = mesh.z(n7); Real_t xv0 = mesh.xd(n0); Real_t xv1 = mesh.xd(n1); Real_t xv2 = mesh.xd(n2); Real_t xv3 = mesh.xd(n3); Real_t xv4 = mesh.xd(n4); Real_t xv5 = mesh.xd(n5); Real_t xv6 = mesh.xd(n6); Real_t xv7 = mesh.xd(n7); Real_t yv0 = mesh.yd(n0); Real_t yv1 = mesh.yd(n1); Real_t yv2 = mesh.yd(n2); Real_t yv3 = mesh.yd(n3); Real_t yv4 = mesh.yd(n4); Real_t yv5 = mesh.yd(n5); Real_t yv6 = mesh.yd(n6); Real_t yv7 = mesh.yd(n7); Real_t zv0 = mesh.zd(n0); Real_t zv1 = mesh.zd(n1); Real_t zv2 = mesh.zd(n2); Real_t zv3 = mesh.zd(n3); Real_t zv4 = mesh.zd(n4); Real_t zv5 = mesh.zd(n5); Real_t zv6 = mesh.zd(n6); Real_t zv7 = mesh.zd(n7); Real_t vol = mesh.volo(i) * mesh.vnew(i); Real_t norm = Real_t(1.0) / (vol + ptiny); Real_t dxj = Real_t(-0.25) * (SUM4(x0, x1, x5, x4) - SUM4(x3, x2, x6, x7)); Real_t dyj = Real_t(-0.25) * (SUM4(y0, y1, y5, y4) - SUM4(y3, y2, y6, y7)); Real_t dzj = Real_t(-0.25) * (SUM4(z0, z1, z5, z4) - SUM4(z3, z2, z6, z7)); Real_t dxi = Real_t(0.25) * (SUM4(x1, x2, x6, x5) - SUM4(x0, x3, x7, x4)); Real_t dyi = Real_t(0.25) * (SUM4(y1, y2, y6, y5) - SUM4(y0, y3, y7, y4)); Real_t dzi = Real_t(0.25) * (SUM4(z1, z2, z6, z5) - SUM4(z0, z3, z7, z4)); Real_t dxk = Real_t(0.25) * (SUM4(x4, x5, x6, x7) - SUM4(x0, x1, x2, x3)); Real_t dyk = Real_t(0.25) * (SUM4(y4, y5, y6, y7) - SUM4(y0, y1, y2, y3)); Real_t dzk = Real_t(0.25) * (SUM4(z4, z5, z6, z7) - SUM4(z0, z1, z2, z3)); /* find delvk and delxk ( i cross j ) */ ax = dyi * dzj - dzi * dyj; ay = dzi * dxj - dxi * dzj; az = dxi * dyj - dyi * dxj; mesh.delx_zeta(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(0.25) * (SUM4(xv4, 
xv5, xv6, xv7) - SUM4(xv0, xv1, xv2, xv3)); dyv = Real_t(0.25) * (SUM4(yv4, yv5, yv6, yv7) - SUM4(yv0, yv1, yv2, yv3)); dzv = Real_t(0.25) * (SUM4(zv4, zv5, zv6, zv7) - SUM4(zv0, zv1, zv2, zv3)); mesh.delv_zeta(i) = ax * dxv + ay * dyv + az * dzv; /* find delxi and delvi ( j cross k ) */ ax = dyj * dzk - dzj * dyk; ay = dzj * dxk - dxj * dzk; az = dxj * dyk - dyj * dxk; mesh.delx_xi(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(0.25) * (SUM4(xv1, xv2, xv6, xv5) - SUM4(xv0, xv3, xv7, xv4)); dyv = Real_t(0.25) * (SUM4(yv1, yv2, yv6, yv5) - SUM4(yv0, yv3, yv7, yv4)); dzv = Real_t(0.25) * (SUM4(zv1, zv2, zv6, zv5) - SUM4(zv0, zv3, zv7, zv4)); mesh.delv_xi(i) = ax * dxv + ay * dyv + az * dzv; /* find delxj and delvj ( k cross i ) */ ax = dyk * dzi - dzk * dyi; ay = dzk * dxi - dxk * dzi; az = dxk * dyi - dyk * dxi; mesh.delx_eta(i) = vol / SQRT(ax * ax + ay * ay + az * az + ptiny); ax *= norm; ay *= norm; az *= norm; dxv = Real_t(-0.25) * (SUM4(xv0, xv1, xv5, xv4) - SUM4(xv3, xv2, xv6, xv7)); dyv = Real_t(-0.25) * (SUM4(yv0, yv1, yv5, yv4) - SUM4(yv3, yv2, yv6, yv7)); dzv = Real_t(-0.25) * (SUM4(zv0, zv1, zv5, zv4) - SUM4(zv3, zv2, zv6, zv7)); mesh.delv_eta(i) = ax * dxv + ay * dyv + az * dzv; } #undef SUM4 } static inline void CalcMonotonicQGradientsForElems(int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(nodelist); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(volo); FC(vnew); CalcMonotonicQGradientsForElems_cpu(); SG(delx_zeta); SG(delv_zeta); SG(delx_xi); SG(delv_xi); SG(delx_eta); SG(delv_eta); } else { FG(nodelist); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(volo); FG(vnew); CalcMonotonicQGradientsForElems_gpu(stream_app); SC(delx_zeta); SC(delv_zeta); SC(delx_xi); SC(delv_xi); SC(delx_eta); SC(delv_eta); } } __global__ void CalcMonotonicQRegionForElems_kernel(Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult, Real_t monoq_max_slope, Real_t ptiny, // the elementset length Index_t 
elength, Index_t* matElemlist, Index_t* elemBC, Index_t* lxim, Index_t* lxip, Index_t* letam, Index_t* letap, Index_t* lzetam, Index_t* lzetap, Real_t* delv_xi, Real_t* delv_eta, Real_t* delv_zeta, Real_t* delx_xi, Real_t* delx_eta, Real_t* delx_zeta, Real_t* vdov, Real_t* elemMass, Real_t* volo, Real_t* vnew, Real_t* qq, Real_t* ql) { int ielem = blockDim.x * blockIdx.x + threadIdx.x; if (ielem < elength) { Real_t qlin, qquad; Real_t phixi, phieta, phizeta; Index_t i = matElemlist[ielem]; Int_t bcMask = elemBC[i]; Real_t delvm, delvp; /* phixi */ Real_t norm = Real_t(1.) / (delv_xi[i] + ptiny); switch (bcMask & XI_M) { case 0: delvm = delv_xi[lxim[i]]; break; case XI_M_SYMM: delvm = delv_xi[i]; break; case XI_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & XI_P) { case 0: delvp = delv_xi[lxip[i]]; break; case XI_P_SYMM: delvp = delv_xi[i]; break; case XI_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phixi = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phixi) phixi = delvm; if (delvp < phixi) phixi = delvp; if (phixi < Real_t(0.)) phixi = Real_t(0.); if (phixi > monoq_max_slope) phixi = monoq_max_slope; /* phieta */ norm = Real_t(1.) 
/ (delv_eta[i] + ptiny); switch (bcMask & ETA_M) { case 0: delvm = delv_eta[letam[i]]; break; case ETA_M_SYMM: delvm = delv_eta[i]; break; case ETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ETA_P) { case 0: delvp = delv_eta[letap[i]]; break; case ETA_P_SYMM: delvp = delv_eta[i]; break; case ETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phieta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phieta) phieta = delvm; if (delvp < phieta) phieta = delvp; if (phieta < Real_t(0.)) phieta = Real_t(0.); if (phieta > monoq_max_slope) phieta = monoq_max_slope; /* phizeta */ norm = Real_t(1.) / (delv_zeta[i] + ptiny); switch (bcMask & ZETA_M) { case 0: delvm = delv_zeta[lzetam[i]]; break; case ZETA_M_SYMM: delvm = delv_zeta[i]; break; case ZETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ZETA_P) { case 0: delvp = delv_zeta[lzetap[i]]; break; case ZETA_P_SYMM: delvp = delv_zeta[i]; break; case ZETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phizeta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phizeta) phizeta = delvm; if (delvp < phizeta) phizeta = delvp; if (phizeta < Real_t(0.)) phizeta = Real_t(0.); if (phizeta > monoq_max_slope) phizeta = monoq_max_slope; /* Remove length scale */ if (vdov[i] > Real_t(0.)) { qlin = Real_t(0.); qquad = Real_t(0.); } else { Real_t delvxxi = delv_xi[i] * delx_xi[i]; Real_t delvxeta = delv_eta[i] * delx_eta[i]; Real_t delvxzeta = delv_zeta[i] * delx_zeta[i]; if (delvxxi > Real_t(0.)) delvxxi = Real_t(0.); if (delvxeta > Real_t(0.)) delvxeta = Real_t(0.); if (delvxzeta > Real_t(0.)) delvxzeta = Real_t(0.); Real_t rho = elemMass[i] / (volo[i] * vnew[i]); qlin = -qlc_monoq * rho * (delvxxi * (Real_t(1.) 
- phixi) + delvxeta * (Real_t(1.) - phieta) + delvxzeta * (Real_t(1.) - phizeta)); qquad = qqc_monoq * rho * (delvxxi * delvxxi * (Real_t(1.) - phixi * phixi) + delvxeta * delvxeta * (Real_t(1.) - phieta * phieta) + delvxzeta * delvxzeta * (Real_t(1.) - phizeta * phizeta)); } qq[i] = qquad; ql[i] = qlin; } } static inline void CalcMonotonicQRegionForElems_gpu( // parameters Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult, Real_t monoq_max_slope, Real_t ptiny, // the elementset length Index_t elength, cudaStream_t stream_app) { dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(elength, dimBlock.x), 1, 1); CalcMonotonicQRegionForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny, elength, meshGPU.m_matElemlist, meshGPU.m_elemBC, meshGPU.m_lxim, meshGPU.m_lxip, meshGPU.m_letam, meshGPU.m_letap, meshGPU.m_lzetam, meshGPU.m_lzetap, meshGPU.m_delv_xi, meshGPU.m_delv_eta, meshGPU.m_delv_zeta, meshGPU.m_delx_xi, meshGPU.m_delx_eta, meshGPU.m_delx_zeta, meshGPU.m_vdov, meshGPU.m_elemMass, meshGPU.m_volo, meshGPU.m_vnew, meshGPU.m_qq, meshGPU.m_ql); CUDA_DEBUGSYNC; } static inline void CalcMonotonicQRegionForElems_cpu( // parameters Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult, Real_t monoq_max_slope, Real_t ptiny, // the elementset length Index_t elength) { for (Index_t ielem = 0; ielem < elength; ++ielem) { Real_t qlin, qquad; Real_t phixi, phieta, phizeta; Index_t i = mesh.matElemlist(ielem); Int_t bcMask = mesh.elemBC(i); Real_t delvm, delvp; /* phixi */ Real_t norm = Real_t(1.) 
/ (mesh.delv_xi(i) + ptiny); switch (bcMask & XI_M) { case 0: delvm = mesh.delv_xi(mesh.lxim(i)); break; case XI_M_SYMM: delvm = mesh.delv_xi(i); break; case XI_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & XI_P) { case 0: delvp = mesh.delv_xi(mesh.lxip(i)); break; case XI_P_SYMM: delvp = mesh.delv_xi(i); break; case XI_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phixi = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phixi) phixi = delvm; if (delvp < phixi) phixi = delvp; if (phixi < Real_t(0.)) phixi = Real_t(0.); if (phixi > monoq_max_slope) phixi = monoq_max_slope; /* phieta */ norm = Real_t(1.) / (mesh.delv_eta(i) + ptiny); switch (bcMask & ETA_M) { case 0: delvm = mesh.delv_eta(mesh.letam(i)); break; case ETA_M_SYMM: delvm = mesh.delv_eta(i); break; case ETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ETA_P) { case 0: delvp = mesh.delv_eta(mesh.letap(i)); break; case ETA_P_SYMM: delvp = mesh.delv_eta(i); break; case ETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phieta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phieta) phieta = delvm; if (delvp < phieta) phieta = delvp; if (phieta < Real_t(0.)) phieta = Real_t(0.); if (phieta > monoq_max_slope) phieta = monoq_max_slope; /* phizeta */ norm = Real_t(1.) 
/ (mesh.delv_zeta(i) + ptiny); switch (bcMask & ZETA_M) { case 0: delvm = mesh.delv_zeta(mesh.lzetam(i)); break; case ZETA_M_SYMM: delvm = mesh.delv_zeta(i); break; case ZETA_M_FREE: delvm = Real_t(0.0); break; default: /* ERROR */; break; } switch (bcMask & ZETA_P) { case 0: delvp = mesh.delv_zeta(mesh.lzetap(i)); break; case ZETA_P_SYMM: delvp = mesh.delv_zeta(i); break; case ZETA_P_FREE: delvp = Real_t(0.0); break; default: /* ERROR */; break; } delvm = delvm * norm; delvp = delvp * norm; phizeta = Real_t(.5) * (delvm + delvp); delvm *= monoq_limiter_mult; delvp *= monoq_limiter_mult; if (delvm < phizeta) phizeta = delvm; if (delvp < phizeta) phizeta = delvp; if (phizeta < Real_t(0.)) phizeta = Real_t(0.); if (phizeta > monoq_max_slope) phizeta = monoq_max_slope; /* Remove length scale */ if (mesh.vdov(i) > Real_t(0.)) { qlin = Real_t(0.); qquad = Real_t(0.); } else { Real_t delvxxi = mesh.delv_xi(i) * mesh.delx_xi(i); Real_t delvxeta = mesh.delv_eta(i) * mesh.delx_eta(i); Real_t delvxzeta = mesh.delv_zeta(i) * mesh.delx_zeta(i); if (delvxxi > Real_t(0.)) delvxxi = Real_t(0.); if (delvxeta > Real_t(0.)) delvxeta = Real_t(0.); if (delvxzeta > Real_t(0.)) delvxzeta = Real_t(0.); Real_t rho = mesh.elemMass(i) / (mesh.volo(i) * mesh.vnew(i)); qlin = -qlc_monoq * rho * (delvxxi * (Real_t(1.) - phixi) + delvxeta * (Real_t(1.) - phieta) + delvxzeta * (Real_t(1.) - phizeta)); qquad = qqc_monoq * rho * (delvxxi * delvxxi * (Real_t(1.) - phixi * phixi) + delvxeta * delvxeta * (Real_t(1.) - phieta * phieta) + delvxzeta * delvxzeta * (Real_t(1.) 
- phizeta * phizeta)); } mesh.qq(i) = qquad; mesh.ql(i) = qlin; } } static inline void CalcMonotonicQRegionForElems( // parameters Real_t qlc_monoq, Real_t qqc_monoq, Real_t monoq_limiter_mult, Real_t monoq_max_slope, Real_t ptiny, // the elementset length Index_t elength, int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(matElemlist); FC(elemBC); FC(lxim); FC(lxip); FC(letam); FC(letap); FC(lzetam); FC(lzetap); FC(delv_xi); FC(delv_eta); FC(delv_zeta); FC(delx_xi); FC(delx_eta); FC(delx_zeta); FC(vdov); FC(elemMass); FC(volo); FC(vnew); CalcMonotonicQRegionForElems_cpu(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny, elength); SG(qq); SG(ql); } else { FG(matElemlist); FG(elemBC); FG(lxim); FG(lxip); FG(letam); FG(letap); FG(lzetam); FG(lzetap); FG(delv_xi); FG(delv_eta); FG(delv_zeta); FG(delx_xi); FG(delx_eta); FG(delx_zeta); FG(vdov); FG(elemMass); FG(volo); FG(vnew); CalcMonotonicQRegionForElems_gpu(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny, elength, stream_app); SC(qq); SC(ql); } } static inline void CalcMonotonicQForElems(int useCPU, cudaStream_t stream_app) { // // initialize parameters // const Real_t ptiny = Real_t(1.e-36); Real_t monoq_max_slope = mesh.monoq_max_slope(); Real_t monoq_limiter_mult = mesh.monoq_limiter_mult(); // // calculate the monotonic q for pure regions // Index_t elength = mesh.numElem(); if (elength > 0) { Real_t qlc_monoq = mesh.qlc_monoq(); Real_t qqc_monoq = mesh.qqc_monoq(); CalcMonotonicQRegionForElems( // parameters qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny, // the elemset length elength, useCPU, stream_app); } } static inline void CalcQForElems(int useCPU, cudaStream_t stream_app) { Real_t qstop = mesh.qstop(); Index_t numElem = mesh.numElem(); // // MONOTONIC Q option // /* Calculate velocity gradients */ CalcMonotonicQGradientsForElems(useCPU, stream_app); /* Transfer veloctiy gradients in the first order elements */ /* 
     problem->commElements->Transfer(CommElements::monoQ) ; */

  CalcMonotonicQForElems(useCPU, stream_app);

  /* Don't allow excessive artificial viscosity */
  /*
  if (numElem != 0) {
    Index_t idx = -1;
    for (Index_t i=0; i<numElem; ++i) {
      if ( mesh.q(i) > qstop ) {
        idx = i ;
        break ;
      }
    }
    if(idx >= 0) {
      exit(QStopError) ;
    }
  }
  */
}

// Pressure EOS update, one thread per element (1-D launch, bounds-checked).
// Computes the linearized bulk coefficient bvc = c1s*(compression+1) and its
// energy derivative pbvc = c1s, then p_new = bvc * e_old with three clamps:
// |p_new| < p_cut -> 0, vnewc >= eosvmax -> 0, p_new < pmin -> pmin.
// c1s is passed in by the host wrapper (2/3 there).
__global__ void CalcPressureForElems_kernel(Real_t* p_new, Real_t* bvc,
                                            Real_t* pbvc, Real_t* e_old,
                                            Real_t* compression, Real_t* vnewc,
                                            Real_t pmin, Real_t p_cut,
                                            Real_t eosvmax, Index_t length,
                                            Real_t c1s) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < length) {
    bvc[i] = c1s * (compression[i] + Real_t(1.));
    pbvc[i] = c1s;
    p_new[i] = bvc[i] * e_old[i];
    if (FABS(p_new[i]) < p_cut) p_new[i] = Real_t(0.0);
    if (vnewc[i] >= eosvmax) /* impossible condition here? */
      p_new[i] = Real_t(0.0);
    if (p_new[i] < pmin) p_new[i] = pmin;
  }
}

// Host wrapper: launch CalcPressureForElems_kernel on stream_app over
// `length` elements, grid padded to BLOCKSIZE.  c1s = 2/3 matches the CPU
// path below so both produce identical results.
static inline void CalcPressureForElems_gpu(Real_t* p_new, Real_t* bvc,
                                            Real_t* pbvc, Real_t* e_old,
                                            Real_t* compression, Real_t* vnewc,
                                            Real_t pmin, Real_t p_cut,
                                            Real_t eosvmax, Index_t length,
                                            cudaStream_t stream_app) {
  Real_t c1s = Real_t(2.0) / Real_t(3.0);
  dim3 dimBlock = dim3(BLOCKSIZE, 1, 1);
  dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1);
  CalcPressureForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>(
      p_new, bvc, pbvc, e_old, compression, vnewc, pmin, p_cut, eosvmax,
      length, c1s);
  CUDA_DEBUGSYNC;
}

// CPU twin of the kernel; the work is split into two loops (coefficients,
// then pressure + clamps) in the original LULESH vector-friendly form.
static inline void CalcPressureForElems_cpu(Real_t* p_new, Real_t* bvc,
                                            Real_t* pbvc, Real_t* e_old,
                                            Real_t* compression, Real_t* vnewc,
                                            Real_t pmin, Real_t p_cut,
                                            Real_t eosvmax, Index_t length) {
  Real_t c1s = Real_t(2.0) / Real_t(3.0);
  for (Index_t i = 0; i < length; ++i) {
    bvc[i] = c1s * (compression[i] + Real_t(1.));
    pbvc[i] = c1s;
  }
  for (Index_t i = 0; i < length; ++i) {
    p_new[i] = bvc[i] * e_old[i];
    if (FABS(p_new[i]) < p_cut) p_new[i] = Real_t(0.0);
    if (vnewc[i] >= eosvmax) /* impossible condition here?
*/ p_new[i] = Real_t(0.0); if (p_new[i] < pmin) p_new[i] = pmin; } } __global__ void CalcEnergyForElemsPart1_kernel(Index_t length, Real_t emin, Real_t* e_old, Real_t* delvc, Real_t* p_old, Real_t* q_old, Real_t* work, Real_t* e_new) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i]) + Real_t(0.5) * work[i]; if (e_new[i] < emin) { e_new[i] = emin; } } } __global__ void CalcEnergyForElemsPart2_kernel(Index_t length, Real_t rho0, Real_t e_cut, Real_t emin, Real_t* compHalfStep, Real_t* delvc, Real_t* pbvc, Real_t* bvc, Real_t* pHalfStep, Real_t* ql, Real_t* qq, Real_t* p_old, Real_t* q_old, Real_t* work, Real_t* e_new, Real_t* q_new) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]); if (delvc[i] > Real_t(0.)) { q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.); } else { Real_t ssc = (pbvc[i] * e_new[i] + vhalf * vhalf * bvc[i] * pHalfStep[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } q_new[i] = (ssc * ql[i] + qq[i]); } e_new[i] = e_new[i] + Real_t(0.5) * delvc[i] * (Real_t(3.0) * (p_old[i] + q_old[i]) - Real_t(4.0) * (pHalfStep[i] + q_new[i])); e_new[i] += Real_t(0.5) * work[i]; if (FABS(e_new[i]) < e_cut) { e_new[i] = Real_t(0.); } if (e_new[i] < emin) { e_new[i] = emin; } } } __global__ void CalcEnergyForElemsPart3_kernel(Index_t length, Real_t rho0, Real_t sixth, Real_t e_cut, Real_t emin, Real_t* pbvc, Real_t* vnewc, Real_t* bvc, Real_t* p_new, Real_t* ql, Real_t* qq, Real_t* p_old, Real_t* q_old, Real_t* pHalfStep, Real_t* q_new, Real_t* delvc, Real_t* e_new) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { Real_t q_tilde; if (delvc[i] > Real_t(0.)) { q_tilde = Real_t(0.); } else { Real_t ssc = (pbvc[i] * e_new[i] + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } 
q_tilde = (ssc * ql[i] + qq[i]); } e_new[i] = e_new[i] - (Real_t(7.0) * (p_old[i] + q_old[i]) - Real_t(8.0) * (pHalfStep[i] + q_new[i]) + (p_new[i] + q_tilde)) * delvc[i] * sixth; if (FABS(e_new[i]) < e_cut) { e_new[i] = Real_t(0.); } if (e_new[i] < emin) { e_new[i] = emin; } } } __global__ void CalcEnergyForElemsPart4_kernel(Index_t length, Real_t rho0, Real_t q_cut, Real_t* delvc, Real_t* pbvc, Real_t* e_new, Real_t* vnewc, Real_t* bvc, Real_t* p_new, Real_t* ql, Real_t* qq, Real_t* q_new) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { if (delvc[i] <= Real_t(0.)) { Real_t ssc = (pbvc[i] * e_new[i] + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } q_new[i] = (ssc * ql[i] + qq[i]); if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.); } } } static inline void CalcEnergyForElems_gpu(Real_t* p_new, Real_t* e_new, Real_t* q_new, Real_t* bvc, Real_t* pbvc, Real_t* p_old, Real_t* e_old, Real_t* q_old, Real_t* compression, Real_t* compHalfStep, Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin, Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin, Real_t* qq, Real_t* ql, Real_t rho0, Real_t eosvmax, Index_t length, cudaStream_t stream_app) { const Real_t sixth = Real_t(1.0) / Real_t(6.0); Real_t* pHalfStep; dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1); CUDA(cudaMalloc(&pHalfStep, sizeof(Real_t) * length)); CalcEnergyForElemsPart1_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, emin, e_old, delvc, p_old, q_old, work, e_new); CUDA_DEBUGSYNC; CalcPressureForElems_gpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc, pmin, p_cut, eosvmax, length, stream_app); CalcEnergyForElemsPart2_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, rho0, e_cut, emin, compHalfStep, delvc, pbvc, bvc, pHalfStep, ql, qq, p_old, q_old, work, e_new, q_new); CUDA_DEBUGSYNC; CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, 
vnewc, pmin, p_cut, eosvmax, length, stream_app); CalcEnergyForElemsPart3_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, rho0, sixth, e_cut, emin, pbvc, vnewc, bvc, p_new, ql, qq, p_old, q_old, pHalfStep, q_new, delvc, e_new); CUDA_DEBUGSYNC; CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc, pmin, p_cut, eosvmax, length, stream_app); CalcEnergyForElemsPart4_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, rho0, q_cut, delvc, pbvc, e_new, vnewc, bvc, p_new, ql, qq, q_new); CUDA_DEBUGSYNC; // CUDA( cudaFree(pHalfStep) ); return; } static inline void CalcEnergyForElems_cpu(Real_t* p_new, Real_t* e_new, Real_t* q_new, Real_t* bvc, Real_t* pbvc, Real_t* p_old, Real_t* e_old, Real_t* q_old, Real_t* compression, Real_t* compHalfStep, Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin, Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin, Real_t* qq, Real_t* ql, Real_t rho0, Real_t eosvmax, Index_t length) { const Real_t sixth = Real_t(1.0) / Real_t(6.0); Real_t* pHalfStep = Allocate<Real_t>(length); for (Index_t i = 0; i < length; ++i) { e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i]) + Real_t(0.5) * work[i]; if (e_new[i] < emin) { e_new[i] = emin; } } CalcPressureForElems_cpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc, pmin, p_cut, eosvmax, length); for (Index_t i = 0; i < length; ++i) { Real_t vhalf = Real_t(1.) / (Real_t(1.) 
+ compHalfStep[i]); if (delvc[i] > Real_t(0.)) { q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.); } else { Real_t ssc = (pbvc[i] * e_new[i] + vhalf * vhalf * bvc[i] * pHalfStep[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } q_new[i] = (ssc * ql[i] + qq[i]); } e_new[i] = e_new[i] + Real_t(0.5) * delvc[i] * (Real_t(3.0) * (p_old[i] + q_old[i]) - Real_t(4.0) * (pHalfStep[i] + q_new[i])); } for (Index_t i = 0; i < length; ++i) { e_new[i] += Real_t(0.5) * work[i]; if (FABS(e_new[i]) < e_cut) { e_new[i] = Real_t(0.); } if (e_new[i] < emin) { e_new[i] = emin; } } CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc, pmin, p_cut, eosvmax, length); for (Index_t i = 0; i < length; ++i) { Real_t q_tilde; if (delvc[i] > Real_t(0.)) { q_tilde = Real_t(0.); } else { Real_t ssc = (pbvc[i] * e_new[i] + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } q_tilde = (ssc * ql[i] + qq[i]); } e_new[i] = e_new[i] - (Real_t(7.0) * (p_old[i] + q_old[i]) - Real_t(8.0) * (pHalfStep[i] + q_new[i]) + (p_new[i] + q_tilde)) * delvc[i] * sixth; if (FABS(e_new[i]) < e_cut) { e_new[i] = Real_t(0.); } if (e_new[i] < emin) { e_new[i] = emin; } } CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc, pmin, p_cut, eosvmax, length); for (Index_t i = 0; i < length; ++i) { if (delvc[i] <= Real_t(0.)) { Real_t ssc = (pbvc[i] * e_new[i] + vnewc[i] * vnewc[i] * bvc[i] * p_new[i]) / rho0; if (ssc <= Real_t(0.)) { ssc = Real_t(.333333e-36); } else { ssc = SQRT(ssc); } q_new[i] = (ssc * ql[i] + qq[i]); if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.); } } Release(&pHalfStep); return; } __global__ void CalcSoundSpeedForElems_kernel(Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc, Real_t* bvc, Real_t ss4o3, Index_t nz, Index_t* matElemlist, Real_t* ss) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < nz) { Index_t iz = matElemlist[i]; 
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] * bvc[i] * pnewc[i]) / rho0; if (ssTmp <= Real_t(1.111111e-36)) { ssTmp = Real_t(1.111111e-36); } ss[iz] = SQRT(ssTmp); } } static inline void CalcSoundSpeedForElems_gpu(Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc, Real_t* bvc, Real_t ss4o3, Index_t nz, cudaStream_t stream_app) { dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(nz, dimBlock.x), 1, 1); CalcSoundSpeedForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( vnewc, rho0, enewc, pnewc, pbvc, bvc, ss4o3, nz, meshGPU.m_matElemlist, meshGPU.m_ss); CUDA_DEBUGSYNC; } static inline void CalcSoundSpeedForElems_cpu(Real_t* vnewc, Real_t rho0, Real_t* enewc, Real_t* pnewc, Real_t* pbvc, Real_t* bvc, Real_t ss4o3, Index_t nz) { for (Index_t i = 0; i < nz; ++i) { Index_t iz = mesh.matElemlist(i); Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] * bvc[i] * pnewc[i]) / rho0; if (ssTmp <= Real_t(1.111111e-36)) { ssTmp = Real_t(1.111111e-36); } mesh.ss(iz) = SQRT(ssTmp); } } __global__ void EvalEOSForElemsPart1_kernel(Index_t length, Real_t eosvmin, Real_t eosvmax, Index_t* matElemlist, Real_t* e, Real_t* delv, Real_t* p, Real_t* q, Real_t* qq, Real_t* ql, Real_t* vnewc, Real_t* e_old, Real_t* delvc, Real_t* p_old, Real_t* q_old, Real_t* compression, Real_t* compHalfStep, Real_t* qq_old, Real_t* ql_old, Real_t* work) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { Index_t zidx = matElemlist[i]; e_old[i] = e[zidx]; delvc[i] = delv[zidx]; p_old[i] = p[zidx]; q_old[i] = q[zidx]; Real_t vchalf; compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.); vchalf = vnewc[i] - delvc[i] * Real_t(.5); compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.); if (eosvmin != Real_t(0.)) { if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */ compHalfStep[i] = compression[i]; } } if (eosvmax != Real_t(0.)) { if (vnewc[i] >= eosvmax) { /* impossible due to calling func? 
*/ p_old[i] = Real_t(0.); compression[i] = Real_t(0.); compHalfStep[i] = Real_t(0.); } } qq_old[i] = qq[zidx]; ql_old[i] = ql[zidx]; work[i] = Real_t(0.); } } __global__ void EvalEOSForElemsPart2_kernel(Index_t length, Index_t* matElemlist, Real_t* p_new, Real_t* e_new, Real_t* q_new, Real_t* p, Real_t* e, Real_t* q) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { Index_t zidx = matElemlist[i]; p[zidx] = p_new[i]; e[zidx] = e_new[i]; q[zidx] = q_new[i]; } } static inline void EvalEOSForElems_gpu(Real_t* vnewc, Index_t length, cudaStream_t stream_app) { Real_t e_cut = mesh.e_cut(); Real_t p_cut = mesh.p_cut(); Real_t ss4o3 = mesh.ss4o3(); Real_t q_cut = mesh.q_cut(); Real_t eosvmax = mesh.eosvmax(); Real_t eosvmin = mesh.eosvmin(); Real_t pmin = mesh.pmin(); Real_t emin = mesh.emin(); Real_t rho0 = mesh.refdens(); Real_t *e_old, *delvc, *p_old, *q_old; Real_t *compression, *compHalfStep; Real_t *qq, *ql, *work, *p_new, *e_new, *q_new, *bvc, *pbvc; CUDA(cudaMalloc(&e_old, sizeof(Real_t) * length)); CUDA(cudaMalloc(&delvc, sizeof(Real_t) * length)); CUDA(cudaMalloc(&p_old, sizeof(Real_t) * length)); CUDA(cudaMalloc(&q_old, sizeof(Real_t) * length)); CUDA(cudaMalloc(&compression, sizeof(Real_t) * length)); CUDA(cudaMalloc(&compHalfStep, sizeof(Real_t) * length)); CUDA(cudaMalloc(&qq, sizeof(Real_t) * length)); CUDA(cudaMalloc(&ql, sizeof(Real_t) * length)); CUDA(cudaMalloc(&work, sizeof(Real_t) * length)); CUDA(cudaMalloc(&p_new, sizeof(Real_t) * length)); CUDA(cudaMalloc(&e_new, sizeof(Real_t) * length)); CUDA(cudaMalloc(&q_new, sizeof(Real_t) * length)); CUDA(cudaMalloc(&bvc, sizeof(Real_t) * length)); CUDA(cudaMalloc(&pbvc, sizeof(Real_t) * length)); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1); EvalEOSForElemsPart1_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, eosvmin, eosvmax, meshGPU.m_matElemlist, meshGPU.m_e, meshGPU.m_delv, meshGPU.m_p, meshGPU.m_q, meshGPU.m_qq, meshGPU.m_ql, vnewc, 
e_old, delvc, p_old, q_old, compression, compHalfStep, qq, ql, work); CUDA_DEBUGSYNC; CalcEnergyForElems_gpu(p_new, e_new, q_new, bvc, pbvc, p_old, e_old, q_old, compression, compHalfStep, vnewc, work, delvc, pmin, p_cut, e_cut, q_cut, emin, qq, ql, rho0, eosvmax, length, stream_app); EvalEOSForElemsPart2_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, meshGPU.m_matElemlist, p_new, e_new, q_new, meshGPU.m_p, meshGPU.m_e, meshGPU.m_q); CUDA_DEBUGSYNC; CalcSoundSpeedForElems_gpu(vnewc, rho0, e_new, p_new, pbvc, bvc, ss4o3, length, stream_app); /*CUDA( cudaFree(pbvc) ); CUDA( cudaFree(bvc) ); CUDA( cudaFree(q_new) ); CUDA( cudaFree(e_new) ); CUDA( cudaFree(p_new) ); CUDA( cudaFree(work) ); CUDA( cudaFree(ql) ); CUDA( cudaFree(qq) ); CUDA( cudaFree(compHalfStep) ); CUDA( cudaFree(compression) ); CUDA( cudaFree(q_old) ); CUDA( cudaFree(p_old) ); CUDA( cudaFree(delvc) ); CUDA( cudaFree(e_old) );*/ } static inline void EvalEOSForElems_cpu(Real_t* vnewc, Index_t length) { Real_t e_cut = mesh.e_cut(); Real_t p_cut = mesh.p_cut(); Real_t ss4o3 = mesh.ss4o3(); Real_t q_cut = mesh.q_cut(); Real_t eosvmax = mesh.eosvmax(); Real_t eosvmin = mesh.eosvmin(); Real_t pmin = mesh.pmin(); Real_t emin = mesh.emin(); Real_t rho0 = mesh.refdens(); Real_t* e_old = Allocate<Real_t>(length); Real_t* delvc = Allocate<Real_t>(length); Real_t* p_old = Allocate<Real_t>(length); Real_t* q_old = Allocate<Real_t>(length); Real_t* compression = Allocate<Real_t>(length); Real_t* compHalfStep = Allocate<Real_t>(length); Real_t* qq = Allocate<Real_t>(length); Real_t* ql = Allocate<Real_t>(length); Real_t* work = Allocate<Real_t>(length); Real_t* p_new = Allocate<Real_t>(length); Real_t* e_new = Allocate<Real_t>(length); Real_t* q_new = Allocate<Real_t>(length); Real_t* bvc = Allocate<Real_t>(length); Real_t* pbvc = Allocate<Real_t>(length); /* compress data, minimal set */ for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); e_old[i] = mesh.e(zidx); } for (Index_t i = 0; i 
< length; ++i) { Index_t zidx = mesh.matElemlist(i); delvc[i] = mesh.delv(zidx); } for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); p_old[i] = mesh.p(zidx); } for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); q_old[i] = mesh.q(zidx); } for (Index_t i = 0; i < length; ++i) { Real_t vchalf; compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.); vchalf = vnewc[i] - delvc[i] * Real_t(.5); compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.); } /* Check for v > eosvmax or v < eosvmin */ if (eosvmin != Real_t(0.)) { for (Index_t i = 0; i < length; ++i) { if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */ compHalfStep[i] = compression[i]; } } } if (eosvmax != Real_t(0.)) { for (Index_t i = 0; i < length; ++i) { if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */ p_old[i] = Real_t(0.); compression[i] = Real_t(0.); compHalfStep[i] = Real_t(0.); } } } for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); qq[i] = mesh.qq(zidx); ql[i] = mesh.ql(zidx); work[i] = Real_t(0.); } CalcEnergyForElems_cpu(p_new, e_new, q_new, bvc, pbvc, p_old, e_old, q_old, compression, compHalfStep, vnewc, work, delvc, pmin, p_cut, e_cut, q_cut, emin, qq, ql, rho0, eosvmax, length); for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); mesh.p(zidx) = p_new[i]; } for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); mesh.e(zidx) = e_new[i]; } for (Index_t i = 0; i < length; ++i) { Index_t zidx = mesh.matElemlist(i); mesh.q(zidx) = q_new[i]; } CalcSoundSpeedForElems_cpu(vnewc, rho0, e_new, p_new, pbvc, bvc, ss4o3, length); Release(&pbvc); Release(&bvc); Release(&q_new); Release(&e_new); Release(&p_new); Release(&work); Release(&ql); Release(&qq); Release(&compHalfStep); Release(&compression); Release(&q_old); Release(&p_old); Release(&delvc); Release(&e_old); } __global__ void ApplyMaterialPropertiesForElemsPart1_kernel( Index_t length, Real_t eosvmin, Real_t eosvmax, 
Index_t* matElemlist, Real_t* vnew, Real_t* vnewc) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { Index_t zn = matElemlist[i]; vnewc[i] = vnew[zn]; if (eosvmin != Real_t(0.)) { if (vnewc[i] < eosvmin) vnewc[i] = eosvmin; } if (eosvmax != Real_t(0.)) { if (vnewc[i] > eosvmax) vnewc[i] = eosvmax; } } } static inline void ApplyMaterialPropertiesForElems_gpu( cudaStream_t stream_app) { Index_t length = mesh.numElem(); if (length != 0) { /* Expose all of the variables needed for material evaluation */ Real_t eosvmin = mesh.eosvmin(); Real_t eosvmax = mesh.eosvmax(); Real_t* vnewc; CUDA(cudaMalloc(&vnewc, sizeof(Real_t) * length)); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1); ApplyMaterialPropertiesForElemsPart1_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, eosvmin, eosvmax, meshGPU.m_matElemlist, meshGPU.m_vnew, vnewc); CUDA_DEBUGSYNC; /* for (Index_t i=0; i<length; ++i) { Index_t zn = mesh.matElemlist(i) ; Real_t vc = mesh.v(zn) ; if (eosvmin != Real_t(0.)) { if (vc < eosvmin) vc = eosvmin ; } if (eosvmax != Real_t(0.)) { if (vc > eosvmax) vc = eosvmax ; } if (vc <= 0.) 
{ exit(VolumeError) ; } } */ EvalEOSForElems_gpu(vnewc, length, stream_app); // CUDA( cudaFree(vnewc) ); } } static inline void ApplyMaterialPropertiesForElems_cpu() { Index_t length = mesh.numElem(); if (length != 0) { /* Expose all of the variables needed for material evaluation */ Real_t eosvmin = mesh.eosvmin(); Real_t eosvmax = mesh.eosvmax(); Real_t* vnewc = Allocate<Real_t>(length); for (Index_t i = 0; i < length; ++i) { Index_t zn = mesh.matElemlist(i); vnewc[i] = mesh.vnew(zn); } if (eosvmin != Real_t(0.)) { for (Index_t i = 0; i < length; ++i) { if (vnewc[i] < eosvmin) vnewc[i] = eosvmin; } } if (eosvmax != Real_t(0.)) { for (Index_t i = 0; i < length; ++i) { if (vnewc[i] > eosvmax) vnewc[i] = eosvmax; } } for (Index_t i = 0; i < length; ++i) { Index_t zn = mesh.matElemlist(i); Real_t vc = mesh.v(zn); if (eosvmin != Real_t(0.)) { if (vc < eosvmin) vc = eosvmin; } if (eosvmax != Real_t(0.)) { if (vc > eosvmax) vc = eosvmax; } if (vc <= 0.) { exit(VolumeError); } } EvalEOSForElems_cpu(vnewc, length); Release(&vnewc); } } static inline void ApplyMaterialPropertiesForElems(int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(matElemlist); FC(vnew); FC(v); FC(e); FC(delv); FC(p); FC(q); FC(qq); FC(ql); ApplyMaterialPropertiesForElems_cpu(); SG(p); SG(e); SG(q); SG(ss); } else { FG(matElemlist); FG(vnew); FG(v); FG(e); FG(delv); FG(p); FG(q); FG(qq); FG(ql); ApplyMaterialPropertiesForElems_gpu(stream_app); SC(p); SC(e); SC(q); SC(ss); } } __global__ void UpdateVolumesForElems_kernel(Index_t numElem, Real_t v_cut, Real_t* vnew, Real_t* v) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElem) { Real_t tmpV; tmpV = vnew[i]; if (FABS(tmpV - Real_t(1.0)) < v_cut) tmpV = Real_t(1.0); v[i] = tmpV; } } static inline void UpdateVolumesForElems_gpu(cudaStream_t stream_app) { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t v_cut = mesh.v_cut(); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(numElem, dimBlock.x), 1, 1); 
UpdateVolumesForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( numElem, v_cut, meshGPU.m_vnew, meshGPU.m_v); } } static inline void UpdateVolumesForElems_cpu() { Index_t numElem = mesh.numElem(); if (numElem != 0) { Real_t v_cut = mesh.v_cut(); for (Index_t i = 0; i < numElem; ++i) { Real_t tmpV; tmpV = mesh.vnew(i); if (FABS(tmpV - Real_t(1.0)) < v_cut) tmpV = Real_t(1.0); mesh.v(i) = tmpV; } } return; } static inline void UpdateVolumesForElems(int useCPU, cudaStream_t stream_app) { if (useCPU) { FC(vnew); UpdateVolumesForElems_cpu(); SG(v); } else { FG(vnew); UpdateVolumesForElems_gpu(stream_app); SC(v); } } static inline void LagrangeElements(int useCPU, cudaStream_t stream_app) { const Real_t deltatime = mesh.deltatime(); CalcLagrangeElements(deltatime, useCPU, stream_app); /* Calculate Q. (Monotonic q option requires communication) */ CalcQForElems(useCPU, stream_app); ApplyMaterialPropertiesForElems(useCPU, stream_app); UpdateVolumesForElems(useCPU, stream_app); } __global__ void CalcCourantConstraintForElems_kernel(Index_t length, Real_t qqc2, Index_t* matElemlist, Real_t* ss, Real_t* vdov, Real_t* arealg, Real_t* mindtcourant) { __shared__ Real_t minArray[BLOCKSIZE]; int i = blockDim.x * blockIdx.x + threadIdx.x; Real_t dtcourant = Real_t(1.0e+20); if (i < length) { Index_t indx = matElemlist[i]; Real_t dtf = ss[indx] * ss[indx]; if (vdov[indx] < Real_t(0.)) { dtf = dtf + qqc2 * arealg[indx] * arealg[indx] * vdov[indx] * vdov[indx]; } dtf = SQRT(dtf); dtf = arealg[indx] / dtf; /* determine minimum timestep with its corresponding elem */ if (vdov[indx] != Real_t(0.)) { if (dtf < dtcourant) { dtcourant = dtf; } } } minArray[threadIdx.x] = dtcourant; reduceMin<Real_t, BLOCKSIZE>(minArray, threadIdx.x); if (threadIdx.x == 0) mindtcourant[blockIdx.x] = minArray[0]; } static inline void CalcCourantConstraintForElems_gpu(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { Real_t qqc = mesh.qqc(); Real_t qqc2 = Real_t(64.0) * qqc * qqc; Index_t 
length = mesh.numElem(); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1); Real_t* dev_mindtcourant; CUDA(cudaMalloc(&dev_mindtcourant, sizeof(Real_t) * dimGrid.x)); CalcCourantConstraintForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, qqc2, meshGPU.m_matElemlist, meshGPU.m_ss, meshGPU.m_vdov, meshGPU.m_arealg, dev_mindtcourant); CUDA_DEBUGSYNC; pthread_mutex_unlock(mutexapp); if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); Real_t* mindtcourant = (Real_t*)malloc(sizeof(Real_t) * dimGrid.x); CUDA(cudaMemcpyAsync(mindtcourant, dev_mindtcourant, sizeof(Real_t) * dimGrid.x, cudaMemcpyDeviceToHost, stream_app)); // CUDA( cudaFree(dev_mindtcourant) ); // finish the MIN computation over the thread blocks Real_t dtcourant; dtcourant = mindtcourant[0]; for (int i = 1; i < dimGrid.x; i++) { MINEQ(dtcourant, mindtcourant[i]); } if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); free(mindtcourant); if (dtcourant < Real_t(1.0e+20)) mesh.dtcourant() = dtcourant; } static inline void CalcCourantConstraintForElems_cpu() { Real_t dtcourant = Real_t(1.0e+20); Index_t courant_elem = -1; Real_t qqc = mesh.qqc(); Index_t length = mesh.numElem(); Real_t qqc2 = Real_t(64.0) * qqc * qqc; for (Index_t i = 0; i < length; ++i) { Index_t indx = mesh.matElemlist(i); Real_t dtf = mesh.ss(indx) * mesh.ss(indx); if (mesh.vdov(indx) < Real_t(0.)) { dtf = dtf + qqc2 * mesh.arealg(indx) * mesh.arealg(indx) * mesh.vdov(indx) * mesh.vdov(indx); } dtf = SQRT(dtf); dtf = mesh.arealg(indx) / dtf; /* determine minimum timestep with its corresponding elem */ if (mesh.vdov(indx) != Real_t(0.)) { if (dtf < dtcourant) { dtcourant = dtf; courant_elem = indx; } } } /* Don't try to register a time constraint if none of the elements * were active */ if (courant_elem != -1) { mesh.dtcourant() = dtcourant; } return; } static inline void CalcCourantConstraintForElems(int useCPU, cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool 
flag) { if (useCPU) { FC(matElemlist); FC(ss); FC(vdov); FC(arealg); CalcCourantConstraintForElems_cpu(); } else { FG(matElemlist); FG(ss); FG(vdov); FG(arealg); CalcCourantConstraintForElems_gpu(stream_app, mutexapp, flag); } } __global__ void CalcHydroConstraintForElems_kernel(Index_t length, Real_t dvovmax, Index_t* matElemlist, Real_t* vdov, Real_t* mindthydro) { __shared__ Real_t minArray[BLOCKSIZE]; int i = blockDim.x * blockIdx.x + threadIdx.x; Real_t dthydro = Real_t(1.0e+20); if (i < length) { Index_t indx = matElemlist[i]; if (vdov[indx] != Real_t(0.)) { Real_t dtdvov = dvovmax / (FABS(vdov[indx]) + Real_t(1.e-20)); if (dthydro > dtdvov) { dthydro = dtdvov; } } } minArray[threadIdx.x] = dthydro; reduceMin<Real_t, BLOCKSIZE>(minArray, threadIdx.x); if (threadIdx.x == 0) mindthydro[blockIdx.x] = minArray[0]; } static inline void CalcHydroConstraintForElems_gpu(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { Real_t dvovmax = mesh.dvovmax(); Index_t length = mesh.numElem(); dim3 dimBlock = dim3(BLOCKSIZE, 1, 1); dim3 dimGrid = dim3(PAD_DIV(length, dimBlock.x), 1, 1); Real_t* dev_mindthydro; CUDA(cudaMalloc(&dev_mindthydro, sizeof(Real_t) * dimGrid.x)); CalcHydroConstraintForElems_kernel<<<dimGrid, dimBlock, 0, stream_app>>>( length, dvovmax, meshGPU.m_matElemlist, meshGPU.m_vdov, dev_mindthydro); CUDA_DEBUGSYNC; pthread_mutex_unlock(mutexapp); if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); Real_t* mindthydro = (Real_t*)malloc(sizeof(Real_t) * dimGrid.x); CUDA(cudaMemcpyAsync(mindthydro, dev_mindthydro, sizeof(Real_t) * dimGrid.x, cudaMemcpyDeviceToHost, stream_app)); // CUDA( cudaFree(dev_mindthydro) ); // finish the MIN computation over the thread blocks Real_t dthydro = mindthydro[0]; for (int i = 1; i < dimGrid.x; i++) { MINEQ(dthydro, mindthydro[i]); } if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); free(mindthydro); if (dthydro < Real_t(1.0e+20)) mesh.dthydro() = dthydro; } static inline void 
CalcHydroConstraintForElems_cpu() { Real_t dthydro = Real_t(1.0e+20); Index_t hydro_elem = -1; Real_t dvovmax = mesh.dvovmax(); Index_t length = mesh.numElem(); for (Index_t i = 0; i < length; ++i) { Index_t indx = mesh.matElemlist(i); if (mesh.vdov(indx) != Real_t(0.)) { Real_t dtdvov = dvovmax / (FABS(mesh.vdov(indx)) + Real_t(1.e-20)); if (dthydro > dtdvov) { dthydro = dtdvov; hydro_elem = indx; } } } if (hydro_elem != -1) { mesh.dthydro() = dthydro; } return; } static inline void CalcHydroConstraintForElems(int useCPU, cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { if (useCPU) { FC(matElemlist); FC(vdov); CalcHydroConstraintForElems_cpu(); } else { FG(matElemlist); FG(vdov); CalcHydroConstraintForElems_gpu(stream_app, mutexapp, flag); } } static inline void CalcTimeConstraintsForElems(int useCPU, cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { /* evaluate time constraint */ CalcCourantConstraintForElems(useCPU, stream_app, mutexapp, flag); pthread_mutex_lock(mutexapp); /* check hydro constraint */ CalcHydroConstraintForElems(useCPU, stream_app, mutexapp, flag); pthread_mutex_lock(mutexapp); } static inline void LagrangeLeapFrog(int useCPU, cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { /* calculate nodal forces, accelerations, velocities, positions, with * applied boundary conditions and slide surface considerations */ LagrangeNodal(useCPU, stream_app); /* calculate element quantities (i.e. 
velocity gradient & q), and update * material states */ LagrangeElements(useCPU, stream_app); CalcTimeConstraintsForElems(useCPU, stream_app, mutexapp, flag); // LagrangeRelease() ; Creation/destruction of temps may be important to // capture } int main_lulesh(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { Index_t edgeElems = 45; Index_t edgeNodes = edgeElems + 1; // Real_t ds = Real_t(1.125)/Real_t(edgeElems) ; /* may accumulate roundoff */ Real_t tx, ty, tz; Index_t nidx, zidx; Index_t meshElems; streamApp = stream_app; /* get run options to measure various metrics */ /* ... */ cuda_init(); /****************************/ /* Initialize Sedov Mesh */ /****************************/ /* construct a uniform box for this processor */ mesh.sizeX() = edgeElems; mesh.sizeY() = edgeElems; mesh.sizeZ() = edgeElems; mesh.numElem() = edgeElems * edgeElems * edgeElems; mesh.numNode() = edgeNodes * edgeNodes * edgeNodes; meshElems = mesh.numElem(); /* allocate field memory */ mesh.AllocateElemPersistent(mesh.numElem()); mesh.AllocateElemTemporary(mesh.numElem()); mesh.AllocateNodalPersistent(mesh.numNode()); mesh.AllocateNodesets(edgeNodes * edgeNodes); /* initialize nodal coordinates */ nidx = 0; tz = Real_t(0.); for (Index_t plane = 0; plane < edgeNodes; ++plane) { ty = Real_t(0.); for (Index_t row = 0; row < edgeNodes; ++row) { tx = Real_t(0.); for (Index_t col = 0; col < edgeNodes; ++col) { mesh.x(nidx) = tx; mesh.y(nidx) = ty; mesh.z(nidx) = tz; ++nidx; // tx += ds ; /* may accumulate roundoff... */ tx = Real_t(1.125) * Real_t(col + 1) / Real_t(edgeElems); } // ty += ds ; /* may accumulate roundoff... */ ty = Real_t(1.125) * Real_t(row + 1) / Real_t(edgeElems); } // tz += ds ; /* may accumulate roundoff... 
*/ tz = Real_t(1.125) * Real_t(plane + 1) / Real_t(edgeElems); } /* embed hexehedral elements in nodal point lattice */ nidx = 0; zidx = 0; for (Index_t plane = 0; plane < edgeElems; ++plane) { for (Index_t row = 0; row < edgeElems; ++row) { for (Index_t col = 0; col < edgeElems; ++col) { mesh.nodelist(zidx, 0) = nidx; mesh.nodelist(zidx, 1) = nidx + 1; mesh.nodelist(zidx, 2) = nidx + edgeNodes + 1; mesh.nodelist(zidx, 3) = nidx + edgeNodes; mesh.nodelist(zidx, 4) = nidx + edgeNodes * edgeNodes; mesh.nodelist(zidx, 5) = nidx + edgeNodes * edgeNodes + 1; mesh.nodelist(zidx, 6) = nidx + edgeNodes * edgeNodes + edgeNodes + 1; mesh.nodelist(zidx, 7) = nidx + edgeNodes * edgeNodes + edgeNodes; ++zidx; ++nidx; } ++nidx; } nidx += edgeNodes; } /* Create a material IndexSet (entire mesh same material for now) */ for (Index_t i = 0; i < meshElems; ++i) { mesh.matElemlist(i) = i; } /* initialize material parameters */ mesh.dtfixed() = Real_t(-1.0e-7); mesh.deltatime() = Real_t(1.0e-7); mesh.deltatimemultlb() = Real_t(1.1); mesh.deltatimemultub() = Real_t(1.2); mesh.stoptime() = Real_t(1.0e-2); mesh.dtcourant() = Real_t(1.0e+20); mesh.dthydro() = Real_t(1.0e+20); mesh.dtmax() = Real_t(1.0e-2); mesh.time() = Real_t(0.); mesh.cycle() = 0; mesh.e_cut() = Real_t(1.0e-7); mesh.p_cut() = Real_t(1.0e-7); mesh.q_cut() = Real_t(1.0e-7); mesh.u_cut() = Real_t(1.0e-7); mesh.v_cut() = Real_t(1.0e-10); mesh.hgcoef() = Real_t(3.0); mesh.ss4o3() = Real_t(4.0) / Real_t(3.0); mesh.qstop() = Real_t(1.0e+12); mesh.monoq_max_slope() = Real_t(1.0); mesh.monoq_limiter_mult() = Real_t(2.0); mesh.qlc_monoq() = Real_t(0.5); mesh.qqc_monoq() = Real_t(2.0) / Real_t(3.0); mesh.qqc() = Real_t(2.0); mesh.pmin() = Real_t(0.); mesh.emin() = Real_t(-1.0e+15); mesh.dvovmax() = Real_t(0.1); mesh.eosvmax() = Real_t(1.0e+9); mesh.eosvmin() = Real_t(1.0e-9); mesh.refdens() = Real_t(1.0); /* initialize field data */ for (Index_t i = 0; i < meshElems; ++i) { Real_t x_local[8], y_local[8], z_local[8]; for (Index_t 
lnode = 0; lnode < 8; ++lnode) { Index_t gnode = mesh.nodelist(i, lnode); x_local[lnode] = mesh.x(gnode); y_local[lnode] = mesh.y(gnode); z_local[lnode] = mesh.z(gnode); } // volume calculations Real_t volume = CalcElemVolume(x_local, y_local, z_local); mesh.volo(i) = volume; mesh.elemMass(i) = volume; for (Index_t j = 0; j < 8; ++j) { Index_t idx = mesh.nodelist(i, j); mesh.nodalMass(idx) += volume / Real_t(8.0); } } /* deposit energy */ mesh.e(0) = Real_t(3.948746e+7); /* set up symmetry nodesets */ nidx = 0; for (Index_t i = 0; i < edgeNodes; ++i) { Index_t planeInc = i * edgeNodes * edgeNodes; Index_t rowInc = i * edgeNodes; for (Index_t j = 0; j < edgeNodes; ++j) { mesh.symmX(nidx) = planeInc + j * edgeNodes; mesh.symmY(nidx) = planeInc + j; mesh.symmZ(nidx) = rowInc + j; ++nidx; } } /* set up elemement connectivity information */ mesh.lxim(0) = 0; for (Index_t i = 1; i < meshElems; ++i) { mesh.lxim(i) = i - 1; mesh.lxip(i - 1) = i; } mesh.lxip(meshElems - 1) = meshElems - 1; for (Index_t i = 0; i < edgeElems; ++i) { mesh.letam(i) = i; mesh.letap(meshElems - edgeElems + i) = meshElems - edgeElems + i; } for (Index_t i = edgeElems; i < meshElems; ++i) { mesh.letam(i) = i - edgeElems; mesh.letap(i - edgeElems) = i; } for (Index_t i = 0; i < edgeElems * edgeElems; ++i) { mesh.lzetam(i) = i; mesh.lzetap(meshElems - edgeElems * edgeElems + i) = meshElems - edgeElems * edgeElems + i; } for (Index_t i = edgeElems * edgeElems; i < meshElems; ++i) { mesh.lzetam(i) = i - edgeElems * edgeElems; mesh.lzetap(i - edgeElems * edgeElems) = i; } /* set up boundary condition information */ for (Index_t i = 0; i < meshElems; ++i) { mesh.elemBC(i) = 0; /* clear BCs by default */ } /* faces on "external" boundaries will be */ /* symmetry plane or free surface BCs */ for (Index_t i = 0; i < edgeElems; ++i) { Index_t planeInc = i * edgeElems * edgeElems; Index_t rowInc = i * edgeElems; for (Index_t j = 0; j < edgeElems; ++j) { mesh.elemBC(planeInc + j * edgeElems) |= XI_M_SYMM; 
mesh.elemBC(planeInc + j * edgeElems + edgeElems - 1) |= XI_P_FREE; mesh.elemBC(planeInc + j) |= ETA_M_SYMM; mesh.elemBC(planeInc + j + edgeElems * edgeElems - edgeElems) |= ETA_P_FREE; mesh.elemBC(rowInc + j) |= ZETA_M_SYMM; mesh.elemBC(rowInc + j + meshElems - edgeElems * edgeElems) |= ZETA_P_FREE; } } mesh.AllocateNodeElemIndexes(); /* initialize meshGPU */ meshGPU.init(&mesh); meshGPU.freshenGPU(); /* timestep to solution */ int its = 0; #if 0 while (its<50) { #else while (mesh.time() < mesh.stoptime()) { #endif TimeIncrement(); LagrangeLeapFrog(0, stream_app, mutexapp, flag); its++; /* problem->commNodes->Transfer(CommNodes::syncposvel) ; */ #if LULESH_SHOW_PROGRESS printf("time = %e, dt=%e\n", double(mesh.time()), double(mesh.deltatime())); #endif } printf("iterations: %d\n", its); // FC(x); // FILE *fp = fopen("x.asc","wb"); // for (Index_t i=0; i<mesh.numElem(); i++) // fprintf(fp,"%.6f\n",mesh.x(i)); // fclose(fp); return 0; }
28790f14a39184cf4cdd812194d3087e325e937b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
NMS implementation in CUDA from pytorch framework
(https://github.com/pytorch/vision/tree/master/torchvision/csrc/cuda on Nov 13 2019)

Adapted for additional 3D capability by G. Ramien, DKFZ Heidelberg
Parts of this code are from torchvision and thus licensed under BSD 3-Clause License

Copyright (c) Soumith Chintala 2016, All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of
  conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of
  conditions and the following disclaimer in the documentation and/or other materials
  provided with the distribution.

* Neither the name of the copyright holder nor the names of its contributors may be used
  to endorse or promote products derived from this software without specific prior
  written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "cuda_helpers.h"
#include <iostream>
#include <vector>

// 64: one bit of an unsigned long long per box inside a tile
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Intersection-over-union of two 2-D boxes (each 4 contiguous values).
template <typename T>
__device__ inline float devIoU(T const* const a, T const* const b) {
  // a, b hold box coords as (y1, x1, y2, x2) with y1 < y2 etc.
  T bottom = max(a[0], b[0]), top = min(a[2], b[2]);
  T left = max(a[1], b[1]), right = min(a[3], b[3]);
  T width = max(right - left, (T)0), height = max(top - bottom, (T)0);
  T interS = width * height;
  T Sa = (a[2] - a[0]) * (a[3] - a[1]);
  T Sb = (b[2] - b[0]) * (b[3] - b[1]);
  return interS / (Sa + Sb - interS);
}

// Intersection-over-union of two 3-D boxes (each 6 contiguous values).
template <typename T>
__device__ inline float devIoU_3d(T const* const a, T const* const b) {
  // a, b hold box coords as (y1, x1, y2, x2, z1, z2) with y1 < y2 etc.
  // get coordinates of intersection, calc intersection
  T bottom = max(a[0], b[0]), top = min(a[2], b[2]);
  T left = max(a[1], b[1]), right = min(a[3], b[3]);
  T front = max(a[4], b[4]), back = min(a[5], b[5]);
  T width = max(right - left, (T)0), height = max(top - bottom, (T)0);
  T depth = max(back - front, (T)0);
  T interS = width * height * depth;
  // calc separate boxes volumes
  T Sa = (a[2] - a[0]) * (a[3] - a[1]) * (a[5] - a[4]);
  T Sb = (b[2] - b[0]) * (b[3] - b[1]) * (b[5] - b[4]);
  return interS / (Sa + Sb - interS);
}

// Tile-vs-tile suppression kernel for 2-D boxes.
// Grid is (col_blocks x col_blocks); block (row_start, col_start) compares the
// boxes of row tile `row_start` against the boxes of column tile `col_start`
// and writes, per row box, a 64-bit mask whose bit i means "column box i
// overlaps above iou_threshold".  dev_boxes must be sorted by descending score.
template <typename T>
__global__ void nms_kernel(const int n_boxes, const float iou_threshold,
    const T* dev_boxes, unsigned long long* dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // stage this column tile's boxes in shared memory
  __shared__ T block_boxes[threadsPerBlock * 4];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 4 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
    block_boxes[threadIdx.x * 4 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
    block_boxes[threadIdx.x * 4 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
    block_boxes[threadIdx.x * 4 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const T* cur_box = dev_boxes + cur_box_idx * 4;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile only compare against later (lower-scored) boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU<T>(cur_box, block_boxes + i * 4) > iou_threshold) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// Same tiling scheme as nms_kernel, but for 3-D boxes (6 coords per box).
template <typename T>
__global__ void nms_kernel_3d(const int n_boxes, const float iou_threshold,
    const T* dev_boxes, unsigned long long* dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // stage this column tile's boxes in shared memory
  __shared__ T block_boxes[threadsPerBlock * 6];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 6 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
    block_boxes[threadIdx.x * 6 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
    block_boxes[threadIdx.x * 6 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
    block_boxes[threadIdx.x * 6 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
    block_boxes[threadIdx.x * 6 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
    block_boxes[threadIdx.x * 6 + 5] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const T* cur_box = dev_boxes + cur_box_idx * 6;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile only compare against later (lower-scored) boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU_3d<T>(cur_box, block_boxes + i * 6) > iou_threshold) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// Host entry point: sorts boxes by descending score, launches the pairwise
// overlap-mask kernel (2-D or 3-D depending on dets' second dimension), then
// greedily walks the mask on the CPU to collect kept boxes.
// Returns indices into the ORIGINAL (unsorted) dets ordering.
at::Tensor nms_cuda(const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) {
  /* dets expected as (n_dets, dim) where dim=4 in 2D, dim=6 in 3D */
  AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
  AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");

  at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
  bool is_3d = dets.size(1) == 6;

  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
  auto dets_sorted = dets.index_select(0, order_t);

  int dets_num = dets.size(0);

  const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);

  // one 64-bit overlap mask per (box, column tile) pair
  at::Tensor mask =
      at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));

  dim3 blocks(col_blocks, col_blocks);
  dim3 threads(threadsPerBlock);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  if (is_3d) {
    //std::cout << "performing NMS on 3D boxes in CUDA" << std::endl;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        dets_sorted.type(), "nms_kernel_cuda", [&] {
          hipLaunchKernelGGL(( nms_kernel_3d<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
              dets_num,
              iou_threshold,
              dets_sorted.data_ptr<scalar_t>(),
              (unsigned long long*)mask.data_ptr<int64_t>());
        });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        dets_sorted.type(), "nms_kernel_cuda", [&] {
          hipLaunchKernelGGL(( nms_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
              dets_num,
              iou_threshold,
              dets_sorted.data_ptr<scalar_t>(),
              (unsigned long long*)mask.data_ptr<int64_t>());
        });
  }

  // greedy suppression on the host: walk boxes in score order, keep a box
  // unless a previously-kept box already suppressed it, then OR in its mask
  at::Tensor mask_cpu = mask.to(at::kCPU);
  unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  at::Tensor keep =
      at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
  int64_t* keep_out = keep.data_ptr<int64_t>();

  int num_to_keep = 0;
  for (int i = 0; i < dets_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long* p = mask_host + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }

  AT_CUDA_CHECK(hipGetLastError());
  // map kept positions (in sorted order) back to original indices
  return order_t.index(
      {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
           .to(order_t.device(), keep.scalar_type())});
}
28790f14a39184cf4cdd812194d3087e325e937b.cu
/*
NMS implementation in CUDA from pytorch framework
(https://github.com/pytorch/vision/tree/master/torchvision/csrc/cuda on Nov 13 2019)

Adapted for additional 3D capability by G. Ramien, DKFZ Heidelberg
Parts of this code are from torchvision and thus licensed under BSD 3-Clause License

Copyright (c) Soumith Chintala 2016, All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of
  conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of
  conditions and the following disclaimer in the documentation and/or other materials
  provided with the distribution.

* Neither the name of the copyright holder nor the names of its contributors may be used
  to endorse or promote products derived from this software without specific prior
  written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "cuda_helpers.h"
#include <iostream>
#include <vector>

// 64: one bit of an unsigned long long per box inside a tile
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Intersection-over-union of two 2-D boxes (each 4 contiguous values).
template <typename T>
__device__ inline float devIoU(T const* const a, T const* const b) {
  // a, b hold box coords as (y1, x1, y2, x2) with y1 < y2 etc.
  T bottom = max(a[0], b[0]), top = min(a[2], b[2]);
  T left = max(a[1], b[1]), right = min(a[3], b[3]);
  T width = max(right - left, (T)0), height = max(top - bottom, (T)0);
  T interS = width * height;
  T Sa = (a[2] - a[0]) * (a[3] - a[1]);
  T Sb = (b[2] - b[0]) * (b[3] - b[1]);
  return interS / (Sa + Sb - interS);
}

// Intersection-over-union of two 3-D boxes (each 6 contiguous values).
template <typename T>
__device__ inline float devIoU_3d(T const* const a, T const* const b) {
  // a, b hold box coords as (y1, x1, y2, x2, z1, z2) with y1 < y2 etc.
  // get coordinates of intersection, calc intersection
  T bottom = max(a[0], b[0]), top = min(a[2], b[2]);
  T left = max(a[1], b[1]), right = min(a[3], b[3]);
  T front = max(a[4], b[4]), back = min(a[5], b[5]);
  T width = max(right - left, (T)0), height = max(top - bottom, (T)0);
  T depth = max(back - front, (T)0);
  T interS = width * height * depth;
  // calc separate boxes volumes
  T Sa = (a[2] - a[0]) * (a[3] - a[1]) * (a[5] - a[4]);
  T Sb = (b[2] - b[0]) * (b[3] - b[1]) * (b[5] - b[4]);
  return interS / (Sa + Sb - interS);
}

// Tile-vs-tile suppression kernel for 2-D boxes.
// Grid is (col_blocks x col_blocks); block (row_start, col_start) compares the
// boxes of row tile `row_start` against the boxes of column tile `col_start`
// and writes, per row box, a 64-bit mask whose bit i means "column box i
// overlaps above iou_threshold".  dev_boxes must be sorted by descending score.
template <typename T>
__global__ void nms_kernel(const int n_boxes, const float iou_threshold,
    const T* dev_boxes, unsigned long long* dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // stage this column tile's boxes in shared memory
  __shared__ T block_boxes[threadsPerBlock * 4];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 4 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
    block_boxes[threadIdx.x * 4 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
    block_boxes[threadIdx.x * 4 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
    block_boxes[threadIdx.x * 4 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const T* cur_box = dev_boxes + cur_box_idx * 4;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile only compare against later (lower-scored) boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU<T>(cur_box, block_boxes + i * 4) > iou_threshold) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// Same tiling scheme as nms_kernel, but for 3-D boxes (6 coords per box).
template <typename T>
__global__ void nms_kernel_3d(const int n_boxes, const float iou_threshold,
    const T* dev_boxes, unsigned long long* dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // stage this column tile's boxes in shared memory
  __shared__ T block_boxes[threadsPerBlock * 6];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 6 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
    block_boxes[threadIdx.x * 6 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
    block_boxes[threadIdx.x * 6 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
    block_boxes[threadIdx.x * 6 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
    block_boxes[threadIdx.x * 6 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
    block_boxes[threadIdx.x * 6 + 5] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const T* cur_box = dev_boxes + cur_box_idx * 6;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile only compare against later (lower-scored) boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU_3d<T>(cur_box, block_boxes + i * 6) > iou_threshold) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// Host entry point: sorts boxes by descending score, launches the pairwise
// overlap-mask kernel (2-D or 3-D depending on dets' second dimension), then
// greedily walks the mask on the CPU to collect kept boxes.
// Returns indices into the ORIGINAL (unsorted) dets ordering.
at::Tensor nms_cuda(const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) {
  /* dets expected as (n_dets, dim) where dim=4 in 2D, dim=6 in 3D */
  AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
  AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");

  at::cuda::CUDAGuard device_guard(dets.device());
  bool is_3d = dets.size(1) == 6;

  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
  auto dets_sorted = dets.index_select(0, order_t);

  int dets_num = dets.size(0);

  const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);

  // one 64-bit overlap mask per (box, column tile) pair
  at::Tensor mask =
      at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));

  dim3 blocks(col_blocks, col_blocks);
  dim3 threads(threadsPerBlock);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  if (is_3d) {
    //std::cout << "performing NMS on 3D boxes in CUDA" << std::endl;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        dets_sorted.type(), "nms_kernel_cuda", [&] {
          nms_kernel_3d<scalar_t><<<blocks, threads, 0, stream>>>(
              dets_num,
              iou_threshold,
              dets_sorted.data_ptr<scalar_t>(),
              (unsigned long long*)mask.data_ptr<int64_t>());
        });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        dets_sorted.type(), "nms_kernel_cuda", [&] {
          nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
              dets_num,
              iou_threshold,
              dets_sorted.data_ptr<scalar_t>(),
              (unsigned long long*)mask.data_ptr<int64_t>());
        });
  }

  // greedy suppression on the host: walk boxes in score order, keep a box
  // unless a previously-kept box already suppressed it, then OR in its mask
  at::Tensor mask_cpu = mask.to(at::kCPU);
  unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  at::Tensor keep =
      at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
  int64_t* keep_out = keep.data_ptr<int64_t>();

  int num_to_keep = 0;
  for (int i = 0; i < dets_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long* p = mask_host + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }

  AT_CUDA_CHECK(cudaGetLastError());
  // map kept positions (in sorted order) back to original indices
  return order_t.index(
      {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
           .to(order_t.device(), keep.scalar_type())});
}
ab4060fd5527f23e4e5a17a7a2acb8332f602878.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>

#define size 16384       /* total number of elements */
#define threadsize 1024  /* threads per block (matches the 32x32 block below) */

/* Element-wise sum c[i] = a[i] + b[i] over a 1-D grid of 2-D (32x32) blocks.
 * Each thread flattens its 2-D position into one element index; the bounds
 * guard keeps an over-provisioned launch from writing out of range. */
__global__ void MatrixAddition(int *a, int *b, int *c){
    //1d grid with 2d blocks
    int id = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    if (id < size)
        c[id] = a[id] + b[id];
}

/* Fills a with 1s and b with 2s, adds them on the GPU, times the kernel with
 * events and prints the checksum (expected 3 * size).
 * Fixes over the original: host buffers are freed, events are destroyed,
 * malloc results are checked, and kernel launch errors are reported. */
int main(){
    const long long int totalsize = size * sizeof(int);
    long long int summation = 0;
    float time1 = 0.0f;

    int *matA = (int*)malloc(totalsize);
    int *matB = (int*)malloc(totalsize);
    int *matC = (int*)malloc(totalsize);
    if (matA == NULL || matB == NULL || matC == NULL) {
        fprintf(stderr, "host allocation failed\n");
        free(matA); free(matB); free(matC);
        return 1;
    }

    for (int i = 0; i < size; i++) {
        matA[i] = 1;
        matB[i] = 2;
    }

    int *matAD;
    int *matBD;
    int *matCD;
    hipMalloc((void**)&matAD, totalsize);
    hipMalloc((void**)&matBD, totalsize);
    hipMalloc((void**)&matCD, totalsize);

    hipEvent_t start1, end1;
    hipEventCreate(&start1);
    hipEventCreate(&end1);

    hipMemcpy(matAD, matA, totalsize, hipMemcpyHostToDevice);
    hipMemcpy(matBD, matB, totalsize, hipMemcpyHostToDevice);

    dim3 dimGrid(size / threadsize, 1);  /* 16 blocks x 1024 threads == size */
    dim3 dimBlock(32, 32);

    hipEventRecord(start1);
    hipLaunchKernelGGL(MatrixAddition, dimGrid, dimBlock, 0, 0, matAD, matBD, matCD);
    hipEventRecord(end1);

    /* catch a bad launch configuration instead of silently reading garbage */
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));

    hipMemcpy(matC, matCD, totalsize, hipMemcpyDeviceToHost);  /* syncs with the kernel */
    hipEventSynchronize(end1);
    hipEventElapsedTime(&time1, start1, end1);
    printf("elapsed time is %lf milli secs \n", time1);

    for (int i = 0; i < size; i++) {
        summation += matC[i];
    }
    printf("Sum is %lld ", summation);

    /* release all resources (the original leaked the host buffers and events) */
    hipEventDestroy(start1);
    hipEventDestroy(end1);
    hipFree(matAD);
    hipFree(matBD);
    hipFree(matCD);
    free(matA);
    free(matB);
    free(matC);
}
ab4060fd5527f23e4e5a17a7a2acb8332f602878.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define size 16384       /* total number of elements */
#define threadsize 1024  /* threads per block (matches the 32x32 block below) */

/* Element-wise sum c[i] = a[i] + b[i] over a 1-D grid of 2-D (32x32) blocks.
 * Each thread flattens its 2-D position into one element index; the bounds
 * guard keeps an over-provisioned launch from writing out of range. */
__global__ void MatrixAddition(int *a, int *b, int *c){
    //1d grid with 2d blocks
    int id = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    if (id < size)
        c[id] = a[id] + b[id];
}

/* Fills a with 1s and b with 2s, adds them on the GPU, times the kernel with
 * events and prints the checksum (expected 3 * size).
 * Fixes over the original: host buffers are freed, events are destroyed,
 * malloc results are checked, and kernel launch errors are reported. */
int main(){
    const long long int totalsize = size * sizeof(int);
    long long int summation = 0;
    float time1 = 0.0f;

    int *matA = (int*)malloc(totalsize);
    int *matB = (int*)malloc(totalsize);
    int *matC = (int*)malloc(totalsize);
    if (matA == NULL || matB == NULL || matC == NULL) {
        fprintf(stderr, "host allocation failed\n");
        free(matA); free(matB); free(matC);
        return 1;
    }

    for (int i = 0; i < size; i++) {
        matA[i] = 1;
        matB[i] = 2;
    }

    int *matAD;
    int *matBD;
    int *matCD;
    cudaMalloc((void**)&matAD, totalsize);
    cudaMalloc((void**)&matBD, totalsize);
    cudaMalloc((void**)&matCD, totalsize);

    cudaEvent_t start1, end1;
    cudaEventCreate(&start1);
    cudaEventCreate(&end1);

    cudaMemcpy(matAD, matA, totalsize, cudaMemcpyHostToDevice);
    cudaMemcpy(matBD, matB, totalsize, cudaMemcpyHostToDevice);

    dim3 dimGrid(size / threadsize, 1);  /* 16 blocks x 1024 threads == size */
    dim3 dimBlock(32, 32);

    cudaEventRecord(start1);
    MatrixAddition<<<dimGrid, dimBlock>>>(matAD, matBD, matCD);
    cudaEventRecord(end1);

    /* catch a bad launch configuration instead of silently reading garbage */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(matC, matCD, totalsize, cudaMemcpyDeviceToHost);  /* syncs with the kernel */
    cudaEventSynchronize(end1);
    cudaEventElapsedTime(&time1, start1, end1);
    printf("elapsed time is %lf milli secs \n", time1);

    for (int i = 0; i < size; i++) {
        summation += matC[i];
    }
    printf("Sum is %lld ", summation);

    /* release all resources (the original leaked the host buffers and events) */
    cudaEventDestroy(start1);
    cudaEventDestroy(end1);
    cudaFree(matAD);
    cudaFree(matBD);
    cudaFree(matCD);
    free(matA);
    free(matB);
    free(matC);
}
b98803f36c2deb02feae5ff200db6050bac5fad3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <hiprand/hiprand.h>
#include "utils.h"

using namespace std;
using namespace chrono;

#define BLOCK_NUM 64     // number of GPU blocks (candidate children kept per iteration)
#define BLOCK_SIZE 512   // threads per GPU block (candidates generated per block)
#define RAND_SIZE 1000   // length of the pre-generated random-number pool
#define ITERATIONS 1000  // number of differential-evolution iterations
#define ARG_NUM 10       // number of unknowns x_i
#define ARG_LIMIT 100    // bound on |x_i|
#define BIAS 22          // constant bias term of the equation
#define F 0.5            // DE mutation scaling factor
#define CR 0.3           // DE crossover probability

/*
 * Solves an equation of the form
 *
 *   sum_i (-1)^i * (x_i)^i * i + BIAS = 0,  i > 0
 *
 * where |x_i| <= ARG_LIMIT and ARG_NUM determines the number of
 * variables (the range of i).
 */

/**
 * \brief One differential-evolution step on the GPU: every thread builds a
 *        mutated/crossed-over candidate and evaluates its residual; thread 0
 *        of each block then writes the block's best candidate out.
 * \param arg_list current best parameters plus their residual (ARG_NUM + 1 doubles)
 * \param result_list per-block best child parameters and residual
 * \param rand pre-generated uniform random numbers in [0, 1)
 */
__global__ void DifferentialEvolution(const double* arg_list, double* result_list, const double* rand)
{
    // per-block candidate buffer: one row per thread, last column holds the residual
    __shared__ double results[BLOCK_SIZE][ARG_NUM + 1];
    // random-pool cursor and per-block stride (decorrelates blocks and threads)
    auto randIndex = threadIdx.x + 1;
    const auto step = blockIdx.x + 1;
    // mutation: each component comes from three distinct donor components
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        int r1, r2, r3;
        do
        {
            r1 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
            r2 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
            r3 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
        }
        while (r1 == r2 || r2 == r3 || r1 == r3);
        results[threadIdx.x][i] = arg_list[r1] + F * (arg_list[r2] - arg_list[r3]);
        if (abs(results[threadIdx.x][i]) > ARG_LIMIT)
        {
            // out of bounds: re-seed this component uniformly in [-ARG_LIMIT, ARG_LIMIT]
            results[threadIdx.x][i] = (rand[randIndex] - 0.5) * 2 * ARG_LIMIT;
            randIndex = (randIndex + step) % RAND_SIZE;
        }
    }
    // crossover: keep the parent's component unless forced (i == j) or rand <= CR
    const auto j = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
    randIndex = (randIndex + step) % RAND_SIZE;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        if (i != j && rand[randIndex] > CR)
        {
            results[threadIdx.x][i] = arg_list[i];
        }
        randIndex = (randIndex + step) % RAND_SIZE;
    }
    // evaluate: residual = sum_i (-1)^(i+1) * (i+1) * x_i^(i+1) + BIAS
    results[threadIdx.x][ARG_NUM] = 0;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        auto temp = (i + 1.) * ((i + 1) % 2 == 0 ? 1 : -1);
        for (auto n = 0; n < i + 1; ++n)
        {
            temp *= results[threadIdx.x][i];
        }
        results[threadIdx.x][ARG_NUM] += temp;
    }
    results[threadIdx.x][ARG_NUM] += BIAS;
    __syncthreads();
    // selection: thread 0 keeps the candidate with the smallest |residual|
    if (threadIdx.x == 0)
    {
        for (auto i = 1; i < BLOCK_SIZE; ++i)
        {
            if (abs(results[i][ARG_NUM]) < abs(results[0][ARG_NUM]))
            {
                for (auto n = 0; n < ARG_NUM + 1; ++n)
                {
                    results[0][n] = results[i][n];
                }
            }
        }
        for (auto i = 0; i < ARG_NUM + 1; ++i)
        {
            result_list[blockIdx.x * (ARG_NUM + 1) + i] = results[0][i];
        }
    }
}

/**
 * \brief Child selection on the GPU, single-threaded: adopts a block result
 *        whose |residual| beats the current best.
 * \param arg_list current best parameters plus residual (updated in place)
 * \param result_list per-block best child parameters and residuals
 */
__global__ void SelectNextGeneration(double* arg_list, const double* result_list)
{
    if (threadIdx.x == 0 && blockIdx.x == 0)
    {
        auto bestResult = -1;
        // NOTE(review): this keeps the LAST improving block, not the one with
        // the smallest residual; any improving child is still accepted.
        for (auto j = 0; j < BLOCK_NUM; ++j)
        {
            if (abs(result_list[j * (ARG_NUM + 1) + ARG_NUM]) < abs(arg_list[ARG_NUM]))
            {
                bestResult = j;
            }
        }
        if (bestResult >= 0)
        {
            memcpy(arg_list, &result_list[bestResult * (ARG_NUM + 1)], sizeof(double) * (ARG_NUM + 1));
        }
    }
}

int main()
{
    // current best parameters and their residual ([argv], result)
    const auto hostArgList = static_cast<double*>(malloc(sizeof(double) * (ARG_NUM + 1)));
    // device-side copy of the best parameter vector
    double* deviceArgList;
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&deviceArgList), sizeof(double) * (ARG_NUM + 1)));
    // per-block best child parameters and residuals computed on the GPU
    double* deviceResultList;
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&deviceResultList), sizeof(double) * BLOCK_NUM * (ARG_NUM + 1)));
    // initialize the population and evaluate its residual on the host
    srand(time(nullptr));
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        hostArgList[i] = (double(rand()) / RAND_MAX - 0.5) * 2 * ARG_LIMIT;
    }
    hostArgList[ARG_NUM] = 0.;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        auto temp = (i + 1.) * ((i + 1) % 2 == 0 ? 1 : -1);
        for (auto n = 0; n < i + 1; ++n)
        {
            temp *= hostArgList[i];
        }
        hostArgList[ARG_NUM] += temp;
    }
    hostArgList[ARG_NUM] += BIAS;
    // set up the random-number generator and two ping-pong device buffers
    double *deviceRand1, *deviceRand2;
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&deviceRand1), sizeof(double) * RAND_SIZE));
    checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&deviceRand2), sizeof(double) * RAND_SIZE));
    hiprandGenerator_t deviceRandGenerator;
    hiprandCreateGenerator(&deviceRandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(deviceRandGenerator, time(nullptr));
    hiprandGenerateUniformDouble(deviceRandGenerator, deviceRand1, RAND_SIZE);
    // differential-evolution main loop
    checkCudaErrors(hipMemcpy(deviceArgList, hostArgList, sizeof(double) * (ARG_NUM + 1), hipMemcpyHostToDevice));
    const auto start = system_clock::now();
    for (auto i = 0; i < ITERATIONS; ++i)
    {
        // GPU computes the best child of every block
        hipLaunchKernelGGL(( DifferentialEvolution), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, deviceArgList, deviceResultList, i % 2 ? deviceRand2 : deviceRand1);
        // refill the random pool not in use this iteration
        hiprandGenerateUniformDouble(deviceRandGenerator, i % 2 ? deviceRand1 : deviceRand2, RAND_SIZE);
        // select the next generation
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( SelectNextGeneration), dim3(1), dim3(1), 0, 0, deviceArgList, deviceResultList);
    }
    const auto elapsedTime = duration_cast<milliseconds>(system_clock::now() - start).count();
    printf("Algorithm running time is %lld ms\n", elapsedTime);
    checkCudaErrors(hipMemcpy(hostArgList, deviceArgList, sizeof(double) * (ARG_NUM + 1), hipMemcpyDeviceToHost));
    // print the result
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        printf("x%d = %f\n", i + 1, hostArgList[i]);
    }
    printf("Result = %f\n", hostArgList[ARG_NUM]);
    // validate the result independently
    auto realResult = 0.;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        realResult += pow(-1, i + 1) * pow(hostArgList[i], i + 1) * (i + 1);
    }
    printf("Validating Result = %f\n", realResult + BIAS);
    // release host memory
    free(hostArgList);
    // release GPU memory
    checkCudaErrors(hipFree(deviceRand1));
    checkCudaErrors(hipFree(deviceRand2));
    checkCudaErrors(hipFree(deviceArgList));
    checkCudaErrors(hipFree(deviceResultList));
}
b98803f36c2deb02feae5ff200db6050bac5fad3.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <curand.h>
#include "utils.h"

using namespace std;
using namespace chrono;

#define BLOCK_NUM 64     // number of GPU blocks (candidate children kept per iteration)
#define BLOCK_SIZE 512   // threads per GPU block (candidates generated per block)
#define RAND_SIZE 1000   // length of the pre-generated CUDA random-number pool
#define ITERATIONS 1000  // number of differential-evolution iterations
#define ARG_NUM 10       // number of unknowns x_i
#define ARG_LIMIT 100    // bound on |x_i|
#define BIAS 22          // constant bias term of the equation
#define F 0.5            // DE mutation scaling factor
#define CR 0.3           // DE crossover probability

/*
 * Solves an equation of the form
 *
 *   sum_i (-1)^i * (x_i)^i * i + BIAS = 0,  i > 0
 *
 * where |x_i| <= ARG_LIMIT and ARG_NUM determines the number of
 * variables (the range of i).
 */

/**
 * \brief One differential-evolution step on the GPU: every thread builds a
 *        mutated/crossed-over candidate and evaluates its residual; thread 0
 *        of each block then writes the block's best candidate out.
 * \param arg_list current best parameters plus their residual (ARG_NUM + 1 doubles)
 * \param result_list per-block best child parameters and residual
 * \param rand pre-generated uniform random numbers in [0, 1)
 */
__global__ void DifferentialEvolution(const double* arg_list, double* result_list, const double* rand)
{
    // per-block candidate buffer: one row per thread, last column holds the residual
    __shared__ double results[BLOCK_SIZE][ARG_NUM + 1];
    // random-pool cursor and per-block stride (decorrelates blocks and threads)
    auto randIndex = threadIdx.x + 1;
    const auto step = blockIdx.x + 1;
    // mutation: each component comes from three distinct donor components
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        int r1, r2, r3;
        do
        {
            r1 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
            r2 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
            r3 = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
            randIndex = (randIndex + step) % RAND_SIZE;
        }
        while (r1 == r2 || r2 == r3 || r1 == r3);
        results[threadIdx.x][i] = arg_list[r1] + F * (arg_list[r2] - arg_list[r3]);
        if (abs(results[threadIdx.x][i]) > ARG_LIMIT)
        {
            // out of bounds: re-seed this component uniformly in [-ARG_LIMIT, ARG_LIMIT]
            results[threadIdx.x][i] = (rand[randIndex] - 0.5) * 2 * ARG_LIMIT;
            randIndex = (randIndex + step) % RAND_SIZE;
        }
    }
    // crossover: keep the parent's component unless forced (i == j) or rand <= CR
    const auto j = int(rand[randIndex] * ARG_NUM) % ARG_NUM;
    randIndex = (randIndex + step) % RAND_SIZE;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        if (i != j && rand[randIndex] > CR)
        {
            results[threadIdx.x][i] = arg_list[i];
        }
        randIndex = (randIndex + step) % RAND_SIZE;
    }
    // evaluate: residual = sum_i (-1)^(i+1) * (i+1) * x_i^(i+1) + BIAS
    results[threadIdx.x][ARG_NUM] = 0;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        auto temp = (i + 1.) * ((i + 1) % 2 == 0 ? 1 : -1);
        for (auto n = 0; n < i + 1; ++n)
        {
            temp *= results[threadIdx.x][i];
        }
        results[threadIdx.x][ARG_NUM] += temp;
    }
    results[threadIdx.x][ARG_NUM] += BIAS;
    __syncthreads();
    // selection: thread 0 keeps the candidate with the smallest |residual|
    if (threadIdx.x == 0)
    {
        for (auto i = 1; i < BLOCK_SIZE; ++i)
        {
            if (abs(results[i][ARG_NUM]) < abs(results[0][ARG_NUM]))
            {
                for (auto n = 0; n < ARG_NUM + 1; ++n)
                {
                    results[0][n] = results[i][n];
                }
            }
        }
        for (auto i = 0; i < ARG_NUM + 1; ++i)
        {
            result_list[blockIdx.x * (ARG_NUM + 1) + i] = results[0][i];
        }
    }
}

/**
 * \brief Child selection on the GPU, single-threaded: adopts a block result
 *        whose |residual| beats the current best.
 * \param arg_list current best parameters plus residual (updated in place)
 * \param result_list per-block best child parameters and residuals
 */
__global__ void SelectNextGeneration(double* arg_list, const double* result_list)
{
    if (threadIdx.x == 0 && blockIdx.x == 0)
    {
        auto bestResult = -1;
        // NOTE(review): this keeps the LAST improving block, not the one with
        // the smallest residual; any improving child is still accepted.
        for (auto j = 0; j < BLOCK_NUM; ++j)
        {
            if (abs(result_list[j * (ARG_NUM + 1) + ARG_NUM]) < abs(arg_list[ARG_NUM]))
            {
                bestResult = j;
            }
        }
        if (bestResult >= 0)
        {
            memcpy(arg_list, &result_list[bestResult * (ARG_NUM + 1)], sizeof(double) * (ARG_NUM + 1));
        }
    }
}

int main()
{
    // current best parameters and their residual ([argv], result)
    const auto hostArgList = static_cast<double*>(malloc(sizeof(double) * (ARG_NUM + 1)));
    // device-side copy of the best parameter vector
    double* deviceArgList;
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&deviceArgList), sizeof(double) * (ARG_NUM + 1)));
    // per-block best child parameters and residuals computed on the GPU
    double* deviceResultList;
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&deviceResultList), sizeof(double) * BLOCK_NUM * (ARG_NUM + 1)));
    // initialize the population and evaluate its residual on the host
    srand(time(nullptr));
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        hostArgList[i] = (double(rand()) / RAND_MAX - 0.5) * 2 * ARG_LIMIT;
    }
    hostArgList[ARG_NUM] = 0.;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        auto temp = (i + 1.) * ((i + 1) % 2 == 0 ? 1 : -1);
        for (auto n = 0; n < i + 1; ++n)
        {
            temp *= hostArgList[i];
        }
        hostArgList[ARG_NUM] += temp;
    }
    hostArgList[ARG_NUM] += BIAS;
    // set up the CUDA random-number generator and two ping-pong device buffers
    double *deviceRand1, *deviceRand2;
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&deviceRand1), sizeof(double) * RAND_SIZE));
    checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&deviceRand2), sizeof(double) * RAND_SIZE));
    curandGenerator_t deviceRandGenerator;
    curandCreateGenerator(&deviceRandGenerator, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(deviceRandGenerator, time(nullptr));
    curandGenerateUniformDouble(deviceRandGenerator, deviceRand1, RAND_SIZE);
    // differential-evolution main loop
    checkCudaErrors(cudaMemcpy(deviceArgList, hostArgList, sizeof(double) * (ARG_NUM + 1), cudaMemcpyHostToDevice));
    const auto start = system_clock::now();
    for (auto i = 0; i < ITERATIONS; ++i)
    {
        // GPU computes the best child of every block
        DifferentialEvolution<<<BLOCK_NUM, BLOCK_SIZE>>>(deviceArgList, deviceResultList, i % 2 ? deviceRand2 : deviceRand1);
        // refill the random pool not in use this iteration
        curandGenerateUniformDouble(deviceRandGenerator, i % 2 ? deviceRand1 : deviceRand2, RAND_SIZE);
        // select the next generation
        cudaDeviceSynchronize();
        SelectNextGeneration<<<1, 1>>>(deviceArgList, deviceResultList);
    }
    const auto elapsedTime = duration_cast<milliseconds>(system_clock::now() - start).count();
    printf("Algorithm running time is %lld ms\n", elapsedTime);
    checkCudaErrors(cudaMemcpy(hostArgList, deviceArgList, sizeof(double) * (ARG_NUM + 1), cudaMemcpyDeviceToHost));
    // print the result
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        printf("x%d = %f\n", i + 1, hostArgList[i]);
    }
    printf("Result = %f\n", hostArgList[ARG_NUM]);
    // validate the result independently
    auto realResult = 0.;
    for (auto i = 0; i < ARG_NUM; ++i)
    {
        realResult += pow(-1, i + 1) * pow(hostArgList[i], i + 1) * (i + 1);
    }
    printf("Validating Result = %f\n", realResult + BIAS);
    // release host memory
    free(hostArgList);
    // release GPU memory
    checkCudaErrors(cudaFree(deviceRand1));
    checkCudaErrors(cudaFree(deviceRand2));
    checkCudaErrors(cudaFree(deviceArgList));
    checkCudaErrors(cudaFree(deviceResultList));
}
746d339d775663238833c27650ed0dd2b9e6f4bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" int main(void){ float *a_h, *b_h, *c_h; float *a_d, *b_d, *c_d; const int N = 24; size_t size = N * sizeof(float); a_h = (float *) malloc (size); b_h = (float *) malloc (size); c_h = (float *) malloc (size); for (int i = 0; i < N; i++) { a_h[i] = (float) i; b_h[i] = (float) (i+1); } printf("\nArreglo a:\n"); for (int i = 0; i < N; i++) printf("%f ", a_h[i]); printf("\nArreglo b:\n"); for (int i = 0; i < N; i++) printf("%f ", a_h[i]); hipMalloc((void **) &a_d, size); hipMalloc((void **) &b_d, size); hipMalloc((void **) &c_d, size); hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); int block_size = 8; int n_blocks = N/block_size + (N%block_size == 0 ? 0:1); hipLaunchKernelGGL(( Suma_vectores) , dim3(n_blocks), dim3(block_size) , 0, 0, c_d, a_d, b_d, N); hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost); printf("\n\nArreglo c:\n"); for (int i = 0; i < N; i++) printf("%f ", c_h[i]); _getche(); free(a_h); free(b_h); free(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); return(0); }
746d339d775663238833c27650ed0dd2b9e6f4bb.cu
int main(void){ float *a_h, *b_h, *c_h; float *a_d, *b_d, *c_d; const int N = 24; size_t size = N * sizeof(float); a_h = (float *) malloc (size); b_h = (float *) malloc (size); c_h = (float *) malloc (size); for (int i = 0; i < N; i++) { a_h[i] = (float) i; b_h[i] = (float) (i+1); } printf("\nArreglo a:\n"); for (int i = 0; i < N; i++) printf("%f ", a_h[i]); printf("\nArreglo b:\n"); for (int i = 0; i < N; i++) printf("%f ", a_h[i]); cudaMalloc((void **) &a_d, size); cudaMalloc((void **) &b_d, size); cudaMalloc((void **) &c_d, size); cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice); int block_size = 8; int n_blocks = N/block_size + (N%block_size == 0 ? 0:1); Suma_vectores <<< n_blocks, block_size >>> (c_d, a_d, b_d, N); cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost); printf("\n\nArreglo c:\n"); for (int i = 0; i < N; i++) printf("%f ", c_h[i]); _getche(); free(a_h); free(b_h); free(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); return(0); }
ecdb1c4a161337ce52bf692c1c98047bee5c0d06.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/MarginCriterion.hip" #else void THNN_(MarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *output, bool sizeAverage, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_check_dim_size(state, output, 1, 0, 1); THCUNN_assertSameGPU(state, 2, input, target); ptrdiff_t size = THCTensor_(nElement)(state, input); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus<accreal>(), margin_functor<scalar_t, accreal>(ScalarConvert<scalar_t, accreal>::to(margin))); if (sizeAverage) sum /= size; THCTensor_(free)(state, input); THCTensor_(free)(state, target); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum)); } void THNN_(MarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *gradInput, bool sizeAverage, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_assertSameGPU(state, 3, input, target, gradInput); ptrdiff_t size = THCTensor_(nElement)(state, input); accreal norm = sizeAverage ? 
1.f/size : 1; input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCTensor_(resizeAs)(state, gradInput, input); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor<scalar_t, accreal>(ScalarConvert<scalar_t, accreal>::to(margin), norm)); THCTensor_(free)(state, input); THCTensor_(free)(state, target); } #endif
ecdb1c4a161337ce52bf692c1c98047bee5c0d06.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/MarginCriterion.cu" #else void THNN_(MarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *output, bool sizeAverage, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_check_dim_size(state, output, 1, 0, 1); THCUNN_assertSameGPU(state, 2, input, target); ptrdiff_t size = THCTensor_(nElement)(state, input); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus<accreal>(), margin_functor<scalar_t, accreal>(ScalarConvert<scalar_t, accreal>::to(margin))); if (sizeAverage) sum /= size; THCTensor_(free)(state, input); THCTensor_(free)(state, target); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum)); } void THNN_(MarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *gradInput, bool sizeAverage, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_assertSameGPU(state, 3, input, target, gradInput); ptrdiff_t size = THCTensor_(nElement)(state, input); accreal norm = sizeAverage ? 
1.f/size : 1; input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCTensor_(resizeAs)(state, gradInput, input); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, margin_updateGradInput_functor<scalar_t, accreal>(ScalarConvert<scalar_t, accreal>::to(margin), norm)); THCTensor_(free)(state, input); THCTensor_(free)(state, target); } #endif
5704eeef1c5c2404b4f65a3743147573e5f796b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <time.h> #include <algorithm> #include <cassert> #include <cstdlib> #include <iterator> #include <vector> #define THREAD 256 #include <bits/stdc++.h> #include <chrono> using namespace std::chrono; using namespace std; __global__ void markPrimes(int step,int k, char *d_primes, int n) { int index = blockIdx.x*blockDim.x + threadIdx.x; int begin = step * step; if (begin == 0){ begin = step*step; } int end = (index + 1) * k; for ( int i = begin; i <= end; i += step) //begin + step { if (i<n) d_primes[i] = 0; } } int main(int argc, char *argv[]) { int n, N, k, blocks; char *primes, *d_primes; n = atoi(argv[1]); N = ceill((long double)sqrt(n)); k = ceill((long double)n / (long double)N); blocks = ceill((long double)N / (long double)THREAD); primes = ( char*)malloc(n*sizeof(char)); for (int i = 0; i < n; i++) { if (i%2!=0 || i==2 ) primes[i] = 1; } hipMalloc(&d_primes, n * sizeof(char)); hipMemcpy(d_primes, primes, n * sizeof(char), hipMemcpyHostToDevice); clock_t begin = clock(); for (int i = 2; i <= N; i++) { if (primes[i]) { markPrimes << <blocks, THREAD >> > (i, k, d_primes, n); } } hipMemcpy(primes, d_primes, n * sizeof(char), hipMemcpyDeviceToHost); clock_t end = clock(); primes[0] = 0; primes[1] = 0; int count = 0; for (int i = 0; i < n; i++) { if (primes[i]) { //cout << i << "\t"; count++; } } cout << "\nNumber of primes less than " << n << ": " << count << endl; double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000; cout << "The running time is " << time_spent << " milliseconds." << endl; hipFree(d_primes); }
5704eeef1c5c2404b4f65a3743147573e5f796b5.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <time.h> #include <algorithm> #include <cassert> #include <cstdlib> #include <iterator> #include <vector> #define THREAD 256 #include <bits/stdc++.h> #include <chrono> using namespace std::chrono; using namespace std; __global__ void markPrimes(int step,int k, char *d_primes, int n) { int index = blockIdx.x*blockDim.x + threadIdx.x; int begin = step * step; if (begin == 0){ begin = step*step; } int end = (index + 1) * k; for ( int i = begin; i <= end; i += step) //begin + step { if (i<n) d_primes[i] = 0; } } int main(int argc, char *argv[]) { int n, N, k, blocks; char *primes, *d_primes; n = atoi(argv[1]); N = ceill((long double)sqrt(n)); k = ceill((long double)n / (long double)N); blocks = ceill((long double)N / (long double)THREAD); primes = ( char*)malloc(n*sizeof(char)); for (int i = 0; i < n; i++) { if (i%2!=0 || i==2 ) primes[i] = 1; } cudaMalloc(&d_primes, n * sizeof(char)); cudaMemcpy(d_primes, primes, n * sizeof(char), cudaMemcpyHostToDevice); clock_t begin = clock(); for (int i = 2; i <= N; i++) { if (primes[i]) { markPrimes << <blocks, THREAD >> > (i, k, d_primes, n); } } cudaMemcpy(primes, d_primes, n * sizeof(char), cudaMemcpyDeviceToHost); clock_t end = clock(); primes[0] = 0; primes[1] = 0; int count = 0; for (int i = 0; i < n; i++) { if (primes[i]) { //cout << i << "\t"; count++; } } cout << "\nNumber of primes less than " << n << ": " << count << endl; double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000; cout << "The running time is " << time_spent << " milliseconds." << endl; cudaFree(d_primes); }
78bf1ef384ca18e4b13d85f79d47c6de7ddd970d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <fstream> #include <chrono> #define IMAGE_WIDTH 3840 #define IMAGE_HEIGH 2160 #define IMAGE_CH 3u #define IMAGE_OFF 54u __global__ void kernelYUV2RGB(unsigned char *a, unsigned char *b) { int i = 3*blockIdx.x; int c = b[i+0] - 16; int d = b[i+1] - 128; int e = b[i+2] - 128; a[i+0]=( 298 * c + 409 * e + 128) >> 8; a[i+1]= ( 298 * c - 100 * d - 208*e + 128) >> 8; a[i+2] = ( 298 * c + 516 * d + 128) >> 8; } __global__ void kernelRGB2YUV(unsigned char *a, unsigned char *db) { int i = 3*blockIdx.x; int r = db[i+0]; int g = db[i+1]; int b = db[i+2]; a[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; a[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; a[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; } __global__ void kernelRGB2YUV422(unsigned char *a, unsigned char *db) { int i = 3*blockIdx.x; int r = db[i+0]; int g = db[i+1]; int b = db[i+2]; int r1 = db[i+0+3]; int g1 = db[i+1+3]; int b1 = db[i+2+3]; int u1,u2; a[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; a[i+0+3] = ((66* r1 + 129*g1 +25* b1 + 128) >> 8 ) + 16 ; u1 = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; u2 = ((-38*r1 - 74* g1 +112*b1 + 128) >> 8 ) + 128 ; a[i+1] = a[i+1+3] = (u1+u2)/2; u1 = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; u2 = ((112*r1 - 94* g1 -18*b1 + 128) >> 8 ) + 128 ; a[i+2] = a[i+2+3] = (u1+u2)/2; } using namespace std; using namespace std::chrono; double calculatePSNR(unsigned char* bufYUV, unsigned char* bufRGB, unsigned int len) { double MSER = 0; double MSEG = 0; double MSEB = 0; double MSE = 0; double PSNR =0; for(unsigned int i = 0 ; i < len;i++) { MSE += pow((bufYUV[i] -bufRGB[i]),2); } MSE = (MSE)/(len); PSNR = 10*log10((255.0*255.0)/MSE); std::cout<<"MSE "<<MSE<<std::endl; return PSNR; } bool readImageSimd(std::string imageName , char* buf ) { bool retVal = true ; int off = 0; int wdth = 0 ; int height = 0; std::ifstream ImageFile(imageName, 
std::ios::binary | std::ios::ate); if(ImageFile.fail()) return false; //temproray buffer to hold the image as char buffer auto sz= ImageFile.tellg(); //temproray buffer to hold the image as char buffer std::cout<<"reading "<<sz<<std::endl; ImageFile.seekg(0, std::ios::beg); ImageFile.read(buf, sz); ImageFile.close(); return true; } void converRGBtoYUV(unsigned char* bufRGB,unsigned char *bufYUV,unsigned int len) { int off = IMAGE_OFF; for(int i = 0 ; i < off ;i++) bufYUV[i] = bufRGB[i]; //for each pixel for(int i = off ; i < len ; i+=3) { int r = bufRGB[i+0]; int g = bufRGB[i+1]; int b = bufRGB[i+2]; bufYUV[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; bufYUV[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; bufYUV[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; } } static unsigned char SIMD_bufRGB[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; static unsigned char SIMD_bufRGB1[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; static unsigned char SIMD_bufYUV[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; int main() { string fileName1 = "testo.bmp"; unsigned int len = (IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF); unsigned char *da, *db; hipMalloc((void **)&da, len*sizeof(char)); hipMalloc((void **)&db, len*sizeof(char)); cout<<"[+]Testing using SIMD\n"; //read Image from buffer directly so data are next to each other readImageSimd(fileName1,(char*) SIMD_bufRGB); cout<<"[+]Converting to YUV\n"; auto start = high_resolution_clock::now(); hipMemcpy(db, SIMD_bufRGB, len*sizeof(char), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernelRGB2YUV), dim3(IMAGE_WIDTH*IMAGE_HEIGH), dim3(1), 0, 0, da, db); hipMemcpy(SIMD_bufYUV, da, len*sizeof(char), hipMemcpyDeviceToHost); auto stop = high_resolution_clock::now(); auto duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Elapsed "<<duration.count() << " ms" <<endl; //converRGBtoYUV(SIMD_bufRGB,SIMD_bufYUV,len); cout<<"[+]Converting to YUV422\n"; start = high_resolution_clock::now(); hipMemcpy(db, SIMD_bufRGB, 
len*sizeof(char), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernelRGB2YUV422), dim3((IMAGE_WIDTH*IMAGE_HEIGH)/2), dim3(1), 0, 0, da, db); hipMemcpy(SIMD_bufYUV, da, len*sizeof(char), hipMemcpyDeviceToHost); stop = high_resolution_clock::now(); duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Converting to RGB\n"; start = high_resolution_clock::now(); hipMemcpy(db, SIMD_bufYUV, len*sizeof(char), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernelYUV2RGB), dim3(IMAGE_WIDTH*IMAGE_HEIGH), dim3(1), 0, 0, da, db); hipMemcpy(SIMD_bufRGB1, da, len*sizeof(char), hipMemcpyDeviceToHost); stop = high_resolution_clock::now(); duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Elapsed "<<duration.count() << " ms" <<endl; hipFree(da); hipFree(db); for(int i = 0 ; i < IMAGE_OFF ; i++) SIMD_bufRGB1[i] = SIMD_bufYUV[i]; cout<<"[+]Calculating PSNR\n"; double psnr = calculatePSNR((unsigned char*)SIMD_bufRGB1,(unsigned char*)SIMD_bufRGB,len); cout<<"psnr "<<psnr<<endl; return 0; }
78bf1ef384ca18e4b13d85f79d47c6de7ddd970d.cu
#include <stdio.h> #include <iostream> #include <fstream> #include <chrono> #define IMAGE_WIDTH 3840 #define IMAGE_HEIGH 2160 #define IMAGE_CH 3u #define IMAGE_OFF 54u __global__ void kernelYUV2RGB(unsigned char *a, unsigned char *b) { int i = 3*blockIdx.x; int c = b[i+0] - 16; int d = b[i+1] - 128; int e = b[i+2] - 128; a[i+0]=( 298 * c + 409 * e + 128) >> 8; a[i+1]= ( 298 * c - 100 * d - 208*e + 128) >> 8; a[i+2] = ( 298 * c + 516 * d + 128) >> 8; } __global__ void kernelRGB2YUV(unsigned char *a, unsigned char *db) { int i = 3*blockIdx.x; int r = db[i+0]; int g = db[i+1]; int b = db[i+2]; a[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; a[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; a[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; } __global__ void kernelRGB2YUV422(unsigned char *a, unsigned char *db) { int i = 3*blockIdx.x; int r = db[i+0]; int g = db[i+1]; int b = db[i+2]; int r1 = db[i+0+3]; int g1 = db[i+1+3]; int b1 = db[i+2+3]; int u1,u2; a[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; a[i+0+3] = ((66* r1 + 129*g1 +25* b1 + 128) >> 8 ) + 16 ; u1 = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; u2 = ((-38*r1 - 74* g1 +112*b1 + 128) >> 8 ) + 128 ; a[i+1] = a[i+1+3] = (u1+u2)/2; u1 = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; u2 = ((112*r1 - 94* g1 -18*b1 + 128) >> 8 ) + 128 ; a[i+2] = a[i+2+3] = (u1+u2)/2; } using namespace std; using namespace std::chrono; double calculatePSNR(unsigned char* bufYUV, unsigned char* bufRGB, unsigned int len) { double MSER = 0; double MSEG = 0; double MSEB = 0; double MSE = 0; double PSNR =0; for(unsigned int i = 0 ; i < len;i++) { MSE += pow((bufYUV[i] -bufRGB[i]),2); } MSE = (MSE)/(len); PSNR = 10*log10((255.0*255.0)/MSE); std::cout<<"MSE "<<MSE<<std::endl; return PSNR; } bool readImageSimd(std::string imageName , char* buf ) { bool retVal = true ; int off = 0; int wdth = 0 ; int height = 0; std::ifstream ImageFile(imageName, std::ios::binary | std::ios::ate); if(ImageFile.fail()) return false; //temproray buffer 
to hold the image as char buffer auto sz= ImageFile.tellg(); //temproray buffer to hold the image as char buffer std::cout<<"reading "<<sz<<std::endl; ImageFile.seekg(0, std::ios::beg); ImageFile.read(buf, sz); ImageFile.close(); return true; } void converRGBtoYUV(unsigned char* bufRGB,unsigned char *bufYUV,unsigned int len) { int off = IMAGE_OFF; for(int i = 0 ; i < off ;i++) bufYUV[i] = bufRGB[i]; //for each pixel for(int i = off ; i < len ; i+=3) { int r = bufRGB[i+0]; int g = bufRGB[i+1]; int b = bufRGB[i+2]; bufYUV[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ; bufYUV[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ; bufYUV[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ; } } static unsigned char SIMD_bufRGB[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; static unsigned char SIMD_bufRGB1[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; static unsigned char SIMD_bufYUV[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF]; int main() { string fileName1 = "testo.bmp"; unsigned int len = (IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF); unsigned char *da, *db; cudaMalloc((void **)&da, len*sizeof(char)); cudaMalloc((void **)&db, len*sizeof(char)); cout<<"[+]Testing using SIMD\n"; //read Image from buffer directly so data are next to each other readImageSimd(fileName1,(char*) SIMD_bufRGB); cout<<"[+]Converting to YUV\n"; auto start = high_resolution_clock::now(); cudaMemcpy(db, SIMD_bufRGB, len*sizeof(char), cudaMemcpyHostToDevice); kernelRGB2YUV<<<IMAGE_WIDTH*IMAGE_HEIGH, 1>>>(da, db); cudaMemcpy(SIMD_bufYUV, da, len*sizeof(char), cudaMemcpyDeviceToHost); auto stop = high_resolution_clock::now(); auto duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Elapsed "<<duration.count() << " ms" <<endl; //converRGBtoYUV(SIMD_bufRGB,SIMD_bufYUV,len); cout<<"[+]Converting to YUV422\n"; start = high_resolution_clock::now(); cudaMemcpy(db, SIMD_bufRGB, len*sizeof(char), cudaMemcpyHostToDevice); kernelRGB2YUV422<<<(IMAGE_WIDTH*IMAGE_HEIGH)/2, 1>>>(da, db); 
cudaMemcpy(SIMD_bufYUV, da, len*sizeof(char), cudaMemcpyDeviceToHost); stop = high_resolution_clock::now(); duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Converting to RGB\n"; start = high_resolution_clock::now(); cudaMemcpy(db, SIMD_bufYUV, len*sizeof(char), cudaMemcpyHostToDevice); kernelYUV2RGB<<<IMAGE_WIDTH*IMAGE_HEIGH, 1>>>(da, db); cudaMemcpy(SIMD_bufRGB1, da, len*sizeof(char), cudaMemcpyDeviceToHost); stop = high_resolution_clock::now(); duration = duration_cast<milliseconds>(stop - start); cout<<"[+]Elapsed "<<duration.count() << " ms" <<endl; cudaFree(da); cudaFree(db); for(int i = 0 ; i < IMAGE_OFF ; i++) SIMD_bufRGB1[i] = SIMD_bufYUV[i]; cout<<"[+]Calculating PSNR\n"; double psnr = calculatePSNR((unsigned char*)SIMD_bufRGB1,(unsigned char*)SIMD_bufRGB,len); cout<<"psnr "<<psnr<<endl; return 0; }
f9756ca278432421002b9378a3d2f9f1fcc90d78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/async/for_each.h> #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) #include <hip/hip_cooperative_groups.h> #endif const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* The following are copied from hipsparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. 
typedef enum { HIPSPARSE_MV_ALG_DEFAULT = 0, HIPSPARSE_COOMV_ALG = 1, HIPSPARSE_CSRMV_ALG1 = 2, HIPSPARSE_CSRMV_ALG2 = 3 } hipsparseSpMVAlg_t; typedef enum { HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1, HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2, HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, HIPSPARSE_SPMM_COO_ALG1 = 1, HIPSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, HIPSPARSE_CSRMM_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } hipsparseSpMMAlg_t; typedef enum { HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc } hipsparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "hipsparseSpMVAlg_t","CUSPARSE_",0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","hipsparseSpMMAlg_t","CUSPARSE_SPMM_",0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! 
We created one*/,"ALG1","ALG2","hipsparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); static PetscErrorCode 
MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**); static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**); static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool); PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]); PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]); PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream) { hipsparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); cusparsestruct->stream = stream; stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle) { hipsparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); if (cusparsestruct->handle != handle) { if (cusparsestruct->handle) { stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat); } cusparsestruct->handle = handle; } stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSEClearHandle(Mat A) { Mat_SeqAIJCUSPARSE 
*cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscBool flg; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg || !cusparsestruct) PetscFunctionReturn(0); if (cusparsestruct->handle) cusparsestruct->handle = 0; PetscFunctionReturn(0); } PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(0); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations. 
Level: beginner .seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); (*B)->factortype = ftype; ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); (*B)->canuseordering = PETSC_TRUE; ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN 
/* Type-specific implementation behind MatCUSPARSESetFormat(): records the requested
   cuSPARSE storage format in the per-matrix GPU context hanging off A->spptr.
   MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL both set the same single format field here. */
PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_CUSPARSE_MULT:
    cusparsestruct->format = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparsestruct->format = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
  }
  PetscFunctionReturn(0);
}

/*@
   MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
   operation. Only the MatMult operation can use different GPU storage formats
   for MPIAIJCUSPARSE matrices.

   Not Collective

   Input Parameters:
+  A - Matrix of type SEQAIJCUSPARSE
.  op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
-  format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)

   Output Parameter:

   Level: intermediate

.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID,1);
  /* PetscTryMethod makes this a no-op on matrix types that do not implement the method */
  ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatSetOption() override: handles MAT_FORM_EXPLICIT_TRANSPOSE (which may require
   dropping a cached GPU transpose); everything else is delegated to the SeqAIJ base. */
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_FORM_EXPLICIT_TRANSPOSE:
    /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */
    if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
    A->form_explicit_transpose = flg;
    break;
  default:
    ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
    break;
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);

/* Numeric LU: the factorization itself runs on the host (SeqAIJ kernel); afterwards
   the solve function pointers are selected and the triangular factors are pushed to
   the GPU for the solve phase. */
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
  IS             isrow = b->row,iscol = b->col;
  PetscBool      row_identity,col_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* make sure the host copy of A is current before the host-side factorization */
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* determine which version of MatSolve needs to be used. */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (row_identity && col_identity) {
    /* no row/column permutations: use the cheaper natural-ordering solves */
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  } else {
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  }
  /* get the triangular factors */
  ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Options processing for SEQAIJCUSPARSE: storage format selection, and (CUDA >= 11)
   the cuSPARSE algorithm choices for SpMV/SpMM/csr2csc. */
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
    ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
                            "hipsparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
    /* If
user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
    if (flg && HIPSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
                            "hipsparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
    if (flg && HIPSPARSE_CSRMM_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
                            "hipsparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
    if (flg && HIPSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Symbolic ILU: drop any stale GPU triangular factors, run the host symbolic phase,
   then route the numeric phase to the CUSPARSE implementation. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic LU: same pattern as ILU above — reset GPU factors, host symbolic phase,
   redirect numeric factorization to the CUSPARSE path. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic ICC: reset GPU factors, host symbolic phase, redirect the Cholesky
   numeric phase to the CUSPARSE implementation. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic Cholesky: identical wiring to ICC above. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Build (first call) or refresh (subsequent calls) the unit-diagonal lower triangular
   ILU factor on the GPU from the host factor stored in A (a SeqAIJ factor matrix,
   which stores the strictly-lower part without the implicit unit diagonal). */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  hipsparseStatus_t                 stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* first
figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      /* NOTE(review): assumes row 0 of the host factor contributes no strictly-lower
         entries (its lower part is only the unit diagonal) — ai[1] is subtracted; verify
         against the SeqAIJ factor layout. */
      nzLower=n+ai[n]-ai[1];

      if (!loTriFactor) {
        /* first build: assemble index/value arrays in pinned host memory, then upload */
        PetscScalar *AALo;

        cerr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the lower triangular matrix */
        cerr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the lower triangular matrix */
        AiLo[0]  = (PetscInt) 0;
        AiLo[n]  = nzLower;
        AjLo[0]  = (PetscInt) 0;
        AALo[0]  = (MatScalar) 1.0;
        v        = aa;
        vi       = aj;
        offset   = 1;
        rowOffset= 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          /* additional 1 for the term on the diagonal */
          AiLo[i]    = rowOffset;
          rowOffset += nz+1;

          ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);

          offset      += nz;
          AjLo[offset] = (PetscInt) i;      /* append the unit diagonal entry */
          AALo[offset] = (MatScalar) 1.0;
          offset      += 1;

          v  += nz;
          vi += nz;
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows = n;
        loTriFactor->csrMat->num_cols = n;
        loTriFactor->csrMat->num_entries = nzLower;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
        loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

        loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
        loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* csrsv2-style interface: query the work buffer size before analysis */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
        loTriFactor->AA_h = AALo;   /* keep the pinned value buffer for later refreshes */
        cerr = hipHostFree(AiLo);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjLo);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      } else { /* update values only */
        if (!loTriFactor->AA_h) {
          cerr = hipHostMalloc((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the lower triangular matrix */
        loTriFactor->AA_h[0]  = 1.0;
        v        = aa;
        vi       = aj;
        offset   = 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
          offset      += nz;
          loTriFactor->AA_h[offset] = 1.0;   /* re-append the unit diagonal */
          offset      += 1;
          v  += nz;
        }
        loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
        ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

/* Build (first call) or refresh the non-unit-diagonal upper triangular ILU factor on
   the GPU. The host factor stores U rows after the diagonal pointer a->diag; rows are
   walked backwards so offsets can be filled from the end. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  hipsparseStatus_t                 stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix.
*/
      /* adiag decreases with the row index in this factor layout, so this difference is
         positive — presumably the SeqAIJ factor convention; verify against MatLUFactorNumeric_SeqAIJ */
      nzUpper = adiag[0]-adiag[n];

      if (!upTriFactor) {
        /* first build: assemble in pinned host memory, then upload */
        PetscScalar *AAUp;

        cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the upper triangular matrix */
        cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = nzUpper;

        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;
          vi = aj + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1./v[nz];   /* store the reciprocal of the diagonal */
          AiUp[i]      = AiUp[i+1] - (nz+1);

          ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows = n;
        upTriFactor->csrMat->num_cols = n;
        upTriFactor->csrMat->num_entries = nzUpper;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

        upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
        upTriFactor->AA_h = AAUp;   /* keep pinned values for later refreshes */
        cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      } else {
        /* refresh: structure unchanged, only re-pack and re-upload the values */
        if (!upTriFactor->AA_h) {
          cerr = hipHostMalloc((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the upper triangular matrix */
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements */
          upTriFactor->AA_h[offset] = 1./v[nz];
          ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
        }
        upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
        ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

/* Push both ILU triangular factors to the GPU and cache the row/column permutation
   index arrays (only when the orderings are not the identity). */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  cusparseTriFactors->nnz=a->nz;

  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity && !cusparseTriFactors->rpermIndices) {
    const PetscInt *r;

    ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
    ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }

  /* upper triangular indices */
  ierr =
ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity && !cusparseTriFactors->cpermIndices) {
    const PetscInt *c;

    ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
    ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Build both GPU triangular factors for ICC/Cholesky from a single host upper factor:
   the upper factor is stored as-is (unit diagonal, scaled off-diagonals), and the lower
   solve reuses the same CSR data with a TRANSPOSE operation. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  hipsparseStatus_t                 stat;
  PetscErrorCode                    ierr;
  hipError_t                        cerr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                          nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
  /* NOTE(review): A->data is reinterpreted as Mat_SeqSBAIJ here — presumably the
     Cholesky/ICC factor is stored in SBAIJ layout; confirm against the factor type. */
  Mat_SeqSBAIJ                      *b = (Mat_SeqSBAIJ*)A->data;
  const PetscInt                    *ai = b->i,*aj = b->j,*vj;
  const MatScalar                   *aa = b->a,*v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      cerr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      if (!upTriFactor && !loTriFactor) {
        /* first build: assemble the index structure as well as the values */
        /* Allocate Space for the upper triangular matrix */
        cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1.0/v[nz];   /* reciprocal diagonal for the upper solve */
          AiUp[i]      = offset;
          AALo[offset] = (MatScalar)1.0/v[nz];

          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];        /* negate off-diagonals for the upper factor */
              AALo[j] = AAUp[j]/v[nz];   /* additionally scale by 1/diag for the lower */
            }
            offset+=nz;
          }
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);

        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows = A->rmap->n;
        upTriFactor->csrMat->num_cols = A->cmap->n;
        upTriFactor->csrMat->num_entries = a->nz;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

        upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);

        /* set the operation */
        upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
        stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
        /* the "lower" factor reuses the upper-stored CSR: fill mode stays UPPER and the
           solve below uses OPERATION_TRANSPOSE instead of building an explicit L */
        stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows = A->rmap->n;
        loTriFactor->csrMat->num_cols = A->cmap->n;
        loTriFactor->csrMat->num_entries = a->nz;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
        loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

        loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

        ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
        cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
        cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
      } else {
        /* refresh: structure is unchanged, only re-pack and re-upload the values */
        /* Fill the upper triangular matrix */
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AAUp[offset] = 1.0/v[nz];
          AALo[offset] = 1.0/v[nz];

          offset+=1;
          if (nz>0) {
            ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
            for (j=offset; j<offset+nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j]/v[nz];
            }
            offset+=nz;
          }
        }
        if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
        upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
        ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
      }
      cerr = hipHostFree(AAUp);CHKERRCUDA(cerr);
      cerr = hipHostFree(AALo);CHKERRCUDA(cerr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

/* Push the ICC factors to the GPU and, for non-identity orderings, cache both the
   permutation and its inverse for use in the permuted solves. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           ip = a->row;
  PetscBool                    perm_identity;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
  if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  /* both triangles share the off-diagonal entries, hence the doubling */
  cusparseTriFactors->nnz=(a->nz-n)*2
+ n;

  A->offloadmask = PETSC_OFFLOAD_BOTH;

  /* lower triangular indices */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (!perm_identity) {
    IS             iip;
    const PetscInt *irip,*rip;

    ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
    ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);

    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip+n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip+n);

    ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISDestroy(&iip);CHKERRQ(ierr);
    ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Check both synchronous (pre-launch) and asynchronous (kernel execution) errors
   after a kernel launch; errors become PETSc errors. Note this synchronizes the
   device, so it should not sit in a performance-critical loop. */
#define CHECK_LAUNCH_ERROR()                                                             \
do {                                                                                     \
  /* Check synchronous errors, i.e. pre-launch */                                        \
  hipError_t err = hipGetLastError();                                                    \
  if (hipSuccess != err) {                                                               \
    SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err));  \
  }                                                                                      \
  /* Check asynchronous errors, i.e. kernel failed (ULF) */                              \
  err = hipDeviceSynchronize();                                                          \
  if (hipSuccess != err) {                                                               \
    SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err));  \
  }                                                                                      \
} while (0)

/* Numeric Cholesky: host-side factorization (SeqAIJ kernel), then select the solve
   routines and push the triangular factors to the GPU. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
  IS             ip = b->row;
  PetscBool      perm_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* ensure the host copy of A is current before the host-side factorization */
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* determine which version of MatSolve needs to be used. */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (perm_identity) {
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  } else {
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  }

  /* get the triangular factors */
  ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Precompute explicit transposes (CSC forms) of both triangular factors so that
   transpose solves avoid repeated on-the-fly transposition. */
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
  hipsparseStatus_t                 stat;
  hipsparseIndexBase_t              indexBase;
  hipsparseMatrixType_t             matrixType;
  hipsparseFillMode_t               fillMode;
  hipsparseDiagType_t               diagType;
  hipError_t                        cerr;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* allocate space for the transpose of the lower triangular factor */
  ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
  loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

  /* set the matrix descriptors of the lower triangular factor */
  matrixType = cusparseGetMatType(loTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(loTriFactor->descr);
  /* the transpose swaps the fill mode */
  fillMode   = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
  diagType   = cusparseGetMatDiagType(loTriFactor->descr);

  /* Create the matrix description */
  stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);

  /* set the operation */
  loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the lower triangular factor*/
  loTriFactorT->csrMat = new CsrMatrix;
  loTriFactorT->csrMat->num_rows       = loTriFactor->csrMat->num_cols;
  loTriFactorT->csrMat->num_cols       = loTriFactor->csrMat->num_rows;
  loTriFactorT->csrMat->num_entries    = loTriFactor->csrMat->num_entries;
  loTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
  loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
  loTriFactorT->csrMat->values         = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);

  /* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                                        loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                                        loTriFactor->csrMat->values->data().get(),
                                        loTriFactor->csrMat->row_offsets->data().get(),
                                        loTriFactor->csrMat->column_indices->data().get(),
                                        loTriFactorT->csrMat->values->data().get(),
                                        loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
                                        HIPSPARSE_ACTION_NUMERIC,indexBase,
                                        HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
  cerr = hipMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif

  ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                          loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                          loTriFactor->csrMat->values->data().get(),
                          loTriFactor->csrMat->row_offsets->data().get(),
                          loTriFactor->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
                          loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
                          HIPSPARSE_ACTION_NUMERIC, indexBase,
                          HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
                          loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
                          HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  /* NOTE(review): this second PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,...)
     looks like it should be PetscLogEventEnd — as written the event opened above is
     never closed and is instead begun a second time. Verify against upstream PETSc. */
  ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);

  /* Create the solve analysis information */
  ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
  stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
                                 loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
                                 loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
                                 loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
                                 &loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
  cerr = hipMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif

  /* perform the solve analysis */
  stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
                           loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
                           loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
                           loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                           ,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;

  /*********************************************/
  /* Now the Transpose of the Upper Tri Factor */
  /*********************************************/

  /* allocate space for the transpose of the upper triangular factor */
  ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
  upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

  /* set the matrix descriptors of the upper triangular factor */
  matrixType = cusparseGetMatType(upTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(upTriFactor->descr);
  fillMode   = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat); stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat); stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat); /* set the operation */ upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr); #endif ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer #else upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, 
upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; hipError_t err; PetscErrorCode ierr; PetscFunctionBegin; if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct"); matstructT = 
/* (MatSeqAIJCUSPARSEFormExplicitTransposeForMult, continued) Build or refresh the explicit
   transpose of the GPU matrix used by transpose products; cached in cusparsestruct->matTranspose
   and guarded by A->transupdated. */
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(0); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); if (cusparsestruct->format != MAT_CUSPARSE_CSR) { ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); } if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); /* set alpha and beta */ err = hipMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
/* upload the host CSR row offsets; needed by the csr2csc conversion below */
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUSPARSE(stat); /* Next, convert CSR to CSC (i.e.
the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; }
#endif
} }
/* From here on the transpose struct exists; refresh its numerical values from the
   (possibly updated) forward matrix */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat; if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix"); if
(!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix rows"); if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix cols"); if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix values"); if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT"); if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT rows"); if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT cols"); if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
void *csr2cscBuffer; size_t csr2cscBufferSize; stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat); err = hipMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err);
#endif
if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11.
But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n,matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
} else { matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt()));
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
err = hipFree(csr2cscBuffer);CHKERRCUDA(err);
#endif
} PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(0); } /* Why do we need to analyze the transposed matrix again?
Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
/* (call continues) transposed-U factor CSR arrays and solve metadata */
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution.
*/ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* Natural-ordering variant: no row/column permutations, so b can feed the U^T solve
   directly and the L^T solve writes straight into x */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ...
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode
/* Forward solve x = P_c' * (U \ (L \ (P_r * b))) on the GPU: permute b by the row
   permutation, run the L then U triangular solves, then apply the column permutation */
MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); /* Next, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Then, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE,
upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Last, reorder with the column permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* Natural-ordering forward solve: no permutations, b feeds the L solve directly and
   the U solve writes straight into x */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat); /* Next, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* Download the (possibly updated) GPU values back into the host CSR array a->a;
   afterwards both copies are valid (PETSC_OFFLOAD_BOTH) */
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_GPU) { CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat; ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); cerr = hipMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); }
/* Expose the host value array; marks the CPU copy as the authoritative one since the
   caller may modify it */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar
*array[]) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); *array = a->a; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt m = A->rmap->n,*ii,*ridx,tmp; PetscErrorCode ierr; hipsparseStatus_t stat; PetscBool both = PETSC_TRUE; hipError_t err; PetscFunctionBegin; if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU"); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix*)cusparsestruct->mat->mat; if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values"); ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); matrix->values->assign(a->a, a->a+a->nz); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr); } else { PetscInt nnz; ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } if
(!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data"); if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); err = hipMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if (mat->num_rows) { /* cusparse errors on empty matrices! 
*/ stat = hipsparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat); } #endif } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY*)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets; delete (CsrMatrix*)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx,ridx+m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t); } }; struct MatMatCusparse { PetscBool cisdense; PetscScalar *Bt; Mat X; PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse 
                            matmat operations */
  PetscLogDouble flops;
  CsrMatrix      *Bcsr;  /* uncompressed view of B's CSR when B uses compressed row storage */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t matSpBDescr;
  PetscBool             initialized;  /* C = alpha op(A) op(B) + beta C */
  hipsparseDnMatDescr_t matBDescr;
  hipsparseDnMatDescr_t matCDescr;
  PetscInt              Blda,Clda;    /* Record leading dimensions of B and C here to detect changes*/
  size_t                mmBufferSize;
  void                  *mmBuffer;
  void                  *mmBuffer2;   /* SpGEMM WorkEstimation buffer */
  hipsparseSpGEMMDescr_t spgemmDesc;
#endif
};

/* Destructor for the MatMatCusparse product data: releases device buffers,
   hipsparse descriptors, and the intermediate dense matrix X. */
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  PetscErrorCode   ierr;
  MatMatCusparse   *mmdata = (MatMatCusparse *)data;
  hipError_t       cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = hipFree(mmdata->Bt);CHKERRCUDA(cerr);
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  if (mmdata->matSpBDescr) { stat = hipsparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->mmBuffer)    { cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
  if (mmdata->mmBuffer2)   { cerr = hipFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
  if (mmdata->matBDescr)   { stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matCDescr)   { stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->spgemmDesc)  { stat = hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);

/* Numeric phase of C = op(A)*op(B) with sparse A (MATSEQAIJCUSPARSE) and dense B.
   Handles AB, AtB, ABt, PtAP and RARt; for PtAP/RARt the sparse-dense product
   lands in the intermediate mmdata->X and is then multiplied with B on the GPU. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  PetscInt                     m,n,blda,clda;
  PetscBool                    flg,biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  hipsparseStatus_t            stat;
  hipsparseOperation_t         opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  PetscErrorCode               ierr;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;
  hipError_t                   cerr;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
  mmdata = (MatMatCusparse*)product->data;
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  /* pick the sparse operand (A or its explicit transpose) and the cusparse op */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = HIPSPARSE_OPERATION_TRANSPOSE;
    } else {
      ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
      mat  = cusp->matTranspose;
      opA  = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix*)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
  if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
  ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);

  ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
  /* PtAP/RARt write the sparse-dense result into the intermediate X, not into C */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
  }

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    if (mmdata->initialized && mmdata->Blda != blda) {stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
    if (!mmdata->matBDescr) {
      stat        = hipsparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Blda = blda;
    }

    if (mmdata->initialized && mmdata->Clda != clda) {stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      stat        = hipsparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Clda = clda;
    }

    if (!mat->matDescr) {
      stat = hipsparseCreateCsr(&mat->matDescr,
                                csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
                                csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
                                csrmat->values->data().get(),
                                HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                                HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    stat = hipsparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
                                    mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                                    mmdata->matCDescr,cusparse_scalartype,
                                    cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
      cerr = hipMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    stat = hipsparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
  }

  /* do hipsparseSpMM, which supports transpose on B */
  stat = hipsparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
                       mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                       mmdata->matCDescr,cusparse_scalartype,
                       cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t cerr;

    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    /* explicitly transpose B into mmdata->Bt with a geam */
    cerr = cublasXgeam(cublasv2handle,HIPBLAS_OP_T,HIPBLAS_OP_T,
                       B->cmap->n,B->rmap->n,
                       &PETSC_CUSPARSE_ONE ,barray,blda,
                       &PETSC_CUSPARSE_ZERO,barray,blda,
                       mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k    = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
                           csrmat->num_entries,mat->alpha_one,mat->descr,
                           csrmat->values->data().get(),
                           csrmat->row_offsets->data().get(),
                           csrmat->column_indices->data().get(),
                           mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
                           carray,clda);CHKERRCUSPARSE(stat);
#endif
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
  ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
  if (product->type == MATPRODUCT_RARt) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  } else if (product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
  }
  /* undo the temporary conversions done at entry */
  if (mmdata->cisdense) {
    ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
  }
  if (!biscuda) {
    ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Symbolic phase for sparse(A) x dense(B) products: sets sizes/type of C and
   allocates the MatMatCusparse product data (plus intermediate storage where needed). */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                A,B;
  PetscInt           m,n;
  PetscBool          cisdense,flg;
  PetscErrorCode     ierr;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  /* result dimensions depend on the product type */
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n =
        B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m = B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
  ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);

  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipError_t cerr = hipMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  }
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
    ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
    }
  }
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(0);
}

/* Numeric phase of the sparse-sparse product C = op(A)*op(B) on the GPU.
   Uses hipsparseSpGEMM (CUDA >= 11) or the legacy csrgemm path; the sparsity
   pattern and all scratch data were set up in the matching symbolic phase. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ*)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipError_t                   cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t        BmatSpDescr;
#endif

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse*)C->product->data;
  A = product->A;
  B = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
    if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix*)Cmat->mat;
    if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize;  /* empty product: nothing to compute */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);

  /* symmetric operands let us replace transposed products with plain AB */
  ptype = product->type;
  if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
  if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  Bcsr = mmdata->Bcsr ?
         mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix*)Cmat->mat;
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
  if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  /* recompute the numerical values using the descriptors/buffers set up in the symbolic phase */
  stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                 Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                 cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                                 mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                              Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                              cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                             Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                             Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                             Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                             Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
  ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
  c->reallocs         = 0;
  C->info.mallocs    += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(0);
}

/* Symbolic phase of the sparse-sparse product C = op(A)*op(B): determines the
   sparsity pattern of C on the GPU (via hipsparseSpGEMM work-estimation/compute
   on CUDA >= 11, or csrgemmNnz on older toolkits) and allocates C's storage. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *a,*b,*c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     i,j,m,n,k;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipError_t                   cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble               flops;
  PetscBool                    biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  int64_t                      C_num_rows1, C_num_cols1, C_nnz1;
  size_t                       bufSize2;
  hipsparseSpMatDescr_t        BmatSpDescr;
#else
  int                          cnz;
#endif

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
  a = (Mat_SeqAIJ*)A->data;
  b = (Mat_SeqAIJ*)B->data;
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");

  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;

  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
  /* symmetric operands let us replace transposed products with plain AB */
  ptype = product->type;
  if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
  if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
  biscompressed = PETSC_FALSE;
  ciscompressed = PETSC_FALSE;
  switch (ptype) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    k = A->cmap->n;
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    k = A->rmap->n;
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    k = A->cmap->n;
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }

  /* create cusparse matrix */
  ierr  = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  ierr  = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  c     = (Mat_SeqAIJ*)C->data;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  Cmat  = new Mat_SeqAIJCUSPARSEMultStruct;
  Ccsr  = new CsrMatrix;

  c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if a is in compressed row, then c will be in compressed row format */
    c->compressedrow.nrows = a->compressedrow.nrows;
    ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
    ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
    Ccusp->workVector  = new THRUSTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
  } else {
    c->compressedrow.nrows  = 0;
    c->compressedrow.i      = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector       = NULL;
    Cmat->cprowIndices      = NULL;
  }
  Ccusp->nrows    = ciscompressed ? c->compressedrow.nrows : m;
  Ccusp->mat      = Cmat;
  Ccusp->mat->mat = Ccsr;
  Ccsr->num_rows    = Ccusp->nrows;
  Ccsr->num_cols    = n;
  Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
  stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
  /* device-resident 1/0 scalars for device pointer mode */
  cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
    thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
    c->nz = 0;
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values = new THRUSTARRAY(c->nz);
    goto finalizesym;
  }

  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  if (!biscompressed) {
    Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    BmatSpDescr = Bmat->matDescr;
#endif
  } else { /* we need to use row offsets for the full matrix */
    CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
    Bcsr = new CsrMatrix;
    Bcsr->num_rows       = B->rmap->n;
    Bcsr->num_cols       = cBcsr->num_cols;
    Bcsr->num_entries    = cBcsr->num_entries;
    Bcsr->column_indices = cBcsr->column_indices;
    Bcsr->values         = cBcsr->values;
    if (!Bcusp->rowoffsets_gpu) {
      Bcusp->rowoffsets_gpu  = new THRUSTINTARRAY32(B->rmap->n + 1);
      Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
      ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
    }
    Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
    mmdata->Bcsr = Bcsr;  /* owned by mmdata; indices/values are shared with Bmat's CSR */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if (Bcsr->num_rows && Bcsr->num_cols) {
      stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
                                Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                                Bcsr->values->data().get(),
                                HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
                                HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    BmatSpDescr = mmdata->matSpBDescr;
#endif
  }
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");

  /* precompute flops count */
  if (ptype == MATPRODUCT_AB) {
    for (i=0, flops = 0; i<A->rmap->n; i++) {
      const PetscInt st = a->i[i];
      const PetscInt en = a->i[i+1];
      for (j=st; j<en; j++) {
        const PetscInt brow = a->j[j];
        flops += 2.*(b->i[brow+1] - b->i[brow]);
      }
    }
  } else if (ptype == MATPRODUCT_AtB) {
    for (i=0, flops = 0; i<A->rmap->n; i++) {
      const PetscInt anzi = a->i[i+1] - a->i[i];
      const PetscInt bnzi = b->i[i+1] - b->i[i];
      flops += (2.*anzi)*bnzi;
    }
  } else { /* TODO */
    flops = 0.;
  }

  mmdata->flops = flops;
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  /* C descriptor starts empty; pointers are attached once the nnz of C is known */
  stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
                            NULL, NULL, NULL,
                            HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
                            HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
  stat = hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
  /* ask bufferSize bytes for external memory */
  stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                        Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                        cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                                        mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
  cerr = hipMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
  /* inspect the matrices A and B to understand the memory requirement for the next step */
  stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                        Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                        cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                                        mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
  /* ask bufferSize again bytes for external memory */
  stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                 Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                 cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                                 mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
  /* The CUSPARSE documentation is not clear, nor the API
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
     it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address
     is stored in the descriptor! What a messy API... */
  cerr = hipMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
  /* compute the intermediate product of A * B */
  stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                                 Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                 cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
                                 mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  /* get matrix C non-zero entries C_nnz1 */
  stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
  c->nz = (PetscInt) C_nnz1;
  ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
                                 Ccsr->values->data().get());CHKERRCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                              Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                              cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
  stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
  /* legacy API: determine nnz of C first, then allocate */
  stat = hipsparseXcsrgemmNnz(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                              Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                              Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                              Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                              Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
  c->nz = cnz;
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
     I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when
     D is NULL, despite the fact that CUSPARSE documentation claims it is supported!
*/ stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; cerr = hipMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; cerr = hipMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; 
/* --- epilogue of MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE (the function opens earlier in this file) ---
   Expands the compressed device row offsets into the full host row pointer c->i, fills the SeqAIJ
   per-row bookkeeping (ilen/imax/nonzerorowcnt/rmax), allocates the host value array, and registers
   the numeric-phase callback on C. The enclosing loop over compressed rows continues here. */
const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r+1] = old; } /* rows past the last compressed row all get the terminal offset */ for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows]; }
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0;
/* per-row counts derived from the freshly copied row pointer */
for (k = 0; k < m; k++) { const PetscInt nn = c->i[k+1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); }
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask  = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled    = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ mmdata->reusesym = PETSC_TRUE; C->offloadmask = PETSC_OFFLOAD_GPU; }
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0); }

/* CPU fallback dispatcher; handles sparse or dense B */
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/* Selects the product-symbolic implementation for this matrix product: the CUSPARSE path when
   B (and, for ABC, C) are SEQAIJCUSPARSE and not bound to the CPU, the dense-CUDA path when B is
   dense, and the plain SeqAIJ path otherwise. */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
  Mat_Product    *product = mat->product;
  PetscErrorCode ierr;
  PetscBool      isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
  PetscFunctionBegin;
  MatCheckProduct(mat,1);
  ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  if (!product->A->boundtocpu && !product->B->boundtocpu) {
    ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
  }
  if (product->type == MATPRODUCT_ABC) {
    Ciscusp = PETSC_FALSE;
    if (!product->C->boundtocpu) {
      ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
    }
  }
  if (isdense) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
      if (product->A->boundtocpu) {
        ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
      } else {
        mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
      }
      break;
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else if (Biscusp && Ciscusp) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
      mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
      break;
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else { /* fallback for AIJ */
    ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* yy = A xx : thin wrapper over the shared mult/add kernel (no transpose) */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A xx + yy : thin wrapper over the shared mult/add kernel (no transpose) */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^H xx : thin wrapper over the shared mult/add kernel (trans=TRUE, herm=TRUE) */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A^H xx + yy : the kernel call completes on the next source line */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr =
/* completes the MatMultHermitianTransposeAdd call begun on the previous source line */
MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^T xx : thin wrapper over the shared mult/add kernel (trans=TRUE, herm=FALSE) */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Device kernel: y[idx[i]] += x[i] for i in [0,n) — scatter-adds the compressed work vector
   back into the full result vector. One element per thread; launched below with 256-thread
   blocks and a bounds guard for the ragged tail. */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}

/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct;
  PetscScalar                  *xarray,*zarray,*dptr,*beta,*xptr;
  PetscErrorCode               ierr;
  hipError_t                   cerr;
  hipsparseStatus_t            stat;
  hipsparseOperation_t         opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
  PetscBool                    compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  PetscInt                     nx,ny;
#endif
  PetscFunctionBegin;
  if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian and not transpose not supported");
  if (!a->nonzerorowcnt) { /* all-zero matrix: result is just y (or zero when no y) */
    if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
    else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
    PetscFunctionReturn(0);
  }
  /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!trans) {
    matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
  } else {
    if (herm || !A->form_explicit_transpose) {
      /* let cuSPARSE transpose on the fly */
      opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    } else {
      /* use (and lazily build) the explicitly stored transpose */
      if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
    }
  }
  /* Does the matrix use compressed rows (i.e., drop zero rows)? */
  compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
  try {
    ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
    else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
      /* z = A x + beta y.
         If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
         When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
      */
      xptr = xarray;
      dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
      beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length,
         since the work vector is allocated to accommodate different uses. So we get the length info
         directly from mat.
      */
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_cols;
        ny = mat->num_rows;
      }
#endif
    } else {
      /* z = A^T x + beta y
         If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
         Note A^Tx is of full length, so we set beta to 1.0 if y exists.
      */
      xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
      dptr = zarray;
      beta = yy ? matstruct->beta_one : matstruct->beta_zero;
      if (compressed) { /* Scatter x to work vector */
        thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
        thrust::for_each(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse());
      }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_rows;
        ny = mat->num_cols;
      }
#endif
    }
    /* csr_spmv does y = alpha op(A) x + beta y */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
      if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
        stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
        matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
      } else {
        /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
        stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
        stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
      }
      stat = hipsparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */ matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
      CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
      stat = cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
    } else {
      if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
        stat = cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
      }
    }
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
      if (yy) { /* MatMultAdd: zz = A*xx + yy */
        if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
          ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
        } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
          ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
        }
      } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
        ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
      }
      /* ScatterAdd the result from work vector into the full vector when A is compressed */
      if (compressed) {
        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registerred)
           and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to
           prevent that. So I just add a ScatterAdd kernel.
        */
#if 0
        thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
        thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAPlusEquals());
#else
        PetscInt n = matstruct->cprowIndices->size();
        hipLaunchKernelGGL(( ScatterAdd), dim3((n+255)/256),dim3(256),0,PetscDefaultCudaStream, n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      }
    } else {
      if (yy && yy != zz) {
        ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
      }
    }
    ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
    else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  if (yy) {
    ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
  } else {
    ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* storage class of the next definition, whose signature continues on the following source line */
static
/* zz = A^T xx + yy : thin wrapper over the shared mult/add kernel (trans=TRUE, herm=FALSE);
   the 'static' storage class for this definition sits at the end of the previous source line */
PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Runs the SeqAIJ assembly end; if a device-side split-CSR matrix exists (and this is a final,
   non-flush assembly on a GPU-resident matrix), marks the data as living on the GPU. */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode              ierr;
  PetscSplitCSRDataStructure  *d_mat = NULL;
  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
  }
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
  if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
  if (d_mat) {
    A->offloadmask = PETSC_OFFLOAD_GPU;
  }
  PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz). By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Destroys the CUSPARSE side structures (or the triangular factors when this is a factored
   matrix), then frees the device split-CSR copy, removes composed methods and falls through to
   the SeqAIJ destroy. NOTE: this definition continues on the next source line. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode              ierr;
  PetscSplitCSRDataStructure  *d_mat = NULL;
  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
    ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
    ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  if
/* continuation of MatDestroy_SeqAIJCUSPARSE: free the device-side split-CSR copy, if any */
  (d_mat) {
    Mat_SeqAIJ                 *a = (Mat_SeqAIJ*)A->data;
    hipError_t                 err;
    PetscSplitCSRDataStructure h_mat;
    ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr);
    err = hipMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), hipMemcpyDeviceToHost);CHKERRCUDA(err);
    if (a->compressedrow.use) {
      err = hipFree(h_mat.diag.i);CHKERRCUDA(err);
    }
    err = hipFree(d_mat);CHKERRCUDA(err);
  }
  /* remove all composed methods registered by MatConvert/MatBindToCPU */
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);

/* Duplicate via the SeqAIJ path, then convert the copy in place to SEQAIJCUSPARSE */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Y = Y + a*X on the GPU. Uses a cuBLAS axpy when the nonzero patterns match exactly, the
   cuSPARSE spgeam path for SUBSET_NONZERO_PATTERN, and falls back to the CPU SeqAIJ AXPY
   otherwise (including when either operand is not on the GPU). */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
  Mat_SeqAIJCUSPARSE *cy;
  Mat_SeqAIJCUSPARSE *cx;
  PetscScalar        *ay;
  const PetscScalar  *ax;
  CsrMatrix          *csry,*csrx;
  hipError_t         cerr;
  PetscFunctionBegin;
  cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
  cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
  if (X->ops->axpy != Y->ops->axpy) {
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* if we are here, it means both matrices are bound to GPU */
  ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
  if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
  if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
  csry = (CsrMatrix*)cy->mat->mat;
  csrx = (CsrMatrix*)cx->mat->mat;
  /* see if we can turn this into a cublas axpy */
  if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
    bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
    if (eq) {
      eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
    }
    if (eq) str = SAME_NONZERO_PATTERN;
  }
  /* spgeam is buggy with one column */
  if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
  if (str == SUBSET_NONZERO_PATTERN) {
    hipsparseStatus_t stat;
    PetscScalar       b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    size_t bufferSize;
    void   *buffer;
#endif
    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    /* alpha/beta live on the host for the spgeam calls below */
    stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
    cerr = hipMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    cerr = hipFree(buffer);CHKERRCUDA(cerr);
#else
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
    stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else if (str == SAME_NONZERO_PATTERN) {
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t berr;
    PetscBLASInt    one = 1, bnz = 1;
    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Y = a*Y : scales the GPU value array in place with a cuBLAS scal */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
  PetscErrorCode  ierr;
  Mat_SeqAIJ      *y = (Mat_SeqAIJ*)Y->data;
  PetscScalar     *ay;
  hipError_t      cerr;
  hipblasHandle_t cublasv2handle;
  hipblasStatus_t berr;
  PetscBLASInt    one = 1, bnz = 1;
  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
  ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
  ierr = PetscBLASIntCast(y->nz,&bnz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  berr = cublasXscal(cublasv2handle,bnz,&a,ay,one);CHKERRCUBLAS(berr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuFlops(bnz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
  ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Zeroes the GPU copies of the values (matrix and its stored transpose, when present) as well
   as the host copy, and sets the offload mask accordingly. NOTE: this definition continues on
   the next source line. */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;
  PetscBool      both = PETSC_FALSE;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
    if (spptr->mat) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
      if (matrix->values) {
        both = PETSC_TRUE;
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
    if (spptr->matTranspose) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
      if
(matrix->values) { thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.); } } } //ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr); ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr); ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr); if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; else A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0); if (flg) { ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = 
MatZeroEntries_SeqAIJCUSPARSE;
    A->ops->mult                      = MatMult_SeqAIJCUSPARSE;
    A->ops->multadd                   = MatMultAdd_SeqAIJCUSPARSE;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJCUSPARSE;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJCUSPARSE;
    A->ops->multhermitiantranspose    = MatMultHermitianTranspose_SeqAIJCUSPARSE;
    A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJCUSPARSE;

    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
  }
  A->boundtocpu = flg;
  a->inode.use  = flg;
  PetscFunctionReturn(0);
}

/* Convert a SeqAIJ matrix to SeqAIJCUSPARSE: allocates the GPU-side
   Mat_SeqAIJCUSPARSE (or TriFactors, for factored matrices) struct with a
   hipSPARSE handle on PETSc's default stream, installs the GPU ops table via
   MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE), and sets VECCUDA as the default
   vector type for the result. */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
  PetscErrorCode    ierr;
  hipsparseStatus_t stat;
  Mat               B;

  PetscFunctionBegin;
  ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
  } else if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  B = *newmat;

  ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
  ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);

  if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
    if (B->factortype == MAT_FACTOR_NONE) {
      Mat_SeqAIJCUSPARSE *spptr;

      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      spptr->spmvAlg    = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */
      spptr->spmmAlg    = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */
      spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1;
#endif
      B->spptr = spptr;
    } else {
      Mat_SeqAIJCUSPARSETriFactors *spptr;

      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      B->spptr = spptr;
    }
    B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }
  B->ops->assemblyend    = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy        = MatDestroy_SeqAIJCUSPARSE;
  B->ops->setoption      = MatSetOption_SeqAIJCUSPARSE;
  B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->bindtocpu      = MatBindToCPU_SeqAIJCUSPARSE;
  B->ops->duplicate      = MatDuplicate_SeqAIJCUSPARSE;

  ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
-  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).

  Level: beginner

.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);

/* Register the CUSPARSE-based solver types: banded LU for SeqAIJ, and
   LU/Cholesky/ILU/ICC for SeqAIJCUSPARSE. */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Free the Mat_SeqAIJCUSPARSE struct: the device CSR (and transpose), the
   thrust scratch arrays, and the hipSPARSE handle. */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode    ierr;
  hipsparseStatus_t stat;
PetscFunctionBegin;
  if (*cusparsestruct) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    delete (*cusparsestruct)->rowoffsets_gpu;
    delete (*cusparsestruct)->cooPerm;
    delete (*cusparsestruct)->cooPerm_a;
    delete (*cusparsestruct)->csr2csc_i;
    if ((*cusparsestruct)->handle) {stat = hipsparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
    ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Free a CsrMatrix (device row offsets, column indices, and values) and NULL
   the caller's pointer. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}

/* Free one triangular-factor struct: matrix descriptor, analysis info, the
   factor's CSR storage, and the device/pinned-host work buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  hipsparseStatus_t stat;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) { stat = hipsparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    if ((*trifactor)->solveBuffer) {hipError_t cerr = hipFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
    if ((*trifactor)->AA_h) {hipError_t cerr = hipHostFree((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if ((*trifactor)->csr2cscBuffer) {hipError_t cerr = hipFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
    ierr = PetscFree(*trifactor);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Free one mult struct in the given storage format: the CSR (or HYB/ELL)
   payload, the descriptor, the compressed-row index array, the device-resident
   scalar constants, and (CUDA >= 11) the SpMV descriptors/buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix         *mat;
  hipsparseStatus_t stat;
  hipError_t        err;
  PetscErrorCode    ierr; /* FIX: was missing -- CsrMatrix_Destroy()'s error return was silently discarded */

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr); /* FIX: check the return code like every other call site */
      }
    }
    if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha_one) { err=hipFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
    if ((*matstruct)->beta_zero) { err=hipFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
    if ((*matstruct)->beta_one)  { err=hipFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) {stat = hipsparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
    for (int i=0; i<3; i++) {
      if (mdata->cuSpMV[i].initialized) {
        err  = hipFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
        stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
        stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(0);
}

/* Reset (but do not free) a TriFactors struct: destroy the four triangular
   factors, the permutation index arrays, the work vector, and the band-factor
   device arrays. */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete
(*trifactors)->workVector;
    (*trifactors)->rpermIndices = NULL;
    (*trifactors)->cpermIndices = NULL;
    (*trifactors)->workVector   = NULL;
    if ((*trifactors)->a_band_d) {hipError_t cerr = hipFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
    if ((*trifactors)->i_band_d) {hipError_t cerr = hipFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
  }
  PetscFunctionReturn(0);
}

/* Fully destroy a TriFactors struct: reset its contents, destroy its
   hipSPARSE handle (if any), and free the struct itself. */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode    ierr;
  hipsparseHandle_t handle;
  hipsparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    /* FIX: extra parentheses make the intentional assignment-in-condition
       explicit (silences -Wparentheses; behavior unchanged) */
    if ((handle = (*trifactors)->handle)) {
      stat = hipsparseDestroy(handle);CHKERRCUSPARSE(stat);
    }
    ierr = PetscFree(*trifactors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Lexicographic (row,col) comparison for sorting COO entries. */
struct IJCompare
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Equality of (row,col) pairs, used to detect repeated COO entries. */
struct IJEqual
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
    return true;
  }
};

/* 0 when two consecutive indices are equal, 1 otherwise (adjacent_difference op). */
struct IJDiff
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return t1 == t2 ?
0 : 1;
  }
};

/* Logical OR of two flags (combines the row-change and column-change marks). */
struct IJSum
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return t1||t2;
  }
};

#include <thrust/iterator/discard_iterator.h>
/* Insert/add the COO values v (in the user's original ordering) into the
   device CSR values, using the permutation (cooPerm) and, when the COO list
   had repeated entries, the segment keys (cooPerm_a) built by
   MatSetPreallocationCOO_SeqAIJCUSPARSE.  v may be host or device memory. */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJCUSPARSE                   *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ                           *a = (Mat_SeqAIJ*)A->data;
  THRUSTARRAY                          *cooPerm_v = NULL;
  thrust::device_ptr<const PetscScalar> d_v;
  CsrMatrix                            *matrix;
  PetscErrorCode                       ierr;
  hipError_t                           cerr;
  PetscInt                             n;

  PetscFunctionBegin;
  if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
  if (!cusp->cooPerm) {
    /* no COO preallocation was done: just run a normal assembly */
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  matrix = (CsrMatrix*)cusp->mat->mat;
  if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  if (!v) {
    if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
    goto finalize;
  }
  n = cusp->cooPerm->size();
  if (isCudaMem(v)) {
    d_v = thrust::device_pointer_cast(v);
  } else {
    /* host values: stage them on the device first */
    cooPerm_v = new THRUSTARRAY(n);
    cooPerm_v->assign(v,v+n);
    d_v = cooPerm_v->data();
    ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
    if (cusp->cooPerm_a) {
      /* repeated COO entries: segmented-sum the permuted values, then add */
      THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
      thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
      delete cooPerm_w;
    } else {
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAPlusEquals());
    }
  } else {
    if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
    } else {
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAEquals());
    }
  }
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
  delete cooPerm_v;
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
  ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
  a->reallocs         = 0;
  A->info.mallocs    += 0;
  A->info.nz_unneeded = 0;
  A->assembled = A->was_assembled = PETSC_TRUE;
  A->num_ass++;
  PetscFunctionReturn(0);
}

/* Mark the cached explicit transpose as out of date; optionally destroy it
   (and the csr2csc index map) altogether. */
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (!cusp) PetscFunctionReturn(0);
  if
(destroy) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
    delete cusp->csr2csc_i;
    cusp->csr2csc_i = NULL;
  }
  A->transupdated = PETSC_FALSE;
  PetscFunctionReturn(0);
}

#include <thrust/binary_search.h>
/* Build the CSR structure of A from n COO (i,j) pairs, entirely on the GPU:
   sorts the pairs lexicographically (recording the permutation in cooPerm),
   collapses duplicates (recording segment keys in cooPerm_a when any exist),
   derives the row offsets with upper_bound, and mirrors the structure to the
   host arrays of the underlying SeqAIJ.  Values are zeroed and copied to the
   device so the matvec struct can be allocated immediately. */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
  PetscErrorCode     ierr;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;
  PetscInt           cooPerm_n, nzr = 0;
  hipError_t         cerr;

  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
  if (n != cooPerm_n) {
    /* size changed: throw away any previously built permutation data */
    delete cusp->cooPerm;
    delete cusp->cooPerm_a;
    cusp->cooPerm = NULL;
    cusp->cooPerm_a = NULL;
  }
  if (n) {
    THRUSTINTARRAY d_i(n);
    THRUSTINTARRAY d_j(n);
    THRUSTINTARRAY ii(A->rmap->n);

    if (!cusp->cooPerm)   { cusp->cooPerm   = new THRUSTINTARRAY(n); }
    if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }

    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
    d_i.assign(coo_i,coo_i+n);
    d_j.assign(coo_j,coo_j+n);

    auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
    auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
    thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare());
    *cusp->cooPerm_a = d_i;
    THRUSTINTARRAY w = d_j;

    auto nekey = thrust::unique(fkey, ekey, IJEqual());
    if (nekey == ekey) { /* all entries are unique */
      delete cusp->cooPerm_a;
      cusp->cooPerm_a = NULL;
    } else { /* I couldn't come up with a more elegant algorithm */
      /* mark positions where row or column changes, then prefix-sum to get a
         segment id per sorted COO entry */
      adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff());
      adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());
      (*cusp->cooPerm_a)[0] = 0;
      w[0] = 0;
      thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());
      thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());
    }
    thrust::counting_iterator<PetscInt> search_begin(0);
    /* per-row nonzero counts via binary search over the (unique) sorted rows */
    thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(),
                        search_begin, search_begin + A->rmap->n,
                        ii.begin());
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
    a->singlemalloc = PETSC_FALSE;
    a->free_a       = PETSC_TRUE;
    a->free_ij      = PETSC_TRUE;
    ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
    a->i[0] = 0;
    cerr = hipMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    a->nz = a->maxnz = a->i[A->rmap->n];
    a->rmax = 0;
    ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
    ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
    cerr = hipMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
    if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
    for (PetscInt i = 0; i < A->rmap->n; i++) {
      const PetscInt nnzr = a->i[i+1] - a->i[i];
      nzr += (PetscInt)!!(nnzr);
      a->ilen[i] = a->imax[i] = nnzr;
      a->rmax = PetscMax(a->rmax,nnzr);
    }
    a->nonzerorowcnt = nzr;
    A->preallocated = PETSC_TRUE;
    ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
  }
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);

  /* We want to allocate the CUSPARSE struct for matvec now.
     The code is so convoluted now that I prefer to copy zeros */
  ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
  ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
  A->offloadmask = PETSC_OFFLOAD_CPU;
  A->nonzerostate++;
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);

  A->assembled = PETSC_FALSE;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}

/* Borrow a read-only device pointer to the CSR values (CSR format only);
   copies data to the GPU first if needed. */
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(0);
}

PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  *a = NULL;
  PetscFunctionReturn(0);
}

/* Borrow a writable device pointer to the CSR values; marks the GPU copy as
   authoritative and invalidates (without destroying) the cached transpose. */
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}

/* Like GetArray but without the copy-to-GPU: the caller promises to overwrite
   every value, so the current (possibly stale) device contents do not matter. */
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}

/* Lexicographic comparison on (row,col) of 4-tuples (row,col,value,tag),
   used when merging the COO streams of two matrices. */
struct IJCompare4
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() ==
t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Add a constant shift to a column index (functor for transform iterators). */
struct Shift
{
  int _shift;

  Shift(int shift) : _shift(shift) {}
  __host__ __device__
  inline int operator() (const int &c)
  {
    return c + _shift;
  }
};

/* merges to SeqAIJCUSPARSE matrices, [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     Annz,Bnnz;
  hipsparseStatus_t            stat;
  PetscInt                     i,m,n,zero = 0;
  hipError_t                   cerr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidHeaderSpecific(B,MAT_CLASSID,2);
  PetscValidPointer(C,4);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
  if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n);
  if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
  if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (reuse == MAT_INITIAL_MATRIX) {
    m = A->rmap->n;
    n = A->cmap->n + B->cmap->n;
    ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
    ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
    ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
    c = (Mat_SeqAIJ*)(*C)->data;
    Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
    Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
    Ccsr = new CsrMatrix;
    Cmat->cprowIndices      = NULL;
    c->compressedrow.use    = PETSC_FALSE;
    c->compressedrow.nrows  = 0;
    c->compressedrow.i      = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector       = NULL;
    Ccusp->nrows    = m;
    Ccusp->mat      = Cmat;
    Ccusp->mat->mat = Ccsr;
    Ccsr->num_rows  = m;
    Ccsr->num_cols  = n;
    stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
    stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
    stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
    cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
    cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
    cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
    cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
    ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
    if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
    if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");

    Acsr = (CsrMatrix*)Acusp->mat->mat;
    Bcsr = (CsrMatrix*)Bcusp->mat->mat;
    Annz = (PetscInt)Acsr->column_indices->size();
    Bnnz = (PetscInt)Bcsr->column_indices->size();
    c->nz = Annz + Bnnz;
    Ccsr->row_offsets    = new THRUSTINTARRAY32(m+1);
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values         = new THRUSTARRAY(c->nz);
    Ccsr->num_entries    = c->nz;
    Ccusp->cooPerm       = new THRUSTINTARRAY(c->nz);
    if (c->nz) {
      auto Acoo = new THRUSTINTARRAY32(Annz);
      auto Bcoo = new THRUSTINTARRAY32(Bnnz);
      auto Ccoo = new THRUSTINTARRAY32(c->nz);
      THRUSTINTARRAY32 *Aroff,*Broff;

      if (a->compressedrow.use) { /* need full row offset */
        if (!Acusp->rowoffsets_gpu) {
          Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
          Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
          ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
        }
        Aroff = Acusp->rowoffsets_gpu;
      } else Aroff = Acsr->row_offsets;
      if (b->compressedrow.use) { /* need full row offset */
        if (!Bcusp->rowoffsets_gpu) {
          Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
          Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
          ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
        }
        Broff = Bcusp->rowoffsets_gpu;
      } else Broff = Bcsr->row_offsets;
      ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
      /* expand both CSRs to COO row lists, then merge the two sorted streams */
      stat = hipsparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      stat = hipsparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      /* Issues when using bool with large matrices on SUMMIT 10.2.89 */
      auto Aperm = thrust::make_constant_iterator(1);
      auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
      auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
      auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
      /* there are issues instantiating the merge operation using a transform iterator for the columns of B */
      auto Bcib = Bcsr->column_indices->begin();
      auto Bcie = Bcsr->column_indices->end();
      thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
      auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
      auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
      auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
      auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
      auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
      auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
      auto p1 = Ccusp->cooPerm->begin();
      auto p2 = Ccusp->cooPerm->begin();
      thrust::advance(p2,Annz);
      PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
      /* undo the in-place column shift applied to B above */
      thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
      auto cci = thrust::make_counting_iterator(zero);
      auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
      PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
      auto pred = thrust::identity<int>();
      PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
      PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
      stat = hipsparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
      cerr = WaitForCUDA();CHKERRCUDA(cerr);
      ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      delete wPerm;
      delete Acoo;
      delete Bcoo;
      delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
                                Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
                                HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
      if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */
        PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
        Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
        CsrMatrix *CcsrT = new CsrMatrix;
        CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
        CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;

        (*C)->form_explicit_transpose = PETSC_TRUE;
        (*C)->transupdated = PETSC_TRUE;
        Ccusp->rowoffsets_gpu = NULL;
        CmatT->cprowIndices = NULL;
        CmatT->mat = CcsrT;
        CcsrT->num_rows = n;
        CcsrT->num_cols = m;
        CcsrT->num_entries = c->nz;

        CcsrT->row_offsets    = new THRUSTINTARRAY32(n+1);
        CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
        CcsrT->values         = new THRUSTARRAY(c->nz);

        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        /* C^T is the transposes of A and B stacked: B^T row offsets continue
           A^T's, shifted by A's nnz */
        auto rT = CcsrT->row_offsets->begin();
        if (AT) {
          rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
          thrust::advance(rT,-1);
        }
        if (BT) {
          auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
          auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
          thrust::copy(titb,tite,rT);
        }
        auto cT = CcsrT->column_indices->begin();
        if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
        if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
        auto vT = CcsrT->values->begin();
        if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
        if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

        stat = hipsparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
        stat = hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
        cerr = hipMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = hipMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
        cerr = hipMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
        cerr = hipMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
        cerr = hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
                                  CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
                                  HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
        Ccusp->matTranspose = CmatT;
      }
    }

    c->singlemalloc = PETSC_FALSE;
    c->free_a       = PETSC_TRUE;
    c->free_ij      = PETSC_TRUE;
    ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
    ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
    if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
      THRUSTINTARRAY ii(Ccsr->row_offsets->size());
      THRUSTINTARRAY jj(Ccsr->column_indices->size());
      ii = *Ccsr->row_offsets;
      jj = *Ccsr->column_indices;
      cerr = hipMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
      cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    } else {
      cerr = hipMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
      cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
    }
    ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
    ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
    c->maxnz         = c->nz;
    c->nonzerorowcnt = 0;
    c->rmax = 0;
    for (i = 0; i < m; i++) {
      const PetscInt nn = c->i[i+1] - c->i[i];
      c->ilen[i] = c->imax[i] = nn;
      c->nonzerorowcnt += (PetscInt)!!nn;
      c->rmax = PetscMax(c->rmax,nn);
    }
    ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
    ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
    (*C)->nonzerostate++;
    ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
    Ccusp->nonzerostate = (*C)->nonzerostate;
    (*C)->preallocated  = PETSC_TRUE;
  } else {
    if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n);
    c = (Mat_SeqAIJ*)(*C)->data;
    if (c->nz) {
      Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
      if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
      if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
      if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
      ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
      ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
      if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
      if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
      Acsr = (CsrMatrix*)Acusp->mat->mat;
      Bcsr = (CsrMatrix*)Bcusp->mat->mat;
      Ccsr = (CsrMatrix*)Ccusp->mat->mat;
      if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
      if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
      if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
      if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries)
SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries); if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid,Acsr->num_entries); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); thrust::for_each(zibait,zieait,VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end()))); thrust::for_each(zibbit,ziebit,VecCUDAEquals()); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? 
(CsrMatrix*)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); (*C)->transupdated = PETSC_TRUE; } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); } } ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { PetscErrorCode ierr; bool dmem; const PetscScalar *av; hipError_t cerr; PetscFunctionBegin; dmem = isCudaMem(v); ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx,idx+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n)); thrust::for_each(zibit,zieit,VecCUDAEquals()); if (w) { cerr = hipMemcpy(v,w->data().get(),n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } delete w; } else { cerr = hipMemcpy(v,av,n*sizeof(PetscScalar),dmem ? 
hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr); PetscFunctionReturn(0); } /* LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields) requires: structurally symmetric: fix with transpose/column meta data */ /* The GPU LU factor kernel */ __global__ void __launch_bounds__(1024,1) mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[]) { const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i); // set i (row+1) if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero // for (int rowb = start_i + blkIdx*blockDim.y + threadIdx.y; rowb < end_i; rowb += Nblk*blockDim.y) { // rows in block for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i && threadIdx.x==0) { PetscInt i=rowb+1, ni = (rowb>bw) ? bw+1 : i, n1L = ni*(ni-1)/2, nug= i*bw, n2L = bw*((rowb>bw) ? (rowb-bw) : 0), mi = bw + rowb + 1 - n, clip = (mi>0) ? 
mi*(mi-1)/2 + mi: 0; bi_csr[rowb+1] = n1L + nug - clip + n2L + i; } } } // copy AIJ to AIJ_BAND __global__ void __launch_bounds__(1024,1) mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[], const int ai_d[], const int aj_d[], const PetscScalar aa_d[], const int bi_csr[], PetscScalar ba_csr[]) { const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i); // zero B if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i) { PetscScalar *batmp = ba_csr + bi_csr[rowb]; const PetscInt nzb = bi_csr[rowb+1] - bi_csr[rowb]; for (int j=threadIdx.x ; j<nzb ; j += blockDim.x) { if (j<nzb) { batmp[j] = 0; } } } } // copy A into B with CSR format -- these two loops can be fused for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i) { const PetscInt rowa = r[rowb], nza = ai_d[rowa+1] - ai_d[rowa]; const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb>bw) ? 
rowb-bw : 0; const PetscScalar *av = aa_d + ai_d[rowa]; PetscScalar *batmp = ba_csr + bi_csr[rowb]; /* load in initial (unfactored row) */ for (int j=threadIdx.x ; j<nza ; j += blockDim.x) { if (j<nza) { PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart; PetscScalar vala = av[j]; batmp[idx] = vala; } } } } } // print AIJ_BAND __global__ void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[]) { // debug if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0){ printf("B (AIJ) n=%d:\n",(int)n); for (int rowb=0;rowb<n;rowb++) { const PetscInt nz = bi_csr[rowb+1] - bi_csr[rowb]; const PetscScalar *batmp = ba_csr + bi_csr[rowb]; for (int j=0; j<nz; j++) printf("(%13.6e) ",PetscRealPart(batmp[j])); printf(" bi=%d\n",bi_csr[rowb+1]); } } } // Band LU kernel --- ba_csr bi_csr __global__ void __launch_bounds__(1024,1) mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[]) { extern __shared__ PetscInt smemInt[]; PetscInt *sm_pkIdx = &smemInt[0]; const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt start = field*nloc, end = start + nloc; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) auto g = cooperative_groups::this_grid(); #endif // A22 panel update for each row A(1,:) and col A(:,1) for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) { PetscInt tnzUd = bw, maxU = end-1 - glbDD; // we are chopping off the inter ears const PetscInt nzUd = (tnzUd>maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ? 
bw : glbDD; // global to go past ears after first const PetscInt nzUd_pad = blockDim.y*(nzUd/blockDim.y + !!(nzUd%blockDim.y)); PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset; const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end) const PetscScalar Bdd = *pBdd; const PetscInt offset = blkIdx*blockDim.y + threadIdx.y, inc = Nblk*blockDim.y; for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd_pad ; idx += inc, myi += inc) { /* assuming symmetric structure */ if (idx < nzUd && threadIdx.x==0) { /* assuming symmetric structure */ const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi-glbDD); // cuts off just the first (global) block PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx; *Aid = *Aid/Bdd; sm_pkIdx[threadIdx.y] = kIdx; } __syncthreads(); // synch on threadIdx.x only if (idx < nzUd) { /* assuming symmetric structure */ PetscInt kIdx = sm_pkIdx[threadIdx.y]; PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx; PetscScalar *Aij = Aid + 1; PetscScalar Lid = *Aid; for (int jIdx=threadIdx.x ; jIdx<nzUd ; jIdx += blockDim.x) { if (jIdx<nzUd) { Aij[jIdx] -= Lid*baUd[jIdx]; } } } } #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) g.sync(); #else __syncthreads(); #endif } /* endof for (i=0; i<n; i++) { */ } static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat,Vec,Vec); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructA; CsrMatrix *matrixA; PetscErrorCode ierr; hipError_t cerr; const PetscInt n=A->rmap->n, *ic, *r; const int *ai_d, *aj_d; const PetscScalar *aa_d; PetscScalar *ba_t = cusparseTriFactors->a_band_d; int *bi_t = cusparseTriFactors->i_band_d; PetscContainer container; 
int Ni = 10, team_size=9, Nf, nVec=56, nconcurrent = 1, nsm = -1; PetscFunctionBegin; if (A->rmap->n == 0) { PetscFunctionReturn(0); } // cusparse setup if (!cusparsestructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparsestructA"); matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; // matstruct->cprowIndices if (!matstructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct"); matrixA = (CsrMatrix*)matstructA->mat; if (!matrixA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matrix cusparsestructA->mat->mat"); // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; if ((*pNf)/1000>0) nconcurrent = (*pNf)/1000; // number of SMs to use } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); // get data ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data()); ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data()); aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data()); aa_d = thrust::raw_pointer_cast(matrixA->values->data().get()); r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); { int bw = (2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-b->nz))+PETSC_MACHINE_EPSILON))/2, bm1=bw-1,nl=n/Nf; int gpuid; hipDeviceProp_t prop; hipGetDevice(&gpuid); hipGetDeviceProperties(&prop, gpuid); #if PETSC_PKG_CUDA_VERSION_LT(11,0,0) Ni = 1/nconcurrent; Ni = 1; #else nsm = prop.multiProcessorCount; Ni = nsm/Nf/nconcurrent; #endif team_size = bw/Ni + !!(bw%Ni); nVec = PetscMin(bw, 1024/team_size); ierr = PetscInfo5(A,"Matrix Bandwidth = %d, number SMs/block = %d, num concurency = %d, num fields = %d, numSMs/GPU = %d\n",bw,Ni,nconcurrent,Nf,nsm);CHKERRQ(ierr); { dim3 
dimBlockTeam(nVec,team_size); dim3 dimBlockLeague(Nf,Ni); hipLaunchKernelGGL(( mat_lu_factor_band_copy_aij_aij), dim3(dimBlockLeague),dim3(dimBlockTeam), 0, 0, n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t); CHECK_LAUNCH_ERROR(); // does a sync #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) void *kernelArgs[] = { (void*)&n, (void*)&bw, (void*)&bi_t, (void*)&ba_t}; hipLaunchCooperativeKernel((void*)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, team_size*sizeof(PetscInt), NULL); #else hipLaunchKernelGGL(( mat_lu_factor_band), dim3(dimBlockLeague),dim3(dimBlockTeam),team_size*sizeof(PetscInt), 0, n, bw, bi_t, ba_t); #endif CHECK_LAUNCH_ERROR(); // does a sync #if defined(PETSC_USE_LOG) ierr = PetscLogGpuFlops((PetscLogDouble)Nf*(bm1*(bm1 + 1)*(2*bm1 + 1)/3 + 2*(nl-bw)*bw*bw + nl*(nl+1)/2));CHKERRQ(ierr); #endif } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */ B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND; B->ops->solvetranspose = NULL; // need transpose B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatrixNfDestroy(void *ptr) { PetscInt *nf = (PetscInt *)ptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscFree(nf);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b; IS isicol; PetscErrorCode ierr; hipError_t cerr; const PetscInt *ic,*ai=a->i,*aj=a->j; PetscScalar *ba_t; int *bi_t; PetscInt i,n=A->rmap->n,Nf; PetscInt nzBcsr,bwL,bwU; PetscBool missing; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscContainer container; PetscFunctionBegin; if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"matrix must be square"); ierr = MatMissingDiagonal(A,&missing,&i);CHKERRQ(ierr); if (missing) 
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix is missing diagonal entry %D",i); if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"!cusparseTriFactors"); ierr = MatGetOption(A,MAT_STRUCTURALLY_SYMMETRIC,&missing);CHKERRQ(ierr); if (!missing) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"only structrally symmetric matrices supported"); // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr); ierr = PetscMalloc(sizeof(PetscInt), &pNf);CHKERRQ(ierr); *pNf = Nf; ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr); ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr); ierr = PetscObjectCompose((PetscObject)B, "Nf", (PetscObject) container);CHKERRQ(ierr); ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); ierr = ISInvertPermutation(iscol,PETSC_DECIDE,&isicol);CHKERRQ(ierr); ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation_SeqAIJ(B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)isicol);CHKERRQ(ierr); b = (Mat_SeqAIJ*)(B)->data; /* get band widths, MatComputeBandwidth should take a reordering ic and do this */ bwL = bwU = 0; for (int rwb=0; rwb<n; rwb++) { const PetscInt rwa = ic[rwb], anz = ai[rwb+1] - ai[rwb], *ajtmp = aj + ai[rwb]; for (int j=0;j<anz;j++) { PetscInt colb = ic[ajtmp[j]]; if (colb<rwa) { // L if (rwa-colb > bwL) bwL = rwa-colb; } else { if (colb-rwa > bwU) bwU = colb-rwa; } } } ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr); /* only support structurally symmetric, but it might work */ if (bwL!=bwU) 
SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Only symmetric structure supported (now) W_L=%D W_U=%D",bwL,bwU); ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); nzBcsr = n + (2*n-1)*bwU - bwU*bwU; b->maxnz = b->nz = nzBcsr; cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); } cerr = hipMalloc(&ba_t,(b->nz+1)*sizeof(PetscScalar));CHKERRCUDA(cerr); // incude a place for flops cerr = hipMalloc(&bi_t,(n+1)*sizeof(int));CHKERRCUDA(cerr); cusparseTriFactors->a_band_d = ba_t; cusparseTriFactors->i_band_d = bi_t; /* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */ ierr = PetscLogObjectMemory((PetscObject)B,(nzBcsr+1)*(sizeof(PetscInt)+sizeof(PetscScalar)));CHKERRQ(ierr); { dim3 dimBlockTeam(1,128); dim3 dimBlockLeague(Nf,1); hipLaunchKernelGGL(( mat_lu_factor_band_init_set_i), dim3(dimBlockLeague),dim3(dimBlockTeam), 0, 0, n, bwU, bi_t); } CHECK_LAUNCH_ERROR(); // does a sync // setup data if (!cusparseTriFactors->rpermIndices) { const PetscInt *r; ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* upper triangular indices */ if (!cusparseTriFactors->cpermIndices) { const PetscInt *c; ierr = ISGetIndices(isicol,&c);CHKERRQ(ierr); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); ierr = ISRestoreIndices(isicol,&c);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* put together the new matrix */ b->free_a = PETSC_FALSE; b->free_ij = PETSC_FALSE; b->singlemalloc = PETSC_FALSE; b->ilen = NULL; b->imax = NULL; b->row = isrow; b->col = iscol; ierr = PetscObjectReference((PetscObject)isrow);CHKERRQ(ierr); ierr = 
PetscObjectReference((PetscObject)iscol);CHKERRQ(ierr); b->icol = isicol; ierr = PetscMalloc1(n+1,&b->solve_work);CHKERRQ(ierr); B->factortype = MAT_FACTOR_LU; B->info.factor_mallocs = 0; B->info.fill_ratio_given = 0; if (ai[n]) { B->info.fill_ratio_needed = ((PetscReal)(nzBcsr))/((PetscReal)ai[n]); } else { B->info.fill_ratio_needed = 0.0; } #if defined(PETSC_USE_INFO) if (ai[n] != 0) { PetscReal af = B->info.fill_ratio_needed; ierr = PetscInfo1(A,"Band fill ratio %g\n",(double)af);CHKERRQ(ierr); } else { ierr = PetscInfo(A,"Empty matrix\n");CHKERRQ(ierr); } #endif if (a->inode.size) { ierr = PetscInfo(A,"Warning: using inodes in band solver.\n");CHKERRQ(ierr); } ierr = MatSeqAIJCheckInode_FactorLU(B);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND; B->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } /* Use -pc_factor_mat_solver_type cusparseband */ PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A,MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSEBAND; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); (*B)->factortype = ftype; (*B)->canuseordering = PETSC_TRUE; ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSEBAND Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = 
PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse_band);CHKERRQ(ierr); PetscFunctionReturn(0); } #define WARP_SIZE 32 template <typename T> __forceinline__ __device__ T wreduce(T a) { T b; #pragma unroll for (int i = WARP_SIZE/2; i >= 1; i = i >> 1) { b = __shfl_down_sync(0xffffffff, a, i); a += b; } return a; } // reduce in a block, returns result in thread 0 template <typename T, int BLOCK_SIZE> __device__ T breduce(T a) { constexpr int NWARP = BLOCK_SIZE/WARP_SIZE; __shared__ double buf[NWARP]; int wid = threadIdx.x / WARP_SIZE; int laneid = threadIdx.x % WARP_SIZE; T b = wreduce<T>(a); if (laneid == 0) buf[wid] = b; __syncthreads(); if (wid == 0) { if (threadIdx.x < NWARP) a = buf[threadIdx.x]; else a = 0; for (int i = (NWARP+1)/2; i >= 1; i = i >> 1) { a += __shfl_down_sync(0xffffffff, a, i); } } return a; } // Band LU kernel --- ba_csr bi_csr template <int BLOCK_SIZE> __global__ void __launch_bounds__(256,1) mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[]) { const PetscInt Nf = gridDim.x, nloc = n/Nf, field = blockIdx.x, start = field*nloc, end = start + nloc, chopnz = bw*(bw+1)/2, blocknz=(2*bw+1)*nloc, blocknz_0 = blocknz-chopnz; const PetscScalar *pLi; const int tid = threadIdx.x; /* Next, solve L */ pLi = ba_csr + (field==0 ? 0 : blocknz_0 + (field-1)*blocknz + bw); // diagonal (0,0) in field for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) { const PetscInt col = locDD<bw ? 
start : (glbDD-bw); PetscScalar t = 0; for (int j=col+tid,idx=tid;j<glbDD;j+=blockDim.x,idx+=blockDim.x) { t += pLi[idx]*x[j]; } #if defined(PETSC_USE_COMPLEX) PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t); PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti)); t = tt; #else t = breduce<PetscReal,BLOCK_SIZE>(t); #endif if (threadIdx.x == 0) x[glbDD] -= t; // /1.0 __syncthreads(); // inc pLi += glbDD-col; // get to diagonal if (glbDD > n-1-bw) pLi += n-1-glbDD; // skip over U, only last block has funny offset else pLi += bw; pLi += 1; // skip to next row if (field>0 && (locDD+1)<bw) pLi += bw-(locDD+1); // skip padding at beginning (ear) } /* Then, solve U */ pLi = ba_csr + Nf*blocknz - 2*chopnz - 1; // end of real data on block (diagonal) if (field != Nf-1) pLi -= blocknz_0 + (Nf-2-field)*blocknz + bw; // diagonal of last local row for (int glbDD=end-1, locDD = 0; glbDD >= start; glbDD--, locDD++) { const PetscInt col = (locDD<bw) ? 
end-1 : glbDD+bw; // end of row in U PetscScalar t = 0; for (int j=col-tid,idx=tid;j>glbDD;j-=blockDim.x,idx+=blockDim.x) { t += pLi[-idx]*x[j]; } #if defined(PETSC_USE_COMPLEX) PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t); PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti)); t = tt; #else t = breduce<PetscReal,BLOCK_SIZE>(PetscRealPart(t)); #endif pLi -= col-glbDD; // diagonal if (threadIdx.x == 0) { x[glbDD] -= t; x[glbDD] /= pLi[0]; } __syncthreads(); // inc past L to start of previous U pLi -= bw+1; if (glbDD<bw) pLi += bw-glbDD; // overshot in top left corner if (((locDD+1) < bw) && field != Nf-1) pLi -= (bw - (locDD+1)); // skip past right corner } } static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscInt n=A->rmap->n, nz=cusparseTriFactors->nnz, bw=(2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-nz))+PETSC_MACHINE_EPSILON))/2, Nf; PetscErrorCode ierr; hipError_t cerr; PetscContainer container; PetscFunctionBegin; if (A->rmap->n == 0) { PetscFunctionReturn(0); } // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder 
with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); constexpr int block = 128; hipLaunchKernelGGL(( mat_solve_band<block>), dim3(Nf),dim3(block), 0, 0, n,bw,cusparseTriFactors->a_band_d,tempGPU->data().get()); CHECK_LAUNCH_ERROR(); // does a sync /* Last, reorder with the column permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); }
f9756ca278432421002b9378a3d2f9f1fcc90d78.cu
/* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/async/for_each.h> #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) #include <cooperative_groups.h> #endif const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. 
typedef enum { CUSPARSE_MV_ALG_DEFAULT = 0, CUSPARSE_COOMV_ALG = 1, CUSPARSE_CSRMV_ALG1 = 2, CUSPARSE_CSRMV_ALG2 = 3 } cusparseSpMVAlg_t; typedef enum { CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1, CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2, CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, CUSPARSE_SPMM_COO_ALG1 = 1, CUSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, CUSPARSE_SPMM_CSR_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } cusparseSpMMAlg_t; typedef enum { CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc } cusparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "cusparseSpMVAlg_t","CUSPARSE_",0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","cusparseSpMMAlg_t","CUSPARSE_SPMM_",0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! 
We created one*/,"ALG1","ALG2","cusparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); static PetscErrorCode 
MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);

PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);

/* Set the CUDA stream on which all cuSPARSE operations for A are queued.
   Errors if A has no Mat_SeqAIJCUSPARSE implementation data (spptr). */
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
  cusparsestruct->stream = stream;
  /* make the stored cuSPARSE handle issue its work on the given stream */
  stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}

/* Replace A's cuSPARSE library handle. Any previously owned handle is destroyed
   first; the new handle is switched to device pointer mode. */
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
  if (cusparsestruct->handle != handle) {
    if (cusparsestruct->handle) {
      /* release the old handle before adopting the caller's */
      stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
    }
    cusparsestruct->handle = handle;
  }
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}

/* Drop (without destroying) the cuSPARSE handle stored in A. No-op unless A is
   actually a MATSEQAIJCUSPARSE with implementation data. */
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscBool          flg;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg || !cusparsestruct) PetscFunctionReturn(0);
  /* NOTE(review): the handle is only forgotten here, not cusparseDestroy()ed —
     presumably ownership lies with whoever installed it; confirm with callers */
  if (cusparsestruct->handle) cusparsestruct->handle = 0;
  PetscFunctionReturn(0);
}

/* Query callback: reports that this factored matrix uses the "cusparse" solver package */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(0);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
  on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp.
  Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k)
  results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can
  be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite
  poor and thus these algorithms are not recommended. This class does NOT support direct solver operations.
  Level: beginner

.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

/* Factory routine for the "cusparse" solver package: creates the factor matrix B
   (same size/type as A) and installs the symbolic-factorization callbacks and
   preferred orderings for the requested factor type (LU/ILU/ILUDT or Cholesky/ICC). */
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
  PetscErrorCode ierr;
  PetscInt       n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
  ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
  (*B)->factortype = ftype;
  ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
    ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
    (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJCUSPARSE;
    /* full LU prefers nested dissection; incomplete variants prefer natural ordering */
    ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr);
    ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr);
    ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr);
  } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
    (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
    ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr);
    ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr);
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");

  ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
  (*B)->canuseordering = PETSC_TRUE;
  ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Type-specific implementation behind MatCUSPARSESetFormat(): records the requested
   GPU storage format in the matrix's Mat_SeqAIJCUSPARSE data. For sequential
   matrices MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL set the same single format field. */
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_CUSPARSE_MULT:
    cusparsestruct->format = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparsestruct->format = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
  }
  PetscFunctionReturn(0);
}

/*@
   MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
   operation. Only the MatMult operation can use different GPU storage formats
   for MPIAIJCUSPARSE matrices.
   Not Collective

   Input Parameters:
+  A - Matrix of type SEQAIJCUSPARSE
.  op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
-  format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)

   Output Parameter:

   Level: intermediate

.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID,1);
  /* dispatch to the type-specific implementation if one is composed on A */
  ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatSetOption override: handles MAT_FORM_EXPLICIT_TRANSPOSE specially (dropping a
   stale explicit transpose when the option is turned off) and forwards everything
   else to the host SeqAIJ implementation. */
PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_FORM_EXPLICIT_TRANSPOSE:
    /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */
    if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);}
    A->form_explicit_transpose = flg;
    break;
  default:
    ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr);
    break;
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A);

/* Numeric LU factorization: the factorization itself runs on the CPU (after pulling
   A's values back from the GPU); the resulting triangular factors are then analyzed
   and copied to the GPU for the solve phase. */
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
  IS             isrow = b->row,iscol = b->col;
  PetscBool      row_identity,col_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* determine which version of MatSolve needs to be used.
  */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (row_identity && col_identity) {
    /* natural ordering: the permutation-free (faster) solve paths apply */
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  } else {
    B->ops->solve             = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose    = MatSolveTranspose_SeqAIJCUSPARSE;
    B->ops->matsolve          = NULL;
    B->ops->matsolvetranspose = NULL;
  }
  /* get the triangular factors */
  ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Process the -mat_cusparse_* runtime options: storage format for SpMV/TriSolve and
   (CUDA >= 11) the cuSPARSE SpMV/SpMM/csr2csc algorithm choices. Only applies to
   unfactored matrices. */
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
    ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
   #if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
                            "cusparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
    /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
    if (flg && CUSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
                            "cusparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
    if (flg && CUSPARSE_SPMM_CSR_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
    ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
                            "cusparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
    if (flg && CUSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
   #endif
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Symbolic ILU: resets any previously built GPU triangular-factor data, delegates the
   symbolic work to the host SeqAIJ routine, and installs the CUSPARSE numeric stage. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic LU: same pattern as ILU above, delegating to MatLUFactorSymbolic_SeqAIJ */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic ICC: resets GPU factor data, delegates to the host routine, installs the
   CUSPARSE Cholesky numeric stage. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Symbolic Cholesky: same pattern as ICC above */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* Build (or refresh the values of) the GPU copy of the unit-diagonal lower triangular
   ILU factor L from the CPU factor stored in A, in CSR form, and run the cuSPARSE
   triangular-solve analysis on it. Pinned (cudaMallocHost) staging buffers are used
   for the host-side CSR assembly. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower
         triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];
      if (!loTriFactor) {
        PetscScalar *AALo;

        cerr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the lower triangular matrix */
        cerr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the lower triangular matrix */
        AiLo[0]  = (PetscInt) 0;
        AiLo[n]  = nzLower;
        AjLo[0]  = (PetscInt) 0;
        AALo[0]  = (MatScalar) 1.0;          /* implicit unit diagonal of L */
        v        = aa;
        vi       = aj;
        offset   = 1;
        rowOffset= 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          /* additional 1 for the term on the diagonal */
          AiLo[i]    = rowOffset;
          rowOffset += nz+1;

          ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);

          offset      += nz;
          AjLo[offset] = (PetscInt) i;       /* append the unit diagonal entry */
          AALo[offset] = (MatScalar) 1.0;
          offset      += 1;

          v  += nz;
          vi += nz;
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
        loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
       #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
       #else
        stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
       #endif
        stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat = new CsrMatrix;
        loTriFactor->csrMat->num_rows = n;
        loTriFactor->csrMat->num_cols = n;
        loTriFactor->csrMat->num_entries = nzLower;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
        loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

        loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
        loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
       #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* query and allocate the workspace the triangular-solve analysis/solve needs */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
                                       loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                       loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
                                       &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
       #endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                                 loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                                 loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                                 loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
                                #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,loTriFactor->solvePolicy, loTriFactor->solveBuffer
                                #endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
        loTriFactor->AA_h = AALo;              /* keep the pinned value buffer for later value-only updates */
        cerr = cudaFreeHost(AiLo);CHKERRCUDA(cerr);
        cerr = cudaFreeHost(AjLo);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      } else { /* update values only */
        if (!loTriFactor->AA_h) {
          cerr = cudaMallocHost((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the lower triangular matrix */
        loTriFactor->AA_h[0]  = 1.0;
        v        = aa;
        vi       = aj;
        offset   = 1;
        for (i=1; i<n; i++) {
          nz = ai[i+1] - ai[i];
          ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
          offset      += nz;
          loTriFactor->AA_h[offset] = 1.0;
          offset      += 1;
          v  += nz;
        }
        loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
        ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

/* Build (or refresh the values of) the GPU copy of the upper triangular ILU factor U
   (non-unit diagonal, stored with inverted diagonal entries) from the CPU factor in A,
   and run the cuSPARSE triangular-solve analysis on it. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix.
       */
      nzUpper = adiag[0]-adiag[n];
      if (!upTriFactor) {
        PetscScalar *AAUp;

        cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);

        /* Allocate Space for the upper triangular matrix */
        cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;
          vi = aj + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
          AAUp[offset] = (MatScalar)1./v[nz];   /* SeqAIJ stores 1/diag; value copied as the inverted diagonal */
          AiUp[i]      = AiUp[i+1] - (nz+1);

          ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
          ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
        }

        /* allocate space for the triangular factor information */
        ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
        upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
       #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
       #else
        stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
       #endif
        stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);

        /* set the operation */
        upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        upTriFactor->csrMat = new CsrMatrix;
        upTriFactor->csrMat->num_rows = n;
        upTriFactor->csrMat->num_cols = n;
        upTriFactor->csrMat->num_entries = nzUpper;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

        upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
        upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

        /* Create the solve analysis information */
        ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
        stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
       #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
        /* query and allocate the workspace the triangular-solve analysis/solve needs */
        stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
                                       upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                       upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
                                       &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
       #endif

        /* perform the solve analysis */
        stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                                 upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                                 upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                                 upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
                                #if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                                 ,upTriFactor->solvePolicy, upTriFactor->solveBuffer
                                #endif
);CHKERRCUSPARSE(stat);
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
        upTriFactor->AA_h = AAUp;              /* keep the pinned value buffer for later value-only updates */
        cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
        cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
        ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      } else {
        if (!upTriFactor->AA_h) {
          cerr = cudaMallocHost((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
        }
        /* Fill the upper triangular matrix */
        offset = nzUpper;
        for (i=n-1; i>=0; i--) {
          v  = aa + adiag[i+1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i+1]-1;

          /* decrement the offset */
          offset -= (nz+1);

          /* first, set the diagonal elements */
          upTriFactor->AA_h[offset] = 1./v[nz];
          ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
        }
        upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
        ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
      }
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

/* Driver for the ILU factor upload: builds both GPU triangular factors, allocates the
   shared work vector, and caches the row/column permutation indices on the GPU when
   the orderings are not the identity. Marks the factor as resident on both CPU and GPU. */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
  cusparseTriFactors->nnz=a->nz;

  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity && !cusparseTriFactors->rpermIndices) {
    const PetscInt *r;

    ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
    ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }

  /* upper triangular indices */
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity && !cusparseTriFactors->cpermIndices) {
    const PetscInt *c;

    ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
    ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Build the GPU triangular factors for ICC/Cholesky from the CPU factor stored in A.
   NOTE(review): continues beyond this excerpt; A->data is read both as Mat_SeqAIJ and
   Mat_SeqSBAIJ here — relies on the SBAIJ-style layout of the Cholesky factor. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                          nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
  Mat_SeqSBAIJ                      *b = (Mat_SeqSBAIJ*)A->data;
  const PetscInt                    *ai = b->i,*aj = b->j,*vj;
  const MatScalar                   *aa = b->a,*v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(0);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      cerr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
      if (!upTriFactor && !loTriFactor) {
        /* Allocate Space for the upper triangular matrix */
        cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
        cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);

        /* Fill the upper triangular matrix */
        AiUp[0]=(PetscInt) 0;
        AiUp[n]=nzUpper;
        offset = 0;
        for (i=0; i<n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1.0/v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } /* allocate space for the triangular factor information */ ierr = PetscNew(&upTriFactor);CHKERRQ(ierr); upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat); /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat); 
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,upTriFactor->solvePolicy, upTriFactor->solveBuffer #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ ierr = PetscNew(&loTriFactor);CHKERRQ(ierr); loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(loTriFactor->descr, 
CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,loTriFactor->solvePolicy, loTriFactor->solveBuffer #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = 
PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr); cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr); cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr); } else { /* Fill the upper triangular matrix */ offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AAUp[offset] = 1.0/v[nz]; AALo[offset] = 1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); } cerr = cudaFreeHost(AAUp);CHKERRCUDA(cerr); cerr = cudaFreeHost(AALo);CHKERRCUDA(cerr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS ip = a->row; PetscBool perm_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); } cusparseTriFactors->nnz=(a->nz-n)*2 + n; A->offloadmask = PETSC_OFFLOAD_BOTH; 
  /* lower triangular indices */
  /* If the ICC row permutation is not the identity, stash the permutation and its
     inverse on the GPU so the solve routines can reorder the RHS/solution vectors
     there instead of on the host. */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (!perm_identity) {
    IS             iip;
    const PetscInt *irip,*rip;

    ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
    ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
    /* rpermIndices permutes the input vector b; cpermIndices (the inverse
       permutation) permutes the solution x — see MatSolveTranspose_SeqAIJCUSPARSE */
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip+n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip+n);
    ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISDestroy(&iip);CHKERRQ(ierr);
    ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Flag both synchronous (bad launch configuration) and asynchronous (kernel
   execution) CUDA errors; intended for use right after a kernel launch.
   NOTE: the cudaDeviceSynchronize() makes this expensive — debug use only. */
#define CHECK_LAUNCH_ERROR()                                                             \
do {                                                                                     \
  /* Check synchronous errors, i.e. pre-launch */                                        \
  cudaError_t err = cudaGetLastError();                                                  \
  if (cudaSuccess != err) {                                                              \
    SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
  }                                                                                      \
  /* Check asynchronous errors, i.e. kernel failed (ULF) */                              \
  err = cudaDeviceSynchronize();                                                         \
  if (cudaSuccess != err) {                                                              \
    SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
  }                                                                                      \
} while (0)

/* Numeric Cholesky/ICC factorization: pull the matrix back from the GPU,
   factor on the CPU with the SeqAIJ kernel, then choose the appropriate
   CUSPARSE solve variants and mirror the triangular factors onto the GPU. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
  IS             ip = b->row;
  PetscBool      perm_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* determine which version of MatSolve needs to be used.
*/ ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cusparseMatrixType_t matrixType; cusparseFillMode_t fillMode; cusparseDiagType_t diagType; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; /* allocate space for the transpose of the lower triangular factor */ ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr); loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat); stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. 
the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  /* CUDA >= 11 requires an explicit user-supplied buffer for csr2csc */
  stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                                       loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                                       loTriFactor->csrMat->values->data().get(),
                                       loTriFactor->csrMat->row_offsets->data().get(),
                                       loTriFactor->csrMat->column_indices->data().get(),
                                       loTriFactorT->csrMat->values->data().get(),
                                       loTriFactorT->csrMat->row_offsets->data().get(),
                                       loTriFactorT->csrMat->column_indices->data().get(),
                                       cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase,
                                       CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
  cerr = cudaMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif

  ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                          loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                          loTriFactor->csrMat->values->data().get(),
                          loTriFactor->csrMat->row_offsets->data().get(),
                          loTriFactor->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
                          loTriFactorT->csrMat->row_offsets->data().get(),
                          loTriFactorT->csrMat->column_indices->data().get(),
                          cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase,
                          CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
                          loTriFactorT->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase
#endif
                          );CHKERRCUSPARSE(stat);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  /* BUGFIX: close the event opened above. The original code called
     PetscLogEventBegin() a second time here, leaving MAT_CUSPARSEGenerateTranspose
     permanently unbalanced in the log (compare the properly paired
     MAT_CUSPARSESolveAnalysis Begin/End below). */
  ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);

  /* Create the solve analysis information */
  ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
  stat = cusparse_get_svbuffsize(cusparseTriFactors->handle,
loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr); upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat); stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat); /* set the operation */ upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  /* CUDA >= 11 requires an explicit user-supplied buffer for csr2csc */
  stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
                                       upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
                                       upTriFactor->csrMat->values->data().get(),
                                       upTriFactor->csrMat->row_offsets->data().get(),
                                       upTriFactor->csrMat->column_indices->data().get(),
                                       upTriFactorT->csrMat->values->data().get(),
                                       upTriFactorT->csrMat->row_offsets->data().get(),
                                       upTriFactorT->csrMat->column_indices->data().get(),
                                       cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase,
                                       CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
  cerr = cudaMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif

  ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
                          upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
                          upTriFactor->csrMat->values->data().get(),
                          upTriFactor->csrMat->row_offsets->data().get(),
                          upTriFactor->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
                          upTriFactorT->csrMat->row_offsets->data().get(),
                          upTriFactorT->csrMat->column_indices->data().get(),
                          cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase,
                          CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer
#else
                          upTriFactorT->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase
#endif
                          );CHKERRCUSPARSE(stat);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  /* BUGFIX: close the event opened above. The original code called
     PetscLogEventBegin() a second time here, leaving MAT_CUSPARSEGenerateTranspose
     permanently unbalanced in the log (compare the properly paired
     MAT_CUSPARSESolveAnalysis Begin/End below). */
  ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);

  /* Create the solve analysis information */
  ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
  stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
  stat = cusparse_get_svbuffsize(cusparseTriFactors->handle,
upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer #endif );CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTransposeForMult(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cudaError_t err; PetscErrorCode ierr; PetscFunctionBegin; if (!A->form_explicit_transpose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct"); matstructT = 
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(0); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); if (cusparsestruct->format != MAT_CUSPARSE_CSR) { ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); } if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); /* set alpha and beta */ err = cudaMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); } 
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat); #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. 
the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat; if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix"); if 
(!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix rows"); if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix cols"); if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrix values"); if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT"); if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT rows"); if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT cols"); if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat); err = cudaMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err); #endif if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. 
But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n,matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) err = cudaFree(csr2cscBuffer);CHKERRCUDA(err); #endif } PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(0); } /* Why do we need to analyze the transposed matrix again? 
Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactorT->descr, 
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get() #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer #endif );CHKERRCUSPARSE(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) ,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer #endif );CHKERRCUSPARSE(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. 
   */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU);

  /* restore */
  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  /* 2 flops per stored factor entry, minus n for the unit diagonal (matches the non-transpose solve) */
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Solve A^T x = b on the GPU for a factored SeqAIJCUSPARSE matrix in natural ordering.
   Unlike the permuted-ordering variant above, no row/column permutations are applied:
   the transposed upper solve reads barray directly and the transposed lower solve
   writes xarray directly, with tempGPU as the intermediate.
*/
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ... on the fly (done once; both
     pointers are set together by MatSeqAIJCUSPARSEAnalyzeTransposeForSolve) */
  if (!loTriFactorT && !upTriFactorT) {
    ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, solve U (transposed): barray -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
                        upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactorT->descr,
                        upTriFactorT->csrMat->values->data().get(),
                        upTriFactorT->csrMat->row_offsets->data().get(),
                        upTriFactorT->csrMat->column_indices->data().get(),
                        upTriFactorT->solveInfo,
                        barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  /* Then, solve L (transposed): tempGPU -> xarray */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
                        loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactorT->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactorT->descr,
                        loTriFactorT->csrMat->values->data().get(),
                        loTriFactorT->csrMat->row_offsets->data().get(),
                        loTriFactorT->csrMat->column_indices->data().get(),
                        loTriFactorT->solveInfo,
                        tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  /* restore */
  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode
MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  /* Solve A x = b on the GPU for a factored matrix with row/column permutations:
     permute b by rperm, solve L then U, then permute the result by cperm into x. */
  const PetscScalar                     *barray;
  PetscScalar                           *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  cusparseStatus_t                      stat;
  Mat_SeqAIJCUSPARSETriFactors          *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct     *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct     *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                           *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                        ierr;
  cudaError_t                           cerr;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, reorder with the row permutation */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
               thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
               tempGPU->begin());

  /* Next, solve L: tempGPU -> xarray */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
                        loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  /* Then, solve U: xarray -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,
                        xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  /* Last, reorder with the column permutation */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
               thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
               xGPU);

  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Solve A x = b on the GPU for a factored matrix in natural ordering: no
   permutations, so the lower solve reads barray directly and the upper solve
   writes xarray directly, with tempGPU as the intermediate.
*/
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;
  cudaError_t                       cerr;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  /* First, solve L: barray -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
                        loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        loTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  /* Next, solve U: tempGPU -> xarray */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        upTriFactor->csrMat->num_entries,
#endif
                        &PETSC_CUSPARSE_ONE, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,
                        tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
                        ,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
                        );CHKERRCUSPARSE(stat);

  ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Copy the CSR values (not the sparsity pattern) from the GPU back to the host
   when the GPU holds the up-to-date copy; afterwards both copies are valid.
*/
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  cudaError_t        cerr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_GPU) {
    CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;

    ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
    cerr = cudaMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
    A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar
*array[])
{
  /* Return the host CSR value array; syncs values from the GPU first, then marks
     the CPU copy as the valid one since the caller may modify the array. */
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  *array = a->a;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}

/*
   Mirror the host CSR matrix on the GPU. If only the values changed (same nonzero
   state, CSR format), just re-assign the value array; otherwise rebuild the whole
   device structure (descriptor, scalar constants, CSR/ELL/HYB storage, compressed
   row indices).
*/
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  PetscInt                     m = A->rmap->n,*ii,*ridx,tmp;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  PetscBool                    both = PETSC_TRUE;
  cudaError_t                  err;

  PetscFunctionBegin;
  if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU");
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) {
      /* Copy values only */
      CsrMatrix *matrix;
      matrix = (CsrMatrix*)cusparsestruct->mat->mat;

      if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values");
      ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
      matrix->values->assign(a->a, a->a+a->nz);
      err  = WaitForCUDA();CHKERRCUDA(err);
      ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
      /* values changed, so any cached explicit transpose is stale */
      ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
    } else {
      PetscInt nnz;
      ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
      ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
      ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
      delete cusparsestruct->workVector;
      delete cusparsestruct->rowoffsets_gpu;
      cusparsestruct->workVector = NULL;
      cusparsestruct->rowoffsets_gpu = NULL;
      try {
        /* compressed row storage keeps only the nonempty rows (rindex maps back) */
        if (a->compressedrow.use) {
          m    = a->compressedrow.nrows;
          ii   = a->compressedrow.i;
          ridx = a->compressedrow.rindex;
        } else {
          m    = A->rmap->n;
          ii   = a->i;
          ridx = NULL;
        }
        if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data");
        if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data");
        if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } /* no host values yet: GPU-only after this */
        else nnz = a->nz;

        /* create cusparse matrix */
        cusparsestruct->nrows = m;
        matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
        stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
        stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);

        /* device-resident scalar constants (pointer mode DEVICE below) */
        err = cudaMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
        err = cudaMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
        err = cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
        err = cudaMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
        err = cudaMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
        err = cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
        stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);

        /* Build a hybrid/ellpack matrix if this option is chosen for the storage */
        if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
          /* set the matrix */
          CsrMatrix *mat= new CsrMatrix;
          mat->num_rows = m;
          mat->num_cols = A->cmap->n;
          mat->num_entries = nnz;
          mat->row_offsets = new THRUSTINTARRAY32(m+1);
          mat->row_offsets->assign(ii, ii + m+1);

          mat->column_indices = new THRUSTINTARRAY32(nnz);
          mat->column_indices->assign(a->j, a->j+nnz);

          mat->values = new THRUSTARRAY(nnz);
          if (a->a) mat->values->assign(a->a, a->a+nnz);

          /* assign the pointer */
          matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
          if (mat->num_rows) { /* cusparse errors on empty matrices! */
            stat = cusparseCreateCsr(&matstruct->matDescr,
                                     mat->num_rows, mat->num_cols, mat->num_entries,
                                     mat->row_offsets->data().get(),
                                     mat->column_indices->data().get(),
                                     mat->values->data().get(),
                                     CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                                     CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
          }
#endif
        } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
          SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
          /* temporary CSR used only as the source of the csr2hyb conversion */
          CsrMatrix *mat= new CsrMatrix;
          mat->num_rows = m;
          mat->num_cols = A->cmap->n;
          mat->num_entries = nnz;
          mat->row_offsets = new THRUSTINTARRAY32(m+1);
          mat->row_offsets->assign(ii, ii + m+1);

          mat->column_indices = new THRUSTINTARRAY32(nnz);
          mat->column_indices->assign(a->j, a->j+nnz);

          mat->values = new THRUSTARRAY(nnz);
          if (a->a) mat->values->assign(a->a, a->a+nnz);

          cusparseHybMat_t hybMat;
          stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
          cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
          stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
              matstruct->descr, mat->values->data().get(),
              mat->row_offsets->data().get(),
              mat->column_indices->data().get(),
              hybMat, 0, partition);CHKERRCUSPARSE(stat);
          /* assign the pointer */
          matstruct->mat = hybMat;

          /* the temporary CSR is no longer needed once converted */
          if (mat) {
            if (mat->values) delete (THRUSTARRAY*)mat->values;
            if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
            if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
            delete (CsrMatrix*)mat;
          }
#endif
        }

        /* assign the compressed row indices */
        if (a->compressedrow.use) {
          cusparsestruct->workVector = new THRUSTARRAY(m);
          matstruct->cprowIndices    = new THRUSTINTARRAY(m);
          matstruct->cprowIndices->assign(ridx,ridx+m);
          tmp = m;
        } else {
          cusparsestruct->workVector = NULL;
          matstruct->cprowIndices    = NULL;
          tmp = 0;
        }
        ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);

        /* assign the pointer */
        cusparsestruct->mat = matstruct;
      } catch(char *ex) {
        SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
      }
      err  = WaitForCUDA();CHKERRCUDA(err);
      ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
      cusparsestruct->nonzerostate = A->nonzerostate;
    }
    if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(0);
}

/* Thrust functors used to combine/copy zipped value pairs on the device */
struct VecCUDAPlusEquals
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
  }
};

struct VecCUDAEquals
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) = thrust::get<0>(t);
  }
};

struct VecCUDAEqualsReverse
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<0>(t) = thrust::get<1>(t);
  }
};

/* Per-product state kept between the symbolic and numeric MatMat phases */
struct MatMatCusparse {
  PetscBool   cisdense;  /* C was MATSEQDENSE (CPU) on entry; convert back after the product */
  PetscScalar *Bt;       /* buffer for explicit B^T (pre-CUDA-11 path only) */
  Mat         X;         /* intermediate dense result for PtAP/RARt */
  PetscBool   reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse
                           matmat operations */
  PetscLogDouble flops;
  CsrMatrix      *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t matSpBDescr;
  PetscBool            initialized;   /* C = alpha op(A) op(B) + beta C */
  cusparseDnMatDescr_t matBDescr;
  cusparseDnMatDescr_t matCDescr;
  PetscInt             Blda,Clda; /* Record leading dimensions of B and C here to detect changes*/
  size_t               mmBufferSize;
  void                 *mmBuffer;
  void                 *mmBuffer2; /* SpGEMM WorkEstimation buffer */
  cusparseSpGEMMDescr_t spgemmDesc;
#endif
};

/* Destructor for the MatMatCusparse product data: releases device buffers,
   cuSPARSE descriptors and the intermediate matrix X. */
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  PetscErrorCode   ierr;
  MatMatCusparse   *mmdata = (MatMatCusparse *)data;
  cudaError_t      cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = cudaFree(mmdata->Bt);CHKERRCUDA(cerr);
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  if (mmdata->matSpBDescr) { stat = cusparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->mmBuffer)    { cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
  if (mmdata->mmBuffer2)   { cerr = cudaFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
  if (mmdata->matBDescr)   { stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matCDescr)   { stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->spgemmDesc)  { stat = cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);

/*
   Numeric phase of sparse(A) times dense(B) products (AB, AtB, ABt, PtAP, RARt).
   Computes the sparse-times-dense part with cusparseSpMM (CUDA >= 11) or
   cusparse_csr_spmm (older), then finishes PtAP/RARt with a dense-dense multiply.
*/
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  PetscInt                     m,n,blda,clda;
  PetscBool                    flg,biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  cusparseStatus_t             stat;
  cusparseOperation_t          opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  PetscErrorCode               ierr;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;
  cudaError_t                  cerr;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
  mmdata = (MatMatCusparse*)product->data;
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  /* pick op(A) and the output dimensions m x n for each product type */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = CUSPARSE_OPERATION_TRANSPOSE;
    } else {
      ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
      mat  = cusp->matTranspose;
      opA  = CUSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix*)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
  if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
  ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);

  ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
  /* PtAP/RARt first compute the sparse-dense product into the intermediate X */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
  }

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    if (mmdata->initialized && mmdata->Blda != blda) {stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
    if (!mmdata->matBDescr) {
      stat         = cusparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Blda = blda;
    }

    if (mmdata->initialized && mmdata->Clda != clda) {stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      stat         = cusparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Clda = clda;
    }

    if (!mat->matDescr) {
      stat = cusparseCreateCsr(&mat->matDescr,
                               csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
                               csrmat->row_offsets->data().get(),
                               csrmat->column_indices->data().get(),
                               csrmat->values->data().get(),
                               CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                               CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    stat = cusparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
                                   mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                                   mmdata->matCDescr,cusparse_scalartype,
                                   cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
    /* grow the work buffer only when needed */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
      cerr = cudaMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    stat = cusparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
    stat = cusparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
    stat = cusparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
  }

  /* do cusparseSpMM, which supports transpose on B */
  stat = cusparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
                      mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                      mmdata->matCDescr,cusparse_scalartype,
                      cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    /* form B^T explicitly with a cuBLAS geam into the preallocated Bt buffer */
    cublasHandle_t cublasv2handle;
    cublasStatus_t cerr;

    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    cerr = cublasXgeam(cublasv2handle,CUBLAS_OP_T,CUBLAS_OP_T,
                       B->cmap->n,B->rmap->n,
                       &PETSC_CUSPARSE_ONE ,barray,blda,
                       &PETSC_CUSPARSE_ZERO,barray,blda,
                       mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k    = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
                           csrmat->num_entries,mat->alpha_one,mat->descr,
                           csrmat->values->data().get(),
                           csrmat->row_offsets->data().get(),
                           csrmat->column_indices->data().get(),
                           mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
                           carray,clda);CHKERRCUSPARSE(stat);
#endif
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
  ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
  if (product->type == MATPRODUCT_RARt) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  } else if (product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
  }
  /* undo the in-place conversions performed on entry */
  if (mmdata->cisdense) {
    ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
  }
  if (!biscuda) {
    ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*
   Symbolic phase of sparse(A) times dense(B) products: sets the sizes/type of C,
   allocates the per-product MatMatCusparse data and (when needed) the B^T buffer
   and the intermediate dense matrix X.
*/
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                A,B;
  PetscInt           m,n;
  PetscBool          cisdense,flg;
  PetscErrorCode     ierr;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n =
B->rmap->n; break; case MATPRODUCT_PtAP: m = B->cmap->n; n = B->cmap->n; break; case MATPRODUCT_RARt: m = B->rmap->n; n = B->rmap->n; break; default: SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]); } ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr); /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */ ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr); ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr); /* product data */ ierr = PetscNew(&mmdata);CHKERRQ(ierr); mmdata->cisdense = cisdense; #if PETSC_PKG_CUDA_VERSION_LT(11,0,0) /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */ if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) { cudaError_t cerr = cudaMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr); } #endif /* for these products we need intermediate storage */ if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) { ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr); ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr); if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */ ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr); } else { ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr); } } C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA; PetscFunctionReturn(0); } static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C) { Mat_Product *product = C->product; Mat A,B; Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp; Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data; Mat_SeqAIJCUSPARSEMultStruct 
*Amat,*Bmat,*Cmat; CsrMatrix *Acsr,*Bcsr,*Ccsr; PetscBool flg; PetscErrorCode ierr; cusparseStatus_t stat; cudaError_t cerr; MatProductType ptype; MatMatCusparse *mmdata; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) cusparseSpMatDescr_t BmatSpDescr; #endif PetscFunctionBegin; MatCheckProduct(C,1); if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty"); ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name); mmdata = (MatMatCusparse*)C->product->data; A = product->A; B = product->B; if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */ mmdata->reusesym = PETSC_FALSE; Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr; if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format"); Cmat = Ccusp->mat; if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]); Ccsr = (CsrMatrix*)Cmat->mat; if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct"); goto finalize; } if (!c->nz) goto finalize; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name); if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); if (B->boundtocpu) 
SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr; Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr; Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr; if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format"); if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format"); if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); ptype = product->type; if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB; if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB; switch (ptype) { case MATPRODUCT_AB: Amat = Acusp->mat; Bmat = Bcusp->mat; break; case MATPRODUCT_AtB: Amat = Acusp->matTranspose; Bmat = Bcusp->mat; break; case MATPRODUCT_ABt: Amat = Acusp->mat; Bmat = Bcusp->matTranspose; break; default: SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]); } Cmat = Ccusp->mat; if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]); if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]); if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]); Acsr = (CsrMatrix*)Amat->mat; Bcsr = mmdata->Bcsr ? 
mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */ Ccsr = (CsrMatrix*)Cmat->mat; if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct"); if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct"); if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct"); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */ stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat); stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat); #else stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat); #endif ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); C->offloadmask = PETSC_OFFLOAD_GPU; finalize: /* shorter version of MatAssemblyEnd_SeqAIJ */ ierr = PetscInfo3(C,"Matrix size: %D X 
%D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
  ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
  /* mimic MatAssemblyEnd_SeqAIJ bookkeeping: the product was fully preallocated, so nothing was wasted */
  c->reallocs         = 0;
  C->info.mallocs    += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(0);
}

/* Symbolic phase of C = op(A)*op(B) with both operands MATSEQAIJCUSPARSE in CSR format.
   Allocates the cuSPARSE structures for C, computes its nonzero pattern on the GPU
   (SpGEMM on CUDA >= 11, Xcsrgemm otherwise) and mirrors the pattern back to the host
   Mat_SeqAIJ arrays. Numeric values are computed later by the productnumeric op set at
   the end of this routine. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *a,*b,*c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     i,j,m,n,k;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  cudaError_t                  cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble               flops;
  PetscBool                    biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  int64_t                      C_num_rows1, C_num_cols1, C_nnz1;
  size_t                       bufSize2;
  cusparseSpMatDescr_t         BmatSpDescr;
#else
  int                          cnz;
#endif

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
  A    = product->A;
  B    = product->B;
  /* both operands must be MATSEQAIJCUSPARSE and stored as CSR */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
  a = (Mat_SeqAIJ*)A->data;
  b = (Mat_SeqAIJ*)B->data;
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");

  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
  ptype = product->type;
  /* a symmetric operand lets us use the cheaper non-transposed kernel */
  if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
  if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
  biscompressed = PETSC_FALSE;
  ciscompressed = PETSC_FALSE;
  /* pick the operand mult structs (possibly the explicit transposes) and the
     result dimensions m x n (inner dimension k), tracking compressed-row usage */
  switch (ptype) {
  case MATPRODUCT_AB:
    m    = A->rmap->n;
    n    = B->cmap->n;
    k    = A->cmap->n;
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_AtB:
    m    = A->cmap->n;
    n    = B->cmap->n;
    k    = A->rmap->n;
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_ABt:
    m    = A->rmap->n;
    n    = B->rmap->n;
    k    = A->cmap->n;
    ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr);
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
  }

  /* create cusparse matrix */
  ierr  = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  ierr  = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  c     = (Mat_SeqAIJ*)C->data;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  Cmat  = new Mat_SeqAIJCUSPARSEMultStruct;
  Ccsr  = new CsrMatrix;
  c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if a is in compressed row, then c will be in compressed row format */
    c->compressedrow.nrows = a->compressedrow.nrows;
    ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
    ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector  = new THRUSTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
  } else {
    c->compressedrow.nrows  = 0;
    c->compressedrow.i      = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector       = NULL;
    Cmat->cprowIndices      = NULL;
  }
  Ccusp->nrows      = ciscompressed ? c->compressedrow.nrows : m;
  Ccusp->mat        = Cmat;
  Ccusp->mat->mat   = Ccsr;
  Ccsr->num_rows    = Ccusp->nrows;
  Ccsr->num_cols    = n;
  Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
  stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
  stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
  stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
  /* device-resident copies of the 1/0 scalars used with CUSPARSE_POINTER_MODE_DEVICE */
  cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! Produce an empty C and skip the GPU product */
    thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
    c->nz = 0;
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values         = new THRUSTARRAY(c->nz);
    goto finalizesym;
  }

  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  if (!biscompressed) {
    Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    BmatSpDescr = Bmat->matDescr;
#endif
  } else { /* we need to use row offsets for the full matrix */
    CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
    /* shallow-copy B's CSR but substitute full (uncompressed) row offsets; owned by mmdata */
    Bcsr = new CsrMatrix;
    Bcsr->num_rows       = B->rmap->n;
    Bcsr->num_cols       = cBcsr->num_cols;
    Bcsr->num_entries    = cBcsr->num_entries;
    Bcsr->column_indices = cBcsr->column_indices;
    Bcsr->values         = cBcsr->values;
    if (!Bcusp->rowoffsets_gpu) {
      Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
      Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
      ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
    }
    Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
    mmdata->Bcsr      = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if (Bcsr->num_rows && Bcsr->num_cols) {
      stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
                               Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                               Bcsr->values->data().get(),
                               CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                               CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    BmatSpDescr = mmdata->matSpBDescr;
#endif
  }
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
  /* precompute flops count */
  if (ptype == MATPRODUCT_AB) {
    for (i=0, flops = 0; i<A->rmap->n; i++) {
      const PetscInt st = a->i[i];
      const PetscInt en =
a->i[i+1];
      /* each nonzero A(i,brow) multiplies the whole row brow of B: 2*nnz(B(brow,:)) flops */
      for (j=st; j<en; j++) {
        const PetscInt brow = a->j[j];
        flops += 2.*(b->i[brow+1] - b->i[brow]);
      }
    }
  } else if (ptype == MATPRODUCT_AtB) {
    for (i=0, flops = 0; i<A->rmap->n; i++) {
      const PetscInt anzi = a->i[i+1] - a->i[i];
      const PetscInt bnzi = b->i[i+1] - b->i[i];
      flops += (2.*anzi)*bnzi;
    }
  } else { /* TODO */
    flops = 0.;
  }
  mmdata->flops = flops;
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  /* generic SpGEMM API (CUDA >= 11): estimate work, compute the pattern, then copy out */
  stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
                           NULL, NULL, NULL,
                           CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                           CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
  stat = cusparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
  /* ask bufferSize bytes for external memory */
  stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                       Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                       cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
                                       mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
  cerr = cudaMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
  /* inspect the matrices A and B to understand the memory requirement for the next step */
  stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                       Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                       cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
                                       mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
  /* ask bufferSize again bytes for external memory */
  stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
                                mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
  /* The CUSPARSE documentation is not clear, nor the API
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
     it only appears for the workEstimation stuff, but it seems it is needed in
     compute, so probably the address is stored in the descriptor! What a messy API... */
  cerr = cudaMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
  /* compute the intermediate product of A * B */
  stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                                cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
                                mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  /* get matrix C non-zero entries C_nnz1 */
  stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
  c->nz = (PetscInt) C_nnz1;
  ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
  stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
                                Ccsr->values->data().get());CHKERRCUSPARSE(stat);
  stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
                             cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
  /* legacy csrgemm API (CUDA < 11): Nnz pass first, then a full numeric gemm */
  stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
  stat =
cusparseXcsrgemmNnz(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                             Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                             Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                             Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
  c->nz = cnz;
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
  stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
     I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic
     by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that
     CUSPARSE documentation claims it is supported! */
  stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                             Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                             Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                             Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
  /* mirror the GPU-computed pattern into the host Mat_SeqAIJ (i/j arrays and row stats) */
  c->singlemalloc = PETSC_FALSE;
  c->free_a       = PETSC_TRUE;
  c->free_ij      = PETSC_TRUE;
  ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
  ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
  if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
    PetscInt *d_i = c->i;
    THRUSTINTARRAY ii(Ccsr->row_offsets->size());
    THRUSTINTARRAY jj(Ccsr->column_indices->size());
    ii = *Ccsr->row_offsets;
    jj = *Ccsr->column_indices;
    if (ciscompressed) d_i = c->compressedrow.i;
    cerr = cudaMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
  } else {
    PetscInt *d_i = c->i;
    if (ciscompressed) d_i = c->compressedrow.i;
    cerr = cudaMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
  }
  if (ciscompressed) { /* need to expand host row offsets */
    PetscInt r = 0;
    c->i[0] = 0;
    for (k = 0; k < c->compressedrow.nrows; k++) {
      const PetscInt next =
c->compressedrow.rindex[k];
      const PetscInt old = c->compressedrow.i[k];
      /* rows with no stored entries repeat the previous offset */
      for (; r < next; r++) c->i[r+1] = old;
    }
    for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
  }
  ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
  ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
  c->maxnz         = c->nz;
  c->nonzerorowcnt = 0;
  c->rmax          = 0;
  for (k = 0; k < m; k++) {
    const PetscInt nn = c->i[k+1] - c->i[k];
    c->ilen[k] = c->imax[k] = nn;
    c->nonzerorowcnt += (PetscInt)!!nn;
    c->rmax = PetscMax(c->rmax,nn);
  }
  ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
  ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
  Ccsr->num_entries = c->nz;

  C->nonzerostate++;
  ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
  Ccusp->nonzerostate = C->nonzerostate;
  C->offloadmask   = PETSC_OFFLOAD_UNALLOCATED;
  C->preallocated  = PETSC_TRUE;
  C->assembled     = PETSC_FALSE;
  C->was_assembled = PETSC_FALSE;
  if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
    mmdata->reusesym = PETSC_TRUE;
    C->offloadmask   = PETSC_OFFLOAD_GPU;
  }
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/* handles sparse or dense B: selects the productsymbolic implementation for mat
   based on the product type and on whether the operands live on the GPU */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
  Mat_Product    *product = mat->product;
  PetscErrorCode ierr;
  PetscBool      isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;

  PetscFunctionBegin;
  MatCheckProduct(mat,1);
  ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  if (!product->A->boundtocpu && !product->B->boundtocpu) {
    ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
  }
  if (product->type == MATPRODUCT_ABC) {
    Ciscusp = PETSC_FALSE;
    if (!product->C->boundtocpu) {
      ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
    }
  }
  if (isdense) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
      if (product->A->boundtocpu) {
        ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
      } else {
        mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
      }
      break;
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else if (Biscusp && Ciscusp) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
      mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
      break;
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else { /* fallback for AIJ */
    ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* yy = A*xx */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A*xx + yy */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^H*xx */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A^H*xx + yy */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{ PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } __global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[idx[i]] += x[i]; } /* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */ static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct; PetscScalar *xarray,*zarray,*dptr,*beta,*xptr; PetscErrorCode ierr; cudaError_t cerr; cusparseStatus_t stat; cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE; PetscBool compressed; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) PetscInt nx,ny; #endif PetscFunctionBegin; if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian and not transpose not supported"); if (!a->nonzerorowcnt) { if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);} else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);} PetscFunctionReturn(0); } /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */ ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); if (!trans) { matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)"); } else { if (herm || !A->form_explicit_transpose) { opA = herm ? 
CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    } else {
      if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr);}
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
    }
  }
  /* Does the matrix use compressed rows (i.e., drop zero rows)? */
  compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;

  try {
    ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
    else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      /* z = A x + beta y.
         If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
         When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
      */
      xptr = xarray;
      dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
      beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated
         length, since the work vector is allocated to accommodate different uses. So we get
         the length info directly from mat.
       */
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_cols;
        ny = mat->num_rows;
      }
#endif
    } else {
      /* z = A^T x + beta y
         If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
         Note A^Tx is of full length, so we set beta to 1.0 if y exists.
       */
      xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
      dptr = zarray;
      beta = yy ? matstruct->beta_one : matstruct->beta_zero;
      if (compressed) { /* Scatter x to work vector */
        thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
        thrust::for_each(thrust::cuda::par.on(PetscDefaultCudaStream),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAEqualsReverse());
      }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_rows;
        ny = mat->num_cols;
      }
#endif
    }

    /* csr_spmv does y = alpha op(A) x + beta y */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      /* opA indexes the cached per-operation SpMV descriptors below; guard against ABI drift */
      if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
      if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
        stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
                                       matstruct->matDescr,
                                       matstruct->cuSpMV[opA].vecXDescr, beta,
                                       matstruct->cuSpMV[opA].vecYDescr,
                                       cusparse_scalartype,
                                       cusparsestruct->spmvAlg,
                                       &matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
        matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
      } else {
        /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
        stat =
cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
        stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
      }
      stat = cusparseSpMV(cusparsestruct->handle, opA,
                          matstruct->alpha_one,
                          matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTransposeForMult() */
                          matstruct->cuSpMV[opA].vecXDescr,
                          beta,
                          matstruct->cuSpMV[opA].vecYDescr,
                          cusparse_scalartype,
                          cusparsestruct->spmvAlg,
                          matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
      CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
      stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
                               mat->num_rows, mat->num_cols,
                               mat->num_entries, matstruct->alpha_one, matstruct->descr,
                               mat->values->data().get(), mat->row_offsets->data().get(),
                               mat->column_indices->data().get(), xptr, beta,
                               dptr);CHKERRCUSPARSE(stat);
#endif
    } else {
      if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
        stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
                                 matstruct->alpha_one, matstruct->descr, hybMat,
                                 xptr, beta,
                                 dptr);CHKERRCUSPARSE(stat);
#endif
      }
    }
    cerr = WaitForCUDA();CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      if (yy) { /* MatMultAdd: zz = A*xx + yy */
        if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
          ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
        } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
          ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
        }
      } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
        ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
      }

      /* ScatterAdd the result from work vector into the full vector when A is compressed */
      if (compressed) {
        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
           and in the destructor of the scope, it will call cudaStreamSynchronize() on this stream. One has to store all events to
           prevent that. So I just add a ScatterAdd kernel.
         */
#if 0
        thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
        thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAPlusEquals());
#else
        PetscInt n = matstruct->cprowIndices->size();
        ScatterAdd<<<(n+255)/256,256,0,PetscDefaultCudaStream>>>(n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
        cerr = WaitForCUDA();CHKERRCUDA(cerr);
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      }
    } else {
      if (yy && yy != zz) {
        ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
      }
    }
    ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
    else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  if (yy) {
    ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
  } else {
    ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode
MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* zz = A^T*xx + yy */
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode             ierr;
  PetscSplitCSRDataStructure *d_mat = NULL;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
  }
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
  if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
  if (d_mat) { /* a device matrix exists, so the assembled values live on the GPU */
    A->offloadmask = PETSC_OFFLOAD_GPU;
  }
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------------------*/
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz).  By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode             ierr;
  PetscSplitCSRDataStructure *d_mat = NULL;

  PetscFunctionBegin;
  /* grab the device split-CSR matrix (if any) before the spptr holding it is destroyed */
  if (A->factortype == MAT_FACTOR_NONE) {
    d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
    ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
    ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  if
(d_mat) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; cudaError_t err; PetscSplitCSRDataStructure h_mat; ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr); err = cudaMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), cudaMemcpyDeviceToHost);CHKERRCUDA(err); if (a->compressedrow.use) { err = cudaFree(h_mat.diag.i);CHKERRCUDA(err); } err = cudaFree(d_mat);CHKERRCUDA(err); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*); static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool); static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr); ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str) { PetscErrorCode ierr; Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data; Mat_SeqAIJCUSPARSE *cy; Mat_SeqAIJCUSPARSE 
*cx; PetscScalar *ay; const PetscScalar *ax; CsrMatrix *csry,*csrx; cudaError_t cerr; PetscFunctionBegin; cy = (Mat_SeqAIJCUSPARSE*)Y->spptr; cx = (Mat_SeqAIJCUSPARSE*)X->spptr; if (X->ops->axpy != Y->ops->axpy) { ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr); ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr); PetscFunctionReturn(0); } /* if we are here, it means both matrices are bound to GPU */ ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr); if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported"); if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported"); csry = (CsrMatrix*)cy->mat->mat; csrx = (CsrMatrix*)cx->mat->mat; /* see if we can turn this into a cublas axpy: same nz count and identical device CSR pattern upgrades str to SAME_NONZERO_PATTERN */ if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) { bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin()); if (eq) { eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin()); } if (eq) str = SAME_NONZERO_PATTERN; } /* spgeam is buggy with one column */ if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN; if (str == SUBSET_NONZERO_PATTERN) { cusparseStatus_t stat; PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize; void *buffer;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr); stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* CUDA >= 11: csrgeam2 API needs an explicit work buffer sized by a first query call */
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); cerr = cudaFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr); ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr); } else if (str == SAME_NONZERO_PATTERN) { /* identical patterns: the whole update collapses to one cublas axpy over the values arrays */ cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscBLASInt one = 1, bnz = 1; ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr); ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr); } else { /* different patterns: fall back to the host implementation */ ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr); ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr); } PetscFunctionReturn(0); }
/* Y *= a via a single cublas scal over the device values array */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a) { PetscErrorCode ierr; Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data; PetscScalar *ay; cudaError_t cerr; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscBLASInt one = 1, bnz = 1; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(y->nz,&bnz);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXscal(cublasv2handle,bnz,&a,ay,one);CHKERRCUBLAS(berr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuFlops(bnz);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr); ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* zero both the device copies (mat and, if present, its transpose) and the host values; offloadmask records which copies are valid afterwards */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A) { PetscErrorCode ierr; PetscBool both = PETSC_FALSE; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr; if (spptr->mat) { CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat; if (matrix->values) { both = PETSC_TRUE; thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.); } } if (spptr->matTranspose) { CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat; if
(matrix->values) { thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.); } } }
//ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr); ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr); if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; else A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); }
/* Toggle between CPU and GPU implementations: flg=PETSC_TRUE migrates data back to the host and installs the SeqAIJ ops / clears GPU composed methods; flg=PETSC_FALSE installs the CUSPARSE ops and composes the GPU-specific methods */
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0); if (flg) { ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE; A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE; A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE; ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); } A->boundtocpu = flg; a->inode.use = flg; PetscFunctionReturn(0); }
/* In-place or copying conversion of a SeqAIJ matrix to the CUSPARSE type: allocates the spptr (cusparse handle + format defaults) on first use and installs the GPU method table */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat) { PetscErrorCode ierr; cusparseStatus_t stat; Mat B; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */ if (reuse == MAT_INITIAL_MATRIX) { ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr); } else if (reuse == MAT_REUSE_MATRIX) { ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } B =
*newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); if (reuse != MAT_REUSE_MATRIX && !B->spptr) { if (B->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr; ierr = PetscNew(&spptr);CHKERRQ(ierr); stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat); stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat); spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */ spptr->spmmAlg = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */ spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
B->spptr = spptr; } else { Mat_SeqAIJCUSPARSETriFactors *spptr; ierr = PetscNew(&spptr);CHKERRQ(ierr); stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat); stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat); B->spptr = spptr; } B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->setoption = MatSetOption_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE; B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE; ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* type constructor registered for MATSEQAIJCUSPARSE: build a SeqAIJ matrix and convert it in place */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr); ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
/*MC MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). Level: beginner .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);
/* register the cuSPARSE-based LU/Cholesky/ILU/ICC solvers (and the banded LU variant) with the MatSolverType registry */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); }
/* free the whole Mat_SeqAIJCUSPARSE container: mult structs, scratch arrays and the cusparse handle */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { PetscErrorCode ierr; cusparseStatus_t stat; PetscFunctionBegin;
if (*cusparsestruct) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    delete (*cusparsestruct)->rowoffsets_gpu;
    delete (*cusparsestruct)->cooPerm;
    delete (*cusparsestruct)->cooPerm_a;
    delete (*cusparsestruct)->csr2csc_i;
    if ((*cusparsestruct)->handle) {stat = cusparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
    ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Free a CsrMatrix container and its thrust device arrays; safe to call on an already-NULL pointer */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}

/* Destroy a triangular-factor struct: matrix descriptor, analysis info, CSR storage, and the solve/csr2csc scratch buffers */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    if ((*trifactor)->solveBuffer) {cudaError_t cerr = cudaFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
    if ((*trifactor)->AA_h) {cudaError_t cerr = cudaFreeHost((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if ((*trifactor)->csr2cscBuffer) {cudaError_t cerr = cudaFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
    ierr = PetscFree(*trifactor);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Destroy a mat/matTranspose multiply struct for the given storage format (HYB/ELL only exist pre CUDA-11) */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  cusparseStatus_t stat;
  cudaError_t      err;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr); /* bug fix: the returned error code was silently discarded */
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha_one) { err=cudaFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
    if ((*matstruct)->beta_zero) { err=cudaFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
    if ((*matstruct)->beta_one)  { err=cudaFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    /* CUDA >= 11 keeps per-operation SpMV descriptors and buffers that must be released too */
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) {stat = cusparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
    for (int i=0; i<3; i++) {
      if (mdata->cuSpMV[i].initialized) {
        err  = cudaFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(0);
}

/* Release all four triangular factors plus permutation/work arrays, keeping the container alive for reuse */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete
(*trifactors)->workVector;
    (*trifactors)->rpermIndices = NULL;
    (*trifactors)->cpermIndices = NULL;
    (*trifactors)->workVector   = NULL;
    if ((*trifactors)->a_band_d) {cudaError_t cerr = cudaFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
    if ((*trifactors)->i_band_d) {cudaError_t cerr = cudaFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
  }
  PetscFunctionReturn(0);
}

/* Fully destroy the triangular-factor container: reset all factor data, then release the cusparse handle and the container itself */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode   ierr;
  cusparseHandle_t handle;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr   = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    handle = (*trifactors)->handle; /* explicit assignment instead of assignment-inside-if: same behavior, no -Wparentheses ambiguity */
    if (handle) { stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat); }
    ierr = PetscFree(*trifactors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* lexicographic (row,col) ordering of COO index tuples */
struct IJCompare
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* equality of COO (row,col) index tuples */
struct IJEqual
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
    return true;
  }
};

/* 0 when equal, 1 when different (used with adjacent_difference to mark runs of duplicate COO entries) */
struct IJDiff
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return t1 == t2 ?
0 : 1; } };
/* logical OR of two flags (combines the row-diff and col-diff markers) */
struct IJSum { __host__ __device__ inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2) { return t1||t2; } };
#include <thrust/iterator/discard_iterator.h>
/* Insert/add the COO values v (in the caller's original ordering) into the device CSR values array, using the permutation (cooPerm) and duplicate map (cooPerm_a) built by MatSetPreallocationCOO */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; THRUSTARRAY *cooPerm_v = NULL; thrust::device_ptr<const PetscScalar> d_v; CsrMatrix *matrix; PetscErrorCode ierr; cudaError_t cerr; PetscInt n; PetscFunctionBegin; if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct"); if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix"); if (!cusp->cooPerm) { ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); PetscFunctionReturn(0); } matrix = (CsrMatrix*)cusp->mat->mat; if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory"); if (!v) { if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.); goto finalize; } n = cusp->cooPerm->size(); /* host-side v gets staged into a temporary device array first */ if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); } else { cooPerm_v = new THRUSTARRAY(n); cooPerm_v->assign(v,v+n); d_v = cooPerm_v->data(); ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */ if (cusp->cooPerm_a) { THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size()); auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()); thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>()); thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>()); delete cooPerm_w; } else { auto zibit
= thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit,zieit,VecCUDAPlusEquals()); } } else { if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */ auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()); thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>()); } else { auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit,zieit,VecCUDAEquals()); } } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); finalize: delete cooPerm_v; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); /* shorter version of MatAssemblyEnd_SeqAIJ */ ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr); ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr); ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr); a->reallocs = 0; A->info.mallocs += 0; A->info.nz_unneeded = 0; A->assembled = A->was_assembled = PETSC_TRUE; A->num_ass++; PetscFunctionReturn(0); }
/* drop the cached explicit transpose (or just mark it stale when destroy is PETSC_FALSE) */
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQAIJCUSPARSE); if (!cusp) PetscFunctionReturn(0); if
(destroy) { ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr); delete cusp->csr2csc_i; cusp->csr2csc_i = NULL; } A->transupdated = PETSC_FALSE; PetscFunctionReturn(0); }
#include <thrust/binary_search.h>
/* Preallocate from COO triplets on the device: sort the (i,j) pairs, remember the permutation (cooPerm) and, when duplicates exist, a reduction map (cooPerm_a), then build the host CSR arrays from the unique entries */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[]) { PetscErrorCode ierr; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt cooPerm_n, nzr = 0; cudaError_t cerr; PetscFunctionBegin; ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0; if (n != cooPerm_n) { delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; } if (n) { THRUSTINTARRAY d_i(n); THRUSTINTARRAY d_j(n); THRUSTINTARRAY ii(A->rmap->n); if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); } if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); } ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr); d_i.assign(coo_i,coo_i+n); d_j.assign(coo_j,coo_j+n); auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin())); auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end())); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0); thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); *cusp->cooPerm_a = d_i; THRUSTINTARRAY w = d_j; auto nekey = thrust::unique(fkey, ekey, IJEqual()); if (nekey == ekey) { /* all entries are unique */ delete cusp->cooPerm_a; cusp->cooPerm_a = NULL; } else { /* I couldn't come up with a more elegant algorithm */ adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff()); adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff()); (*cusp->cooPerm_a)[0] = 0; w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum()); thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>()); } thrust::counting_iterator<PetscInt> search_begin(0); /* per-row counts via upper_bound on the sorted, uniqued row indices */ thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(), search_begin, search_begin + A->rmap->n, ii.begin()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr); a->singlemalloc = PETSC_FALSE; a->free_a = PETSC_TRUE; a->free_ij = PETSC_TRUE; ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr); a->i[0] = 0; cerr = cudaMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); a->nz = a->maxnz = a->i[A->rmap->n]; a->rmax = 0; ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr); ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr); cerr = cudaMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); } if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); } for (PetscInt i = 0; i < A->rmap->n; i++) { const PetscInt nnzr = a->i[i+1] - a->i[i]; nzr += (PetscInt)!!(nnzr); a->ilen[i] = a->imax[i] = nnzr; a->rmax = PetscMax(a->rmax,nnzr); } a->nonzerorowcnt = nzr; A->preallocated = PETSC_TRUE; ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr); ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr); } else { ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr); } ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr); /* We want to allocate the CUSPARSE struct for matvec now. The code is so convoluted now that I prefer to copy zeros */ ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr); ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_CPU; A->nonzerostate++; ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); A->assembled = PETSC_FALSE; A->was_assembled = PETSC_FALSE; PetscFunctionReturn(0); }
/* read-only access to the device CSR values array (offloadmask unchanged) */
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; CsrMatrix *csr; PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); PetscValidPointer(a,2); PetscCheckTypeName(A,MATSEQAIJCUSPARSE); if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix*)cusp->mat->mat; if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory"); *a = csr->values->data().get(); PetscFunctionReturn(0); }
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a) { PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); PetscValidPointer(a,2); PetscCheckTypeName(A,MATSEQAIJCUSPARSE); *a = NULL; PetscFunctionReturn(0); }
/* read-write access: syncs to GPU first, marks the GPU copy authoritative and the cached transpose stale */
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; CsrMatrix *csr; PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); PetscValidPointer(a,2); PetscCheckTypeName(A,MATSEQAIJCUSPARSE); if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix*)cusp->mat->mat; if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory"); *a = csr->values->data().get(); A->offloadmask = PETSC_OFFLOAD_GPU; ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); }
t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } };
/* add a fixed column offset (used to shift B's column indices past A's columns when merging) */
struct Shift { int _shift; Shift(int shift) : _shift(shift) {} __host__ __device__ inline int operator() (const int &c) { return c + _shift; } };
/* merges two SeqAIJCUSPARSE matrices, [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp; Mat_SeqAIJCUSPARSEMultStruct *Cmat; CsrMatrix *Acsr,*Bcsr,*Ccsr; PetscInt Annz,Bnnz; cusparseStatus_t stat; PetscInt i,m,n,zero = 0; cudaError_t cerr; PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); PetscValidHeaderSpecific(B,MAT_CLASSID,2); PetscValidPointer(C,4); PetscCheckTypeName(A,MATSEQAIJCUSPARSE); PetscCheckTypeName(B,MATSEQAIJCUSPARSE); if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n); if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported"); if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr); ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr); c = (Mat_SeqAIJ*)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m;
/* MAT_INITIAL_MATRIX continued: create the cuSPARSE descriptor for C and the
   device-resident alpha/beta constants used by the SpMV paths; ensure A and B
   (and their explicit transposes) are present on the GPU, then merge their
   entries into C's CSR arrays via a device-side COO merge. */
Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEFormExplicitTransposeForMult(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m+1); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->cooPerm = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff,*Broff; if (a->compressedrow.use) { /* need full row offset */ if
(!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1); ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = cusparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10,0,0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n)); #endif auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm)); auto Bzb =
thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin())); auto p1 = Ccusp->cooPerm->begin(); auto p2 = Ccusp->cooPerm->begin(); thrust::advance(p2,Annz); PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10,0,0) thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred)); PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred)); #endif stat = cusparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ?
PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n+1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT); thrust::advance(rT,-1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz)); thrust::copy(titb,tite,rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT); if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); stat = cusparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(CmatT->beta_one),
sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } }
/* Mirror C's sparsity pattern back to the host Mat_SeqAIJ metadata (i, j, ilen,
   imax, rmax, nonzerorowcnt); values stay on the GPU.  64-bit-index builds do
   the 32->64 widening on the device before the copy. */
c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; cerr = cudaMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr); c->maxnz = c->nz;
c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i+1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); } ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr); (*C)->nonzerostate++; ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; }
/* MAT_REUSE_MATRIX: the structure of C is unchanged, so only scatter the new
   values of A and B into C (and into C's transpose when kept) through the
   cooPerm permutation recorded by the initial merge. */
else { if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n); c = (Mat_SeqAIJ*)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm"); if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; Ccsr = (CsrMatrix*)Ccusp->mat->mat; if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size()); if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size()); if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size()); if (Ccsr->num_entries != Acsr->num_entries +
Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries); if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid,Acsr->num_entries); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); thrust::for_each(zibait,zieait,VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end()))); thrust::for_each(zibbit,ziebit,VecCUDAEquals()); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ?
(CsrMatrix*)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); (*C)->transupdated = PETSC_TRUE; } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); } } ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); }

/* Gather n selected entries of A's (GPU) value array into v: v[i] = aa[idx[i]].
   v may point to either host or device memory (detected with isCudaMem); for a
   host destination the gather runs on the device into a temporary and is then
   copied back.  With idx == NULL the first n values are copied directly. */
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { PetscErrorCode ierr; bool dmem; const PetscScalar *av; cudaError_t cerr; PetscFunctionBegin; dmem = isCudaMem(v); ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx,idx+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n)); thrust::for_each(zibit,zieit,VecCUDAEquals()); if (w) { cerr = cudaMemcpy(v,w->data().get(),n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } delete w; } else { cerr = cudaMemcpy(v,av,n*sizeof(PetscScalar),dmem ?
cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr); PetscFunctionReturn(0); }

/* LU BAND factorization with optimization for block diagonal (Nf blocks) in natural order (-mat_no_inode -pc_factor_mat_ordering_type rcm with Nf>1 fields) requires: structurally symmetric: fix with transpose/column meta data */

/* The GPU LU factor kernel.
   Grid layout: gridDim.x = Nf (fields), gridDim.y = Nblk (blocks per field).
   Fills bi_csr[0..n] with the row offsets of the packed band storage for an
   n x n matrix of half-bandwidth bw; the closed-form expression below accounts
   for the truncated "ears" at the top-left and bottom-right of the band. */
__global__ void __launch_bounds__(1024,1) mat_lu_factor_band_init_set_i(const PetscInt n, const int bw, int bi_csr[]) { const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i); // set i (row+1) if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) bi_csr[0] = 0; // dummy at zero // for (int rowb = start_i + blkIdx*blockDim.y + threadIdx.y; rowb < end_i; rowb += Nblk*blockDim.y) { // rows in block for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i && threadIdx.x==0) { PetscInt i=rowb+1, ni = (rowb>bw) ? bw+1 : i, n1L = ni*(ni-1)/2, nug= i*bw, n2L = bw*((rowb>bw) ? (rowb-bw) : 0), mi = bw + rowb + 1 - n, clip = (mi>0) ?
mi*(mi-1)/2 + mi: 0; bi_csr[rowb+1] = n1L + nug - clip + n2L + i; } } }

/* copy AIJ to AIJ_BAND: zero the band storage, then scatter the (row/column
   permuted via r[] and ic[]) AIJ entries of A into it.  Same Nf x Nblk grid
   decomposition as mat_lu_factor_band_init_set_i; threadIdx.y walks rows,
   threadIdx.x walks the nonzeros within a row.  NOTE(review): the scatter
   index "colb - bjStart" assumes every permuted column of a row lands inside
   the band [rowb-bw, rowb+bw] -- guaranteed only for the band widths computed
   in MatLUFactorSymbolic_SeqAIJCUSPARSEBAND below. */
__global__ void __launch_bounds__(1024,1) mat_lu_factor_band_copy_aij_aij(const PetscInt n, const int bw, const PetscInt r[], const PetscInt ic[], const int ai_d[], const int aj_d[], const PetscScalar aa_d[], const int bi_csr[], PetscScalar ba_csr[]) { const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt nloc_i = (nloc/Nblk + !!(nloc%Nblk)), start_i = field*nloc + blkIdx*nloc_i, end_i = (start_i + nloc_i) > (field+1)*nloc ? (field+1)*nloc : (start_i + nloc_i); // zero B if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) ba_csr[bi_csr[n]] = 0; // flop count at end for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i) { PetscScalar *batmp = ba_csr + bi_csr[rowb]; const PetscInt nzb = bi_csr[rowb+1] - bi_csr[rowb]; for (int j=threadIdx.x ; j<nzb ; j += blockDim.x) { if (j<nzb) { batmp[j] = 0; } } } } // copy A into B with CSR format -- these two loops can be fused for (int rowb = start_i + threadIdx.y; rowb < end_i; rowb += blockDim.y) { // rows in block by thread y if (rowb < end_i) { const PetscInt rowa = r[rowb], nza = ai_d[rowa+1] - ai_d[rowa]; const int *ajtmp = aj_d + ai_d[rowa], bjStart = (rowb>bw) ?
rowb-bw : 0; const PetscScalar *av = aa_d + ai_d[rowa]; PetscScalar *batmp = ba_csr + bi_csr[rowb]; /* load in initial (unfactored row) */ for (int j=threadIdx.x ; j<nza ; j += blockDim.x) { if (j<nza) { PetscInt colb = ic[ajtmp[j]], idx = colb - bjStart; PetscScalar vala = av[j]; batmp[idx] = vala; } } } } }

/* print AIJ_BAND: single-thread debug dump of the packed band matrix. */
__global__ void print_mat_aij_band(const PetscInt n, const int bi_csr[], const PetscScalar ba_csr[]) { // debug if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0){ printf("B (AIJ) n=%d:\n",(int)n); for (int rowb=0;rowb<n;rowb++) { const PetscInt nz = bi_csr[rowb+1] - bi_csr[rowb]; const PetscScalar *batmp = ba_csr + bi_csr[rowb]; for (int j=0; j<nz; j++) printf("(%13.6e) ",PetscRealPart(batmp[j])); printf(" bi=%d\n",bi_csr[rowb+1]); } } }

/* Band LU kernel --- ba_csr bi_csr.  Right-looking LU on the packed band, one
   field per blockIdx.x; the pivot loop is serial over rows, and the rank-1
   trailing update is parallel over (threadIdx.y = row below the pivot,
   threadIdx.x = column within that row).  On CUDA 11+ a cooperative grid sync
   separates pivot steps (launched via cudaLaunchCooperativeKernel below);
   older CUDA falls back to __syncthreads(), which is only block-wide -- hence
   the Nblk=1 launch used by the numeric factorization.  Shared memory holds
   one kIdx per threadIdx.y row so lanes with threadIdx.x>0 can reuse it. */
__global__ void __launch_bounds__(1024,1) mat_lu_factor_band(const PetscInt n, const PetscInt bw, const int bi_csr[], PetscScalar ba_csr[]) { extern __shared__ PetscInt smemInt[]; PetscInt *sm_pkIdx = &smemInt[0]; const PetscInt Nf = gridDim.x, Nblk = gridDim.y, nloc = n/Nf; const PetscInt field = blockIdx.x, blkIdx = blockIdx.y; const PetscInt start = field*nloc, end = start + nloc; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) auto g = cooperative_groups::this_grid(); #endif // A22 panel update for each row A(1,:) and col A(:,1) for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) { PetscInt tnzUd = bw, maxU = end-1 - glbDD; // we are chopping off the inter ears const PetscInt nzUd = (tnzUd>maxU) ? maxU : tnzUd, dOffset = (glbDD > bw) ?
bw : glbDD; // global to go past ears after first const PetscInt nzUd_pad = blockDim.y*(nzUd/blockDim.y + !!(nzUd%blockDim.y)); PetscScalar *pBdd = ba_csr + bi_csr[glbDD] + dOffset; const PetscScalar *baUd = pBdd + 1; // vector of data U(i,i+1:end) const PetscScalar Bdd = *pBdd; const PetscInt offset = blkIdx*blockDim.y + threadIdx.y, inc = Nblk*blockDim.y; for (int idx = offset, myi = glbDD + offset + 1; idx < nzUd_pad ; idx += inc, myi += inc) { /* assuming symmetric structure */ if (idx < nzUd && threadIdx.x==0) { /* assuming symmetric structure */ const PetscInt bwi = myi > bw ? bw : myi, kIdx = bwi - (myi-glbDD); // cuts off just the first (global) block PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx; *Aid = *Aid/Bdd; sm_pkIdx[threadIdx.y] = kIdx; } __syncthreads(); // synch on threadIdx.x only if (idx < nzUd) { /* assuming symmetric structure */ PetscInt kIdx = sm_pkIdx[threadIdx.y]; PetscScalar *Aid = ba_csr + bi_csr[myi] + kIdx; PetscScalar *Aij = Aid + 1; PetscScalar Lid = *Aid; for (int jIdx=threadIdx.x ; jIdx<nzUd ; jIdx += blockDim.x) { if (jIdx<nzUd) { Aij[jIdx] -= Lid*baUd[jIdx]; } } } } #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) g.sync(); #else __syncthreads(); #endif } /* endof for (i=0; i<n; i++) { */ }

static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat,Vec,Vec);

/* Numeric band LU: pulls the permutations and CSR arrays of A from the GPU
   structures set up by the symbolic phase, recovers the half-bandwidth bw from
   the preallocated nnz (inverting nz = n + (2n-1)bw - bw^2), copies the
   permuted A into the band storage, then runs mat_lu_factor_band -- as a
   cooperative launch spanning Ni SMs on CUDA 11+, or a single block otherwise.
   An optional "Nf" container composed on A carries the number of diagonal
   fields (and, encoded above 1000, a concurrency hint). */
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSEBAND(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructA; CsrMatrix *matrixA; PetscErrorCode ierr; cudaError_t cerr; const PetscInt n=A->rmap->n, *ic, *r; const int *ai_d, *aj_d; const PetscScalar *aa_d; PetscScalar *ba_t = cusparseTriFactors->a_band_d; int *bi_t = cusparseTriFactors->i_band_d; PetscContainer container;
int Ni = 10, team_size=9, Nf, nVec=56, nconcurrent = 1, nsm = -1; PetscFunctionBegin; if (A->rmap->n == 0) { PetscFunctionReturn(0); } // cusparse setup if (!cusparsestructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparsestructA"); matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; // matstruct->cprowIndices if (!matstructA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing mat struct"); matrixA = (CsrMatrix*)matstructA->mat; if (!matrixA) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing matrix cusparsestructA->mat->mat"); // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; if ((*pNf)/1000>0) nconcurrent = (*pNf)/1000; // number of SMs to use } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); // get data ic = thrust::raw_pointer_cast(cusparseTriFactors->cpermIndices->data()); ai_d = thrust::raw_pointer_cast(matrixA->row_offsets->data()); aj_d = thrust::raw_pointer_cast(matrixA->column_indices->data()); aa_d = thrust::raw_pointer_cast(matrixA->values->data().get()); r = thrust::raw_pointer_cast(cusparseTriFactors->rpermIndices->data()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); { int bw = (2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-b->nz))+PETSC_MACHINE_EPSILON))/2, bm1=bw-1,nl=n/Nf; int gpuid; cudaDeviceProp prop; cudaGetDevice(&gpuid); cudaGetDeviceProperties(&prop, gpuid); #if PETSC_PKG_CUDA_VERSION_LT(11,0,0) Ni = 1/nconcurrent; Ni = 1; #else nsm = prop.multiProcessorCount; Ni = nsm/Nf/nconcurrent; #endif team_size = bw/Ni + !!(bw%Ni); nVec = PetscMin(bw, 1024/team_size); ierr = PetscInfo5(A,"Matrix Bandwidth = %d, number SMs/block = %d, num concurency = %d, num fields = %d, numSMs/GPU = %d\n",bw,Ni,nconcurrent,Nf,nsm);CHKERRQ(ierr); { dim3
dimBlockTeam(nVec,team_size); dim3 dimBlockLeague(Nf,Ni); mat_lu_factor_band_copy_aij_aij<<<dimBlockLeague,dimBlockTeam>>>(n, bw, r, ic, ai_d, aj_d, aa_d, bi_t, ba_t); CHECK_LAUNCH_ERROR(); // does a sync #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) void *kernelArgs[] = { (void*)&n, (void*)&bw, (void*)&bi_t, (void*)&ba_t}; cudaLaunchCooperativeKernel((void*)mat_lu_factor_band, dimBlockLeague, dimBlockTeam, kernelArgs, team_size*sizeof(PetscInt), NULL); #else mat_lu_factor_band<<<dimBlockLeague,dimBlockTeam,team_size*sizeof(PetscInt)>>>(n, bw, bi_t, ba_t); #endif CHECK_LAUNCH_ERROR(); // does a sync #if defined(PETSC_USE_LOG) ierr = PetscLogGpuFlops((PetscLogDouble)Nf*(bm1*(bm1 + 1)*(2*bm1 + 1)/3 + 2*(nl-bw)*bw*bw + nl*(nl+1)/2));CHKERRQ(ierr); #endif } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. from MatLUFactorNumeric_AIJ_SeqAIJCUSPARSE */ B->ops->solve = MatSolve_SeqAIJCUSPARSEBAND; B->ops->solvetranspose = NULL; // need transpose B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; PetscFunctionReturn(0); }

/* PetscContainer destructor for the "Nf" field-count attached to the factor. */
static PetscErrorCode MatrixNfDestroy(void *ptr) { PetscInt *nf = (PetscInt *)ptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscFree(nf);CHKERRQ(ierr); PetscFunctionReturn(0); }

/* Symbolic band LU: validates that A is square, has a full diagonal and is
   (flagged as) structurally symmetric; propagates the optional "Nf" container
   from A to B; measures the lower/upper bandwidths of the permuted pattern
   (they must match); allocates the device band storage (one extra scalar is
   used as a flop-count slot by the copy kernel), builds bi_csr on the GPU via
   mat_lu_factor_band_init_set_i, caches the row/column permutations on the
   device, and fills in B's Mat_SeqAIJ bookkeeping and fill-ratio statistics. */
PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSEBAND(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b; IS isicol; PetscErrorCode ierr; cudaError_t cerr; const PetscInt *ic,*ai=a->i,*aj=a->j; PetscScalar *ba_t; int *bi_t; PetscInt i,n=A->rmap->n,Nf; PetscInt nzBcsr,bwL,bwU; PetscBool missing; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscContainer container; PetscFunctionBegin; if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"matrix must be square"); ierr = MatMissingDiagonal(A,&missing,&i);CHKERRQ(ierr); if (missing) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix is missing diagonal entry %D",i); if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"!cusparseTriFactors"); ierr = MatGetOption(A,MAT_STRUCTURALLY_SYMMETRIC,&missing);CHKERRQ(ierr); if (!missing) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"only structrally symmetric matrices supported"); // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr); ierr = PetscMalloc(sizeof(PetscInt), &pNf);CHKERRQ(ierr); *pNf = Nf; ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr); ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr); ierr = PetscObjectCompose((PetscObject)B, "Nf", (PetscObject) container);CHKERRQ(ierr); ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); ierr = ISInvertPermutation(iscol,PETSC_DECIDE,&isicol);CHKERRQ(ierr); ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation_SeqAIJ(B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)isicol);CHKERRQ(ierr); b = (Mat_SeqAIJ*)(B)->data; /* get band widths, MatComputeBandwidth should take a reordering ic and do this */ bwL = bwU = 0; for (int rwb=0; rwb<n; rwb++) { const PetscInt rwa = ic[rwb], anz = ai[rwb+1] - ai[rwb], *ajtmp = aj + ai[rwb]; for (int j=0;j<anz;j++) { PetscInt colb = ic[ajtmp[j]]; if (colb<rwa) { // L if (rwa-colb > bwL) bwL = rwa-colb; } else { if (colb-rwa > bwU) bwU = colb-rwa; } } } ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr); /* only support structurally symmetric, but it might work */ if (bwL!=bwU) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Only symmetric structure supported (now) W_L=%D W_U=%D",bwL,bwU); ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); nzBcsr = n + (2*n-1)*bwU - bwU*bwU; b->maxnz = b->nz = nzBcsr; cusparseTriFactors->nnz = b->nz; // only meta data needed: n & nz if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); } cerr = cudaMalloc(&ba_t,(b->nz+1)*sizeof(PetscScalar));CHKERRCUDA(cerr); // include a place for flops cerr = cudaMalloc(&bi_t,(n+1)*sizeof(int));CHKERRCUDA(cerr); cusparseTriFactors->a_band_d = ba_t; cusparseTriFactors->i_band_d = bi_t; /* In b structure: Free imax, ilen, old a, old j. Allocate solve_work, new a, new j */ ierr = PetscLogObjectMemory((PetscObject)B,(nzBcsr+1)*(sizeof(PetscInt)+sizeof(PetscScalar)));CHKERRQ(ierr); { dim3 dimBlockTeam(1,128); dim3 dimBlockLeague(Nf,1); mat_lu_factor_band_init_set_i<<<dimBlockLeague,dimBlockTeam>>>(n, bwU, bi_t); } CHECK_LAUNCH_ERROR(); // does a sync // setup data if (!cusparseTriFactors->rpermIndices) { const PetscInt *r; ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* upper triangular indices */ if (!cusparseTriFactors->cpermIndices) { const PetscInt *c; ierr = ISGetIndices(isicol,&c);CHKERRQ(ierr); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); ierr = ISRestoreIndices(isicol,&c);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* put together the new matrix */ b->free_a = PETSC_FALSE; b->free_ij = PETSC_FALSE; b->singlemalloc = PETSC_FALSE; b->ilen = NULL; b->imax = NULL; b->row = isrow; b->col = iscol; ierr = PetscObjectReference((PetscObject)isrow);CHKERRQ(ierr); ierr = PetscObjectReference((PetscObject)iscol);CHKERRQ(ierr); b->icol = isicol; ierr =
PetscMalloc1(n+1,&b->solve_work);CHKERRQ(ierr); B->factortype = MAT_FACTOR_LU; B->info.factor_mallocs = 0; B->info.fill_ratio_given = 0; if (ai[n]) { B->info.fill_ratio_needed = ((PetscReal)(nzBcsr))/((PetscReal)ai[n]); } else { B->info.fill_ratio_needed = 0.0; } #if defined(PETSC_USE_INFO) if (ai[n] != 0) { PetscReal af = B->info.fill_ratio_needed; ierr = PetscInfo1(A,"Band fill ratio %g\n",(double)af);CHKERRQ(ierr); } else { ierr = PetscInfo(A,"Empty matrix\n");CHKERRQ(ierr); } #endif if (a->inode.size) { ierr = PetscInfo(A,"Warning: using inodes in band solver.\n");CHKERRQ(ierr); } ierr = MatSeqAIJCheckInode_FactorLU(B);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSEBAND; B->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); }

/* Use -pc_factor_mat_solver_type cusparseband */
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse_band(Mat A,MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSEBAND; PetscFunctionReturn(0); }

/* Factory for the CUSPARSEBAND solver: creates the factor matrix shell and
   wires the LU symbolic routine; only MAT_FACTOR_LU is supported. */
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); (*B)->factortype = ftype; (*B)->canuseordering = PETSC_TRUE; ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSEBAND; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSEBAND Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse_band);CHKERRQ(ierr); PetscFunctionReturn(0); }

#define WARP_SIZE 32
/* Warp-level sum reduction via shuffle-down; the result lands in lane 0. */
template <typename
T> __forceinline__ __device__ T wreduce(T a) { T b; #pragma unroll for (int i = WARP_SIZE/2; i >= 1; i = i >> 1) { b = __shfl_down_sync(0xffffffff, a, i); a += b; } return a; }

// reduce in a block, returns result in thread 0
/* Two stages: each warp reduces with wreduce and lane 0 parks its partial in
   shared memory; warp 0 then reduces the NWARP partials with shuffles.
   NOTE(review): the shared buffer is declared double regardless of T, so this
   is only exact for real scalar types (it is instantiated with PetscReal). */
template <typename T, int BLOCK_SIZE> __device__ T breduce(T a) { constexpr int NWARP = BLOCK_SIZE/WARP_SIZE; __shared__ double buf[NWARP]; int wid = threadIdx.x / WARP_SIZE; int laneid = threadIdx.x % WARP_SIZE; T b = wreduce<T>(a); if (laneid == 0) buf[wid] = b; __syncthreads(); if (wid == 0) { if (threadIdx.x < NWARP) a = buf[threadIdx.x]; else a = 0; for (int i = (NWARP+1)/2; i >= 1; i = i >> 1) { a += __shfl_down_sync(0xffffffff, a, i); } } return a; }

// Band LU kernel --- ba_csr bi_csr
/* Triangular solve on the packed band factor: one field (diagonal block) per
   blockIdx.x; forward substitution with L (unit diagonal) followed by backward
   substitution with U.  Each dot product row*x is parallelized over
   threadIdx.x and combined with breduce; the pLi pointer arithmetic walks the
   packed band layout produced by the factor kernels, with corrections for the
   truncated corner "ears" of the first/last blocks.  x is overwritten in place. */
template <int BLOCK_SIZE> __global__ void __launch_bounds__(256,1) mat_solve_band(const PetscInt n, const PetscInt bw, const PetscScalar ba_csr[], PetscScalar x[]) { const PetscInt Nf = gridDim.x, nloc = n/Nf, field = blockIdx.x, start = field*nloc, end = start + nloc, chopnz = bw*(bw+1)/2, blocknz=(2*bw+1)*nloc, blocknz_0 = blocknz-chopnz; const PetscScalar *pLi; const int tid = threadIdx.x; /* Next, solve L */ pLi = ba_csr + (field==0 ? 0 : blocknz_0 + (field-1)*blocknz + bw); // diagonal (0,0) in field for (int glbDD=start, locDD = 0; glbDD<end; glbDD++, locDD++) { const PetscInt col = locDD<bw ?
start : (glbDD-bw); PetscScalar t = 0; for (int j=col+tid,idx=tid;j<glbDD;j+=blockDim.x,idx+=blockDim.x) { t += pLi[idx]*x[j]; } #if defined(PETSC_USE_COMPLEX) PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t); PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti)); t = tt; #else t = breduce<PetscReal,BLOCK_SIZE>(t); #endif if (threadIdx.x == 0) x[glbDD] -= t; // /1.0 __syncthreads(); // inc pLi += glbDD-col; // get to diagonal if (glbDD > n-1-bw) pLi += n-1-glbDD; // skip over U, only last block has funny offset else pLi += bw; pLi += 1; // skip to next row if (field>0 && (locDD+1)<bw) pLi += bw-(locDD+1); // skip padding at beginning (ear) } /* Then, solve U */ pLi = ba_csr + Nf*blocknz - 2*chopnz - 1; // end of real data on block (diagonal) if (field != Nf-1) pLi -= blocknz_0 + (Nf-2-field)*blocknz + bw; // diagonal of last local row for (int glbDD=end-1, locDD = 0; glbDD >= start; glbDD--, locDD++) { const PetscInt col = (locDD<bw) ?
end-1 : glbDD+bw; // end of row in U PetscScalar t = 0; for (int j=col-tid,idx=tid;j>glbDD;j-=blockDim.x,idx+=blockDim.x) { t += pLi[-idx]*x[j]; } #if defined(PETSC_USE_COMPLEX) PetscReal tr = PetscRealPartComplex(t), ti = PetscImaginaryPartComplex(t); PetscScalar tt(breduce<PetscReal,BLOCK_SIZE>(tr), breduce<PetscReal,BLOCK_SIZE>(ti)); t = tt; #else t = breduce<PetscReal,BLOCK_SIZE>(PetscRealPart(t)); #endif pLi -= col-glbDD; // diagonal if (threadIdx.x == 0) { x[glbDD] -= t; x[glbDD] /= pLi[0]; } __syncthreads(); // inc past L to start of previous U pLi -= bw+1; if (glbDD<bw) pLi += bw-glbDD; // overshot in top left corner if (((locDD+1) < bw) && field != Nf-1) pLi -= (bw - (locDD+1)); // skip past right corner } }

/* Band triangular solve driver: recovers bw from the stored nnz, reads the
   optional "Nf" field count, and (continuing past this chunk) permutes b,
   runs mat_solve_band, and permutes the result into x. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSEBAND(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscInt n=A->rmap->n, nz=cusparseTriFactors->nnz, bw=(2*n-1 - (int)(PetscSqrtReal(1+4*(n*n-nz))+PETSC_MACHINE_EPSILON))/2, Nf; PetscErrorCode ierr; cudaError_t cerr; PetscContainer container; PetscFunctionBegin; if (A->rmap->n == 0) { PetscFunctionReturn(0); } // factor: get Nf if available ierr = PetscObjectQuery((PetscObject) A, "Nf", (PetscObject *) &container);CHKERRQ(ierr); if (container) { PetscInt *pNf=NULL; ierr = PetscContainerGetPointer(container, (void **) &pNf);CHKERRQ(ierr); Nf = (*pNf)%1000; } else Nf = 1; if (n%Nf) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"n % Nf != 0 %D %D",n,Nf); /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder
with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); constexpr int block = 128; mat_solve_band<block><<<Nf,block>>>(n,bw,cusparseTriFactors->a_band_d,tempGPU->data().get()); CHECK_LAUNCH_ERROR(); // does a sync /* Last, reorder with the column permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); }
6fbaa9ffdebd4a8b91032c490b29883289f1972c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width) { // Calculate the row index of the Pd element and M int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; // Calculate the column idenx of Pd and N int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x; float Pvalue = 0; // each thread computes one element of the block sub-matrix for (int k = 0; k < Width; ++k) Pvalue += Md[Row*Width+k] * Nd[k*Width+Col]; Pd[Row*Width+Col] = Pvalue; }
6fbaa9ffdebd4a8b91032c490b29883289f1972c.cu
#include "includes.h" __global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width) { // Calculate the row index of the Pd element and M int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; // Calculate the column idenx of Pd and N int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x; float Pvalue = 0; // each thread computes one element of the block sub-matrix for (int k = 0; k < Width; ++k) Pvalue += Md[Row*Width+k] * Nd[k*Width+Col]; Pd[Row*Width+Col] = Pvalue; }
5ea50699474f166d520c2895217d37db0d534cf9.hip
// !!! This is a file automatically generated by hipify!!! /* CUDA code for mutiplying bit-matrix A with bit-vector x over the GF(2) field. Author: Shashwat Shukla 15th Dec 2016 Tiled multiplication is performed. 32x32 tiles have been used. C++ does not have a data-type of size one bit. std::bitset is a pseudo-container that stores bits inside integer variables. However, they can't be passed by reference and hence can't be used for GPU compute. std::vector<bool> also suffers from this issue. It also does store bits in contiguous spaces and thus would drastically slow down GPU memory access as coalescing will not be possible. A custom implementation of a bit-set is hence used, which also uses unsigned integers to store and access bits. Note that we could store one bit in one int variable. But this would be wasteful as each integer is made of 32 bits. */ #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <math.h> using namespace std; #define n 10000 //The size of the matrix #define N (n + 31) / 32 //Number of unsigned integers (32 bits each) needed to store the bits. 
//Define matrices and vectors unsigned int A[n][N]; unsigned int x[N]; unsigned int *Ad, *xd; //Calculates the xor of the 32 bits of an integer __device__ unsigned int xorBits(unsigned int x) { unsigned int temp = 0; #pragma unroll for (int i = 0; i < 32; i++) { temp = temp + (x >> i); } temp = temp % 2; return temp; } //The kernel __global__ void kernel(unsigned int *Ad, unsigned int *xd) { __shared__ unsigned int B[32][32]; //Stores a sub-matrix of A __shared__ unsigned int v[32]; //Stores a part of the vector x __shared__ unsigned int c[32]; //Stores the result for this block //Thread id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int offset = N * 32 * by + N * ty + tx; //Initialise the vector c to zero if (ty == 0) c[tx] = 0; #pragma unroll for (int i = 0; i < (N + 31) / 32; i++) { //Traverse the entire width of the array A (along the column) if ((i * 32 + tx) < N && (by * 32 + ty) < n) //Padding B[ty][tx] = Ad[offset + i * 32]; else B[ty][tx] = 0; if (ty == 0) v[tx] = xd[i * 32 + tx]; __syncthreads(); //Wait for all threads to finish copying data to shared memory c[ty] = c[ty] ^ (B[ty][tx] & v[tx]); __syncthreads(); //Wait for all threads to finish computation and store the result in shared memory if (ty == 0) //Compress the resultant vector c[tx] = xorBits(c[tx]); __syncthreads(); } if (tx == 0 && ty == 0) { //Find the resultant vector from each block unsigned int temp = 0; temp = c[31]; #pragma unroll for (int i = 30; i >= 0; i--) { temp = temp << 1; temp = temp + c[i]; } xd[by] = temp; //Copy result from each block to the final result } } //Launch the kernel and check for errors hipError_t launchKernel() { hipError_t cudaStatus; //Define kernel grid dimensions size_t size = N * sizeof(unsigned int); dim3 threadsPerBlock(32, 32); dim3 numBlocks(1, N); // Choose the GPU to run code on cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Check if there is a CUDA enabled GPU?"); getchar(); goto Error; } // Allocate GPU buffers cudaStatus = hipMalloc((void**)&Ad, n * size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); goto Error; } cudaStatus = hipMalloc((void**)&xd, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); goto Error; } // Copy input vectors from host memory to GPU. cudaStatus = hipMemcpy(Ad, A, size * n, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (host to GPU) for A failed!"); getchar(); goto Error; } cudaStatus = hipMemcpy(xd, x, size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (host to GPU) for x failed!"); getchar(); goto Error; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //Launch kernel kernel << < numBlocks, threadsPerBlock >> > (Ad, xd); hipEventRecord(stop, 0); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); hipEventDestroy(start); hipEventDestroy(stop); cout << "Kernel Execution time(ms) = " << milliseconds << endl; // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); getchar(); goto Error; } // Copy output vectors from GPU to host memory. cudaStatus = hipMemcpy(x, xd, size, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy (GPU to host) failed!"); getchar(); goto Error; } Error: hipFree(Ad); hipFree(xd); return cudaStatus; } int main() { //The following is some initialisation for A and x. Change appropriately. 
for (int i = 0; i < n; i++) { for (int j = 0; j < N; j++) { A[i][j] = 0; } } A[0][0] = 1; for (int i = 0; i < N; i++) x[i] = 0; x[0] = 1; //Launch the kernel hipError_t cudaStatus = launchKernel(); if (cudaStatus != hipSuccess) { fprintf(stderr, "The kernel could not be launched!"); getchar(); return 1; } //Show the resultant vector(in decimal format, grouped as 32 bit words) for (int i = 0; i < N; i++) cout << x[i] << endl; cout << "Press enter to exit. "; getchar(); return 0; }
5ea50699474f166d520c2895217d37db0d534cf9.cu
/* CUDA code for mutiplying bit-matrix A with bit-vector x over the GF(2) field. Author: Shashwat Shukla 15th Dec 2016 Tiled multiplication is performed. 32x32 tiles have been used. C++ does not have a data-type of size one bit. std::bitset is a pseudo-container that stores bits inside integer variables. However, they can't be passed by reference and hence can't be used for GPU compute. std::vector<bool> also suffers from this issue. It also does store bits in contiguous spaces and thus would drastically slow down GPU memory access as coalescing will not be possible. A custom implementation of a bit-set is hence used, which also uses unsigned integers to store and access bits. Note that we could store one bit in one int variable. But this would be wasteful as each integer is made of 32 bits. */ #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <math.h> using namespace std; #define n 10000 //The size of the matrix #define N (n + 31) / 32 //Number of unsigned integers (32 bits each) needed to store the bits. 
//Define matrices and vectors unsigned int A[n][N]; unsigned int x[N]; unsigned int *Ad, *xd; //Calculates the xor of the 32 bits of an integer __device__ unsigned int xorBits(unsigned int x) { unsigned int temp = 0; #pragma unroll for (int i = 0; i < 32; i++) { temp = temp + (x >> i); } temp = temp % 2; return temp; } //The kernel __global__ void kernel(unsigned int *Ad, unsigned int *xd) { __shared__ unsigned int B[32][32]; //Stores a sub-matrix of A __shared__ unsigned int v[32]; //Stores a part of the vector x __shared__ unsigned int c[32]; //Stores the result for this block //Thread id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int offset = N * 32 * by + N * ty + tx; //Initialise the vector c to zero if (ty == 0) c[tx] = 0; #pragma unroll for (int i = 0; i < (N + 31) / 32; i++) { //Traverse the entire width of the array A (along the column) if ((i * 32 + tx) < N && (by * 32 + ty) < n) //Padding B[ty][tx] = Ad[offset + i * 32]; else B[ty][tx] = 0; if (ty == 0) v[tx] = xd[i * 32 + tx]; __syncthreads(); //Wait for all threads to finish copying data to shared memory c[ty] = c[ty] ^ (B[ty][tx] & v[tx]); __syncthreads(); //Wait for all threads to finish computation and store the result in shared memory if (ty == 0) //Compress the resultant vector c[tx] = xorBits(c[tx]); __syncthreads(); } if (tx == 0 && ty == 0) { //Find the resultant vector from each block unsigned int temp = 0; temp = c[31]; #pragma unroll for (int i = 30; i >= 0; i--) { temp = temp << 1; temp = temp + c[i]; } xd[by] = temp; //Copy result from each block to the final result } } //Launch the kernel and check for errors cudaError_t launchKernel() { cudaError_t cudaStatus; //Define kernel grid dimensions size_t size = N * sizeof(unsigned int); dim3 threadsPerBlock(32, 32); dim3 numBlocks(1, N); // Choose the GPU to run code on cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Check if there is a CUDA enabled GPU?"); getchar(); goto Error; } // Allocate GPU buffers cudaStatus = cudaMalloc((void**)&Ad, n * size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); goto Error; } cudaStatus = cudaMalloc((void**)&xd, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); goto Error; } // Copy input vectors from host memory to GPU. cudaStatus = cudaMemcpy(Ad, A, size * n, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (host to GPU) for A failed!"); getchar(); goto Error; } cudaStatus = cudaMemcpy(xd, x, size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (host to GPU) for x failed!"); getchar(); goto Error; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //Launch kernel kernel << < numBlocks, threadsPerBlock >> > (Ad, xd); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cout << "Kernel Execution time(ms) = " << milliseconds << endl; // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); getchar(); goto Error; } // Copy output vectors from GPU to host memory. cudaStatus = cudaMemcpy(x, xd, size, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy (GPU to host) failed!"); getchar(); goto Error; } Error: cudaFree(Ad); cudaFree(xd); return cudaStatus; } int main() { //The following is some initialisation for A and x. Change appropriately. 
for (int i = 0; i < n; i++) { for (int j = 0; j < N; j++) { A[i][j] = 0; } } A[0][0] = 1; for (int i = 0; i < N; i++) x[i] = 0; x[0] = 1; //Launch the kernel cudaError_t cudaStatus = launchKernel(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "The kernel could not be launched!"); getchar(); return 1; } //Show the resultant vector(in decimal format, grouped as 32 bit words) for (int i = 0; i < N; i++) cout << x[i] << endl; cout << "Press enter to exit. "; getchar(); return 0; }
b69183820a13a25760fa04faef20335eebb7ab8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION //#ifdef DEBUG #include "include/spectral_matrix.hxx" #include <thrust/device_vector.h> #include <thrust/transform.h> #include "include/debug_macros.h" #include "include/nvgraph_cublas.hxx" #include "include/nvgraph_cusparse.hxx" #include "include/nvgraph_error.hxx" #include "include/nvgraph_vector.hxx" // ========================================================= // Useful macros // ========================================================= // CUDA block size #define BLOCK_SIZE 1024 // Get index of matrix entry #define IDX(i, j, lda) ((i) + (j) * (lda)) namespace nvgraph { // ============================================= // CUDA kernels // ============================================= namespace { /// Apply diagonal matrix to vector template <typename IndexType_, typename ValueType_> static __global__ void diagmv(IndexType_ n, ValueType_ alpha, const ValueType_ *__restrict__ D, const ValueType_ *__restrict__ x, ValueType_ *__restrict__ y) { IndexType_ i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { y[i] += alpha * D[i] * x[i]; i += blockDim.x * gridDim.x; } } /// Apply diagonal matrix to a set of dense vectors (tall matrix) template <typename IndexType_, typename ValueType_, bool beta_is_zero> static __global__ void diagmm(IndexType_ n, IndexType_ k, 
ValueType_ alpha, const ValueType_ *__restrict__ D, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) { IndexType_ i, j, index; for (j = threadIdx.y + blockIdx.y * blockDim.y; j < k; j += blockDim.y * gridDim.y) { for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { index = i + j * n; if (beta_is_zero) { y[index] = alpha * D[i] * x[index]; } else { y[index] = alpha * D[i] * x[index] + beta * y[index]; } } } } } // namespace // ============================================= // Dense matrix class // ============================================= /// Constructor for dense matrix class /** @param _trans Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. * @param _A (Input, device memory, _m*_n entries) Matrix * entries, stored column-major. * @param _lda Leading dimension of _A. */ template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_, ValueType_>::DenseMatrix( bool _trans, IndexType_ _m, IndexType_ _n, const ValueType_ *_A, IndexType_ _lda) : Matrix<IndexType_, ValueType_>(_m, _n), trans(_trans), A(_A), lda(_lda) { Cublas::set_pointer_mode_host(); if (_lda < _m) FatalError("invalid dense matrix parameter (lda<m)", NVGRAPH_ERR_BAD_PARAMETERS); } /// Destructor for dense matrix class template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_, ValueType_>::~DenseMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::setCUDAStream(hipStream_t _s) { this->s = _s; // printf("DenseMatrix setCUDAStream stream=%p\n",this->s); Cublas::setStream(_s); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::getCUDAStream(hipStream_t *_s) { *_s = this->s; // CHECK_CUBLAS(hipblasGetStream(cublasHandle, _s)); } /// Matrix-vector product for dense matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. 
* @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { Cublas::gemv(this->trans, this->m, this->n, &alpha, this->A, this->lda, x, 1, &beta, y, 1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { Cublas::gemm( this->trans, false, this->m, k, this->n, &alpha, A, lda, x, this->m, &beta, y, this->n); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { printf("ERROR: DenseMatrix prec_setup dispacthed\n"); // exit(1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { printf("ERROR: DenseMatrix prec_solve dispacthed\n"); // exit(1); } template <typename IndexType_, typename ValueType_> ValueType_ DenseMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // CSR matrix class // ============================================= /// Constructor for CSR matrix class /** @param _transA Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. 
* @param _nnz Number of non-zero entries. * @param _descrA Matrix properties. * @param _csrValA (Input, device memory, _nnz entries) Matrix * entry values. * @param _csrRowPtrA (Input, device memory, _m+1 entries) Pointer * to first entry in each row. * @param _csrColIndA (Input, device memory, _nnz entries) Column * index of each matrix entry. */ template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_, ValueType_>::CsrMatrix(bool _trans, bool _sym, IndexType_ _m, IndexType_ _n, IndexType_ _nnz, const hipsparseMatDescr_t _descrA, /*const*/ ValueType_ *_csrValA, const IndexType_ *_csrRowPtrA, const IndexType_ *_csrColIndA) : Matrix<IndexType_, ValueType_>(_m, _n), trans(_trans), sym(_sym), nnz(_nnz), descrA(_descrA), csrValA(_csrValA), csrRowPtrA(_csrRowPtrA), csrColIndA(_csrColIndA) { if (nnz < 0) FatalError("invalid CSR matrix parameter (nnz<0)", NVGRAPH_ERR_BAD_PARAMETERS); Cusparse::set_pointer_mode_host(); } /// Destructor for CSR matrix class template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_, ValueType_>::~CsrMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::setCUDAStream(hipStream_t _s) { this->s = _s; // printf("CsrMatrix setCUDAStream stream=%p\n",this->s); Cusparse::setStream(_s); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::getCUDAStream(hipStream_t *_s) { *_s = this->s; // CHECK_CUSPARSE(hipsparseGetStream(Cusparse::get_handle(), _s)); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // CHECK_CUSPARSE(cusparseXcsrmm(Cusparse::get_handle(), transA, this->m, k, this->n, nnz, &alpha, // descrA, csrValA, csrRowPtrA, csrColIndA, x, this->n, &beta, y, this->m)); Cusparse::csrmm(this->trans, this->sym, this->m, k, this->n, 
this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, this->n, &beta, y, this->m); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // printf("CsrMatrix prec_setup dispacthed\n"); if (!factored) { // analyse lower triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_l)); CHECK_CUSPARSE(hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->m, nnz, descrA, csrValA, csrRowPtrA, csrColIndA, info_l)); // analyse upper triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->m, nnz, descrA, csrValA, csrRowPtrA, csrColIndA, info_u)); // perform csrilu0 (should be slightly faster than csric0) CHECK_CUSPARSE(cusparseXcsrilu0(Cusparse::get_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->m, descrA, csrValA, csrRowPtrA, csrColIndA, info_l)); // set factored flag to true factored = true; } } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { // printf("CsrMatrix prec_solve dispacthed (stream %p)\n",this->s); // 
preconditioning Mx=f (where M = L*U, threfore x=U\(L\f)) // solve lower triangular factor CHECK_CUSPARSE(hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->m, k, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, info_l, fx, this->m, t, this->m)); // solve upper triangular factor CHECK_CUSPARSE(hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(), HIPSPARSE_OPERATION_NON_TRANSPOSE, this->m, k, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, info_u, t, this->m, fx, this->m)); } /// Matrix-vector product for CSR matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. 
*/ template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // TODO: consider using merge-path csrmv Cusparse::csrmv(this->trans, this->sym, this->m, this->n, this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, &beta, y); } template <typename IndexType_, typename ValueType_> ValueType_ CsrMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // Laplacian matrix class // ============================================= /// Constructor for Laplacian matrix class /** @param A Adjacency matrix */ template <typename IndexType_, typename ValueType_> LaplacianMatrix<IndexType_, ValueType_>::LaplacianMatrix( /*const*/ Matrix<IndexType_, ValueType_> &_A) : Matrix<IndexType_, ValueType_>(_A.m, _A.n), A(&_A) { // Check that adjacency matrix is square if (_A.m != _A.n) FatalError("cannot construct Laplacian matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS); // set CUDA stream this->s = NULL; // Construct degree matrix D.allocate(_A.m, this->s); Vector<ValueType_> ones(this->n, this->s); ones.fill(1.0); _A.mv(1, ones.raw(), 0, D.raw()); // Set preconditioning matrix pointer to NULL M = NULL; } /// Destructor for Laplacian matrix class template <typename IndexType_, typename ValueType_> LaplacianMatrix<IndexType_, ValueType_>::~LaplacianMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::setCUDAStream(hipStream_t _s) { this->s = _s; // printf("LaplacianMatrix setCUDAStream stream=%p\n",this->s); A->setCUDAStream(_s); if (M != NULL) { M->setCUDAStream(_s); } } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::getCUDAStream(hipStream_t *_s) { *_s = this->s; // A->getCUDAStream(_s); } /// Matrix-vector product 
for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Scale result vector if (beta == 0) CHECK_CUDA(hipMemset(y, 0, (this->n) * sizeof(ValueType_))) else if (beta != 1) thrust::transform(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y + this->n), thrust::make_constant_iterator(beta), thrust::device_pointer_cast(y), thrust::multiplies<ValueType_>()); // Apply diagonal matrix dim3 gridDim, blockDim; gridDim.x = min(((this->n) + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535); gridDim.y = 1; gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; hipLaunchKernelGGL(( diagmv), dim3(gridDim), dim3(blockDim), 0, A->s, this->n, alpha, D.raw(), x, y); cudaCheckError(); // Apply adjacency matrix A->mv(-alpha, x, 1, y); } /// Matrix-vector product for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. 
*/ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Apply diagonal matrix ValueType_ one = (ValueType_)1.0; this->dm(k, alpha, x, beta, y); // Apply adjacency matrix A->mm(k, -alpha, x, one, y); } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::dm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { IndexType_ t = k * (this->n); dim3 gridDim, blockDim; // setup launch parameters gridDim.x = min(((this->n) + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535); gridDim.y = min(k, 65535); gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; // Apply diagonal matrix if (beta == 0.0) { // set vectors to 0 (WARNING: notice that you need to set, not scale, because of NaNs corner // case) CHECK_CUDA(hipMemset(y, 0, t * sizeof(ValueType_))); hipLaunchKernelGGL(( diagmm<IndexType_, ValueType_, true>) , dim3(gridDim), dim3(blockDim), 0, A->s, this->n, k, alpha, D.raw(), x, beta, y); } else { hipLaunchKernelGGL(( diagmm<IndexType_, ValueType_, false>) , dim3(gridDim), dim3(blockDim), 0, A->s, this->n, k, alpha, D.raw(), x, beta, y); } cudaCheckError(); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // save the pointer to preconditioner M M = _M; if (M != NULL) { // setup the preconditioning matrix M M->prec_setup(NULL); } } template 
<typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { if (M != NULL) { // preconditioning M->prec_solve(k, alpha, fx, t); } } template <typename IndexType_, typename ValueType_> ValueType_ LaplacianMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // Modularity matrix class // ============================================= /// Constructor for Modularity matrix class /** @param A Adjacency matrix */ template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_>::ModularityMatrix( /*const*/ Matrix<IndexType_, ValueType_> &_A, IndexType_ _nnz) : Matrix<IndexType_, ValueType_>(_A.m, _A.n), A(&_A), nnz(_nnz) { // Check that adjacency matrix is square if (_A.m != _A.n) FatalError("cannot construct Modularity matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS); // set CUDA stream this->s = NULL; // Construct degree matrix D.allocate(_A.m, this->s); Vector<ValueType_> ones(this->n, this->s); ones.fill(1.0); _A.mv(1, ones.raw(), 0, D.raw()); // D.dump(0,this->n); edge_sum = D.nrm1(); // Set preconditioning matrix pointer to NULL M = NULL; } /// Destructor for Modularity matrix class template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_>::~ModularityMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::setCUDAStream(hipStream_t _s) { this->s = _s; // printf("ModularityMatrix setCUDAStream stream=%p\n",this->s); A->setCUDAStream(_s); if (M != NULL) { M->setCUDAStream(_s); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::getCUDAStream(hipStream_t *_s) { *_s = this->s; // A->getCUDAStream(_s); } /// Matrix-vector product for Modularity matrix class /** 
y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Scale result vector if (alpha != 1 || beta != 0) FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); // CHECK_CUBLAS(cublasXdot(handle, this->n, const double *x, int incx, const double *y, int incy, // double *result)); // y = A*x A->mv(alpha, x, 0, y); ValueType_ dot_res; // gamma = d'*x Cublas::dot(this->n, D.raw(), 1, x, 1, &dot_res); // y = y -(gamma/edge_sum)*d Cublas::axpy(this->n, -(dot_res / this->edge_sum), D.raw(), 1, y, 1); } /// Matrix-vector product for Modularity matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. 
*/ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::dm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // save the pointer to preconditioner M M = _M; if (M != NULL) { // setup the preconditioning matrix M M->prec_setup(NULL); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { if (M != NULL) { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } } template <typename IndexType_, typename ValueType_> ValueType_ ModularityMatrix<IndexType_, ValueType_>::getEdgeSum() const { return edge_sum; } // Explicit instantiation template class 
Matrix<int, float>; template class Matrix<int, double>; template class DenseMatrix<int, float>; template class DenseMatrix<int, double>; template class CsrMatrix<int, float>; template class CsrMatrix<int, double>; template class LaplacianMatrix<int, float>; template class LaplacianMatrix<int, double>; template class ModularityMatrix<int, float>; template class ModularityMatrix<int, double>; } // namespace nvgraph //#endif
b69183820a13a25760fa04faef20335eebb7ab8c.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#ifdef NVGRAPH_PARTITION //#ifdef DEBUG #include "include/spectral_matrix.hxx" #include <thrust/device_vector.h> #include <thrust/transform.h> #include "include/debug_macros.h" #include "include/nvgraph_cublas.hxx" #include "include/nvgraph_cusparse.hxx" #include "include/nvgraph_error.hxx" #include "include/nvgraph_vector.hxx" // ========================================================= // Useful macros // ========================================================= // CUDA block size #define BLOCK_SIZE 1024 // Get index of matrix entry #define IDX(i, j, lda) ((i) + (j) * (lda)) namespace nvgraph { // ============================================= // CUDA kernels // ============================================= namespace { /// Apply diagonal matrix to vector template <typename IndexType_, typename ValueType_> static __global__ void diagmv(IndexType_ n, ValueType_ alpha, const ValueType_ *__restrict__ D, const ValueType_ *__restrict__ x, ValueType_ *__restrict__ y) { IndexType_ i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { y[i] += alpha * D[i] * x[i]; i += blockDim.x * gridDim.x; } } /// Apply diagonal matrix to a set of dense vectors (tall matrix) template <typename IndexType_, typename ValueType_, bool beta_is_zero> static __global__ void diagmm(IndexType_ n, IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ D, const ValueType_ *__restrict__ x, ValueType_ 
beta, ValueType_ *__restrict__ y) { IndexType_ i, j, index; for (j = threadIdx.y + blockIdx.y * blockDim.y; j < k; j += blockDim.y * gridDim.y) { for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { index = i + j * n; if (beta_is_zero) { y[index] = alpha * D[i] * x[index]; } else { y[index] = alpha * D[i] * x[index] + beta * y[index]; } } } } } // namespace // ============================================= // Dense matrix class // ============================================= /// Constructor for dense matrix class /** @param _trans Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. * @param _A (Input, device memory, _m*_n entries) Matrix * entries, stored column-major. * @param _lda Leading dimension of _A. */ template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_, ValueType_>::DenseMatrix( bool _trans, IndexType_ _m, IndexType_ _n, const ValueType_ *_A, IndexType_ _lda) : Matrix<IndexType_, ValueType_>(_m, _n), trans(_trans), A(_A), lda(_lda) { Cublas::set_pointer_mode_host(); if (_lda < _m) FatalError("invalid dense matrix parameter (lda<m)", NVGRAPH_ERR_BAD_PARAMETERS); } /// Destructor for dense matrix class template <typename IndexType_, typename ValueType_> DenseMatrix<IndexType_, ValueType_>::~DenseMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) { this->s = _s; // printf("DenseMatrix setCUDAStream stream=%p\n",this->s); Cublas::setStream(_s); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t *_s) { *_s = this->s; // CHECK_CUBLAS(cublasGetStream(cublasHandle, _s)); } /// Matrix-vector product for dense matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. 
* @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { Cublas::gemv(this->trans, this->m, this->n, &alpha, this->A, this->lda, x, 1, &beta, y, 1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { Cublas::gemm( this->trans, false, this->m, k, this->n, &alpha, A, lda, x, this->m, &beta, y, this->n); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { printf("ERROR: DenseMatrix prec_setup dispacthed\n"); // exit(1); } template <typename IndexType_, typename ValueType_> void DenseMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { printf("ERROR: DenseMatrix prec_solve dispacthed\n"); // exit(1); } template <typename IndexType_, typename ValueType_> ValueType_ DenseMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // CSR matrix class // ============================================= /// Constructor for CSR matrix class /** @param _transA Whether to transpose matrix. * @param _m Number of rows. * @param _n Number of columns. * @param _nnz Number of non-zero entries. * @param _descrA Matrix properties. 
* @param _csrValA (Input, device memory, _nnz entries) Matrix * entry values. * @param _csrRowPtrA (Input, device memory, _m+1 entries) Pointer * to first entry in each row. * @param _csrColIndA (Input, device memory, _nnz entries) Column * index of each matrix entry. */ template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_, ValueType_>::CsrMatrix(bool _trans, bool _sym, IndexType_ _m, IndexType_ _n, IndexType_ _nnz, const cusparseMatDescr_t _descrA, /*const*/ ValueType_ *_csrValA, const IndexType_ *_csrRowPtrA, const IndexType_ *_csrColIndA) : Matrix<IndexType_, ValueType_>(_m, _n), trans(_trans), sym(_sym), nnz(_nnz), descrA(_descrA), csrValA(_csrValA), csrRowPtrA(_csrRowPtrA), csrColIndA(_csrColIndA) { if (nnz < 0) FatalError("invalid CSR matrix parameter (nnz<0)", NVGRAPH_ERR_BAD_PARAMETERS); Cusparse::set_pointer_mode_host(); } /// Destructor for CSR matrix class template <typename IndexType_, typename ValueType_> CsrMatrix<IndexType_, ValueType_>::~CsrMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) { this->s = _s; // printf("CsrMatrix setCUDAStream stream=%p\n",this->s); Cusparse::setStream(_s); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t *_s) { *_s = this->s; // CHECK_CUSPARSE(cusparseGetStream(Cusparse::get_handle(), _s)); } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // CHECK_CUSPARSE(cusparseXcsrmm(Cusparse::get_handle(), transA, this->m, k, this->n, nnz, &alpha, // descrA, csrValA, csrRowPtrA, csrColIndA, x, this->n, &beta, y, this->m)); Cusparse::csrmm(this->trans, this->sym, this->m, k, this->n, this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, this->n, 
&beta, y, this->m); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Incomplete Cholesky (setup, factor and solve) template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // printf("CsrMatrix prec_setup dispacthed\n"); if (!factored) { // analyse lower triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_l)); CHECK_CUSPARSE(cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, this->m, nnz, descrA, csrValA, csrRowPtrA, csrColIndA, info_l)); // analyse upper triangular factor CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_analysis(Cusparse::get_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, this->m, nnz, descrA, csrValA, csrRowPtrA, csrColIndA, info_u)); // perform csrilu0 (should be slightly faster than csric0) CHECK_CUSPARSE(cusparseXcsrilu0(Cusparse::get_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, this->m, descrA, csrValA, csrRowPtrA, csrColIndA, info_l)); // set factored flag to true factored = true; } } template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { // printf("CsrMatrix prec_solve dispacthed (stream %p)\n",this->s); // preconditioning Mx=f (where M = L*U, threfore x=U\(L\f)) // solve lower triangular factor 
CHECK_CUSPARSE(cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, this->m, k, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, info_l, fx, this->m, t, this->m)); // solve upper triangular factor CHECK_CUSPARSE(cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseXcsrsm_solve(Cusparse::get_handle(), CUSPARSE_OPERATION_NON_TRANSPOSE, this->m, k, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, info_u, t, this->m, fx, this->m)); } /// Matrix-vector product for CSR matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void CsrMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // TODO: consider using merge-path csrmv Cusparse::csrmv(this->trans, this->sym, this->m, this->n, this->nnz, &alpha, this->csrValA, this->csrRowPtrA, this->csrColIndA, x, &beta, y); } template <typename IndexType_, typename ValueType_> ValueType_ CsrMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // Laplacian matrix class // ============================================= /// Constructor for Laplacian matrix class /** @param A Adjacency matrix */ template <typename IndexType_, typename ValueType_> LaplacianMatrix<IndexType_, ValueType_>::LaplacianMatrix( /*const*/ Matrix<IndexType_, ValueType_> &_A) : Matrix<IndexType_, ValueType_>(_A.m, _A.n), A(&_A) { // Check that adjacency matrix is square if (_A.m != _A.n) FatalError("cannot construct Laplacian 
matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS); // set CUDA stream this->s = NULL; // Construct degree matrix D.allocate(_A.m, this->s); Vector<ValueType_> ones(this->n, this->s); ones.fill(1.0); _A.mv(1, ones.raw(), 0, D.raw()); // Set preconditioning matrix pointer to NULL M = NULL; } /// Destructor for Laplacian matrix class template <typename IndexType_, typename ValueType_> LaplacianMatrix<IndexType_, ValueType_>::~LaplacianMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) { this->s = _s; // printf("LaplacianMatrix setCUDAStream stream=%p\n",this->s); A->setCUDAStream(_s); if (M != NULL) { M->setCUDAStream(_s); } } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t *_s) { *_s = this->s; // A->getCUDAStream(_s); } /// Matrix-vector product for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. 
*/ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Scale result vector if (beta == 0) CHECK_CUDA(cudaMemset(y, 0, (this->n) * sizeof(ValueType_))) else if (beta != 1) thrust::transform(thrust::device_pointer_cast(y), thrust::device_pointer_cast(y + this->n), thrust::make_constant_iterator(beta), thrust::device_pointer_cast(y), thrust::multiplies<ValueType_>()); // Apply diagonal matrix dim3 gridDim, blockDim; gridDim.x = min(((this->n) + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535); gridDim.y = 1; gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; diagmv<<<gridDim, blockDim, 0, A->s>>>(this->n, alpha, D.raw(), x, y); cudaCheckError(); // Apply adjacency matrix A->mv(-alpha, x, 1, y); } /// Matrix-vector product for Laplacian matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. 
*/ template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Apply diagonal matrix ValueType_ one = (ValueType_)1.0; this->dm(k, alpha, x, beta, y); // Apply adjacency matrix A->mm(k, -alpha, x, one, y); } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::dm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { IndexType_ t = k * (this->n); dim3 gridDim, blockDim; // setup launch parameters gridDim.x = min(((this->n) + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535); gridDim.y = min(k, 65535); gridDim.z = 1; blockDim.x = BLOCK_SIZE; blockDim.y = 1; blockDim.z = 1; // Apply diagonal matrix if (beta == 0.0) { // set vectors to 0 (WARNING: notice that you need to set, not scale, because of NaNs corner // case) CHECK_CUDA(cudaMemset(y, 0, t * sizeof(ValueType_))); diagmm<IndexType_, ValueType_, true> <<<gridDim, blockDim, 0, A->s>>>(this->n, k, alpha, D.raw(), x, beta, y); } else { diagmm<IndexType_, ValueType_, false> <<<gridDim, blockDim, 0, A->s>>>(this->n, k, alpha, D.raw(), x, beta, y); } cudaCheckError(); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { } template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void LaplacianMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // save the pointer to preconditioner M M = _M; if (M != NULL) { // setup the preconditioning matrix M M->prec_setup(NULL); } } template <typename IndexType_, typename ValueType_> void 
LaplacianMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { if (M != NULL) { // preconditioning M->prec_solve(k, alpha, fx, t); } } template <typename IndexType_, typename ValueType_> ValueType_ LaplacianMatrix<IndexType_, ValueType_>::getEdgeSum() const { return 0.0; } // ============================================= // Modularity matrix class // ============================================= /// Constructor for Modularity matrix class /** @param A Adjacency matrix */ template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_>::ModularityMatrix( /*const*/ Matrix<IndexType_, ValueType_> &_A, IndexType_ _nnz) : Matrix<IndexType_, ValueType_>(_A.m, _A.n), A(&_A), nnz(_nnz) { // Check that adjacency matrix is square if (_A.m != _A.n) FatalError("cannot construct Modularity matrix from non-square adjacency matrix", NVGRAPH_ERR_BAD_PARAMETERS); // set CUDA stream this->s = NULL; // Construct degree matrix D.allocate(_A.m, this->s); Vector<ValueType_> ones(this->n, this->s); ones.fill(1.0); _A.mv(1, ones.raw(), 0, D.raw()); // D.dump(0,this->n); edge_sum = D.nrm1(); // Set preconditioning matrix pointer to NULL M = NULL; } /// Destructor for Modularity matrix class template <typename IndexType_, typename ValueType_> ModularityMatrix<IndexType_, ValueType_>::~ModularityMatrix() { } /// Get and Set CUDA stream template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::setCUDAStream(cudaStream_t _s) { this->s = _s; // printf("ModularityMatrix setCUDAStream stream=%p\n",this->s); A->setCUDAStream(_s); if (M != NULL) { M->setCUDAStream(_s); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::getCUDAStream(cudaStream_t *_s) { *_s = this->s; // A->getCUDAStream(_s); } /// Matrix-vector product for Modularity matrix class /** y is overwritten with alpha*A*x+beta*y. 
* * @param alpha Scalar. * @param x (Input, device memory, n entries) Vector. * @param beta Scalar. * @param y (Input/output, device memory, m entries) Output vector. */ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::mv(ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { // Scale result vector if (alpha != 1 || beta != 0) FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); // CHECK_CUBLAS(cublasXdot(handle, this->n, const double *x, int incx, const double *y, int incy, // double *result)); // y = A*x A->mv(alpha, x, 0, y); ValueType_ dot_res; // gamma = d'*x Cublas::dot(this->n, D.raw(), 1, x, 1, &dot_res); // y = y -(gamma/edge_sum)*d Cublas::axpy(this->n, -(dot_res / this->edge_sum), D.raw(), 1, y, 1); } /// Matrix-vector product for Modularity matrix class /** y is overwritten with alpha*A*x+beta*y. * * @param alpha Scalar. * @param x (Input, device memory, n*k entries) nxk dense matrix. * @param beta Scalar. * @param y (Input/output, device memory, m*k entries) Output mxk dense matrix. 
*/ template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::mm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::dm(IndexType_ k, ValueType_ alpha, const ValueType_ *__restrict__ x, ValueType_ beta, ValueType_ *__restrict__ y) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Color and Reorder template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::color(IndexType_ *c, IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::reorder(IndexType_ *p) const { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } /// Solve preconditioned system M x = f for a set of k vectors template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::prec_setup(Matrix<IndexType_, ValueType_> *_M) { // save the pointer to preconditioner M M = _M; if (M != NULL) { // setup the preconditioning matrix M M->prec_setup(NULL); } } template <typename IndexType_, typename ValueType_> void ModularityMatrix<IndexType_, ValueType_>::prec_solve(IndexType_ k, ValueType_ alpha, ValueType_ *__restrict__ fx, ValueType_ *__restrict__ t) const { if (M != NULL) { FatalError("This isn't implemented for Modularity Matrix currently", NVGRAPH_ERR_NOT_IMPLEMENTED); } } template <typename IndexType_, typename ValueType_> ValueType_ ModularityMatrix<IndexType_, ValueType_>::getEdgeSum() const { return edge_sum; } // Explicit instantiation template class 
Matrix<int, float>; template class Matrix<int, double>; template class DenseMatrix<int, float>; template class DenseMatrix<int, double>; template class CsrMatrix<int, float>; template class CsrMatrix<int, double>; template class LaplacianMatrix<int, float>; template class LaplacianMatrix<int, double>; template class ModularityMatrix<int, float>; template class ModularityMatrix<int, double>; } // namespace nvgraph //#endif
57009aec7ec80182080c0c7d15581d3f89e36528.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int x = blockIdx.x; int y = blockIdx.y; uchar4 colorValue = rgbaImage[x * numCols + y]; float greyValue = 0.299f * colorValue.x + 0.587f * colorValue.y + 0.114f * colorValue.z; greyImage[x * numCols + y] = greyValue; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 threadsPerBlock( 1, 1, 1); //TODO const dim3 numberOfBlocks( numRows, numCols, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
57009aec7ec80182080c0c7d15581d3f89e36528.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int x = blockIdx.x; int y = blockIdx.y; uchar4 colorValue = rgbaImage[x * numCols + y]; float greyValue = 0.299f * colorValue.x + 0.587f * colorValue.y + 0.114f * colorValue.z; greyImage[x * numCols + y] = greyValue; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 threadsPerBlock( 1, 1, 1); //TODO const dim3 numberOfBlocks( numRows, numCols, 1); //TODO rgba_to_greyscale<<<numberOfBlocks, threadsPerBlock>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }