hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
cffe4b78f7f1487913ebd68e90f77cef2f04acd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
typedef unsigned short int u16;
typedef unsigned int u32;
#define NUM_ELEMENTS 2048
#define MAX_NUM_LISTS 16
// Host reference implementation: LSB-first binary radix sort of `data`.
// Each of the 32 passes performs a stable partition on one bit, zeros
// before ones, so after the final pass the array is fully sorted.
// num_elements must not exceed NUM_ELEMENTS (size of the static scratch).
__host__ void cpu_sort(u32 * const data, const u32 num_elements)
{
    static u32 cpu_tmp_0[NUM_ELEMENTS];
    static u32 cpu_tmp_1[NUM_ELEMENTS];
    for (u32 bit = 0; bit < 32; ++bit)
    {
        const u32 bit_mask = (1 << bit);
        u32 zero_count = 0;
        u32 one_count = 0;
        // Stable partition by the current bit.
        for (u32 i = 0; i < num_elements; ++i)
        {
            const u32 value = data[i];
            if ((value & bit_mask) > 0)
                cpu_tmp_1[one_count++] = value;
            else
                cpu_tmp_0[zero_count++] = value;
        }
        // Write back: the zero list first, then the one list.
        for (u32 i = 0; i < zero_count; ++i)
            data[i] = cpu_tmp_0[i];
        for (u32 i = 0; i < one_count; ++i)
            data[zero_count + i] = cpu_tmp_1[i];
    }
}
// Device radix sort over `num_lists` interleaved sub-lists held in shared
// memory: thread `tid` sorts the list occupying offsets tid, tid+num_lists,
// ...  Must be executed by (at least) num_lists threads of one block;
// sort_tmp, sort_tmp_0 and sort_tmp_1 must each hold num_elements values.
__device__ void radix_sort(u32 * const sort_tmp,
                           const u32 num_lists,
                           const u32 num_elements,
                           const u32 tid,
                           u32 * const sort_tmp_0,
                           u32 * const sort_tmp_1)
{
    // One pass per bit, LSB first: stable partition into a zero list and a
    // one list, then concatenate zeros followed by ones.
    for (u32 bit = 0; bit < 32; bit++)
    {
        // Hoisted loop invariant: the original recomputed bit_mask for
        // every element; radix_sort2 already hoists it, so be consistent.
        const u32 bit_mask = (1 << bit);
        u32 base_cnt_0 = 0;
        u32 base_cnt_1 = 0;
        for (u32 i = 0; i < num_elements; i += num_lists)
        {
            const u32 elem = sort_tmp[i + tid];
            if ((elem & bit_mask) > 0)
            {
                sort_tmp_1[base_cnt_1 + tid] = elem;
                base_cnt_1 += num_lists;
            }
            else
            {
                sort_tmp_0[base_cnt_0 + tid] = elem;
                base_cnt_0 += num_lists;
            }
        }
        // Copy data back to source - first the zero list...
        for (u32 i = 0; i < base_cnt_0; i += num_lists)
        {
            sort_tmp[i + tid] = sort_tmp_0[i + tid];
        }
        // ...then the one list, appended after the zeros.
        for (u32 i = 0; i < base_cnt_1; i += num_lists)
        {
            sort_tmp[base_cnt_0 + i + tid] = sort_tmp_1[i + tid];
        }
    }
    __syncthreads();
}
// Device radix sort (optimised variant): thread `tid` sorts the interleaved
// sub-list at offsets tid, tid+num_lists, ...  Zero-bit elements are
// compacted in place into sort_tmp; one-bit elements are staged in
// sort_tmp_1 and appended afterwards.  sort_tmp_0 is retained in the
// signature for interface compatibility but is no longer used.
__device__ void radix_sort2(u32 * const sort_tmp,
                            const u32 num_lists,
                            const u32 num_elements,
                            const u32 tid,
                            u32 * const sort_tmp_0,
                            u32 * const sort_tmp_1)
{
    (void)sort_tmp_0; // unused since the in-place zero-list fix below
    for (u32 bit = 0; bit < 32; bit++)
    {
        const u32 bit_mask = (1 << bit);
        u32 base_cnt_0 = 0;
        u32 base_cnt_1 = 0;
        for (u32 i = 0; i < num_elements; i += num_lists)
        {
            const u32 elem = sort_tmp[i + tid];
            if ((elem & bit_mask) > 0)
            {
                sort_tmp_1[base_cnt_1 + tid] = elem;
                base_cnt_1 += num_lists;
            }
            else
            {
                // BUG FIX: the original wrote zero-bit elements into
                // sort_tmp_0 and never copied them back, discarding the
                // entire zero list each pass.  Writing straight into
                // sort_tmp is safe because base_cnt_0 <= i here, so the
                // destination slot of this thread's list was already read.
                sort_tmp[base_cnt_0 + tid] = elem;
                base_cnt_0 += num_lists;
            }
        }
        // Append the staged one-list after the in-place zero-list.
        for (u32 i = 0; i < base_cnt_1; i += num_lists)
        {
            sort_tmp[base_cnt_0 + i + tid] = sort_tmp_1[i + tid];
        }
    }
    __syncthreads();
}
// Host-side merge helper: return the smallest current head element across
// `num_lists` sorted interleaved lists in src_array, and advance that
// list's consumption counter.  list_indexes[i] counts elements of list i
// already consumed; exhausted lists are skipped.  The caller must invoke
// this exactly num_elements times so no counter runs past its list.
u32 find_min(const u32 * const src_array,
             u32 * const list_indexes,
             const u32 num_lists,
             const u32 num_elements_per_list)
{
    // BUG FIX: the sentinel was 0xFFFFFFF (seven F's = 2^28 - 1), so any
    // data value above 0x0FFFFFFF compared greater than the initial
    // minimum and could be skipped, advancing the wrong list.
    u32 min_val = 0xFFFFFFFF;
    u32 min_idx = 0;
    // Iterate over each of the lists
    for (u32 i = 0; i < num_lists; i++)
    {
        // If the current list has already been emptied then ignore it
        if (list_indexes[i] < num_elements_per_list)
        {
            // Interleaved layout: element k of list i lives at i + k*num_lists.
            const u32 src_idx = i + (list_indexes[i] * num_lists);
            const u32 data = src_array[src_idx];
            // <= keeps the last list on ties and still accepts 0xFFFFFFFF data.
            if (data <= min_val)
            {
                min_val = data;
                min_idx = i;
            }
        }
    }
    list_indexes[min_idx]++;
    return min_val;
}
// Host-side merge: combines the num_lists sorted interleaved sub-lists in
// src_array into one fully sorted dest_array by repeatedly extracting the
// global minimum with find_min().  num_lists must not exceed MAX_NUM_LISTS.
void merge_array(const u32 * const src_array,
                 u32 * const dest_array,
                 const u32 num_lists,
                 const u32 num_elements)
{
    const u32 num_elements_per_list = (num_elements / num_lists);
    // Per-list consumption counters, all starting at zero.
    unsigned int list_indexes[MAX_NUM_LISTS] = {0};
    for (u32 out = 0; out < num_elements; ++out)
    {
        dest_array[out] = find_min(src_array, list_indexes,
                                   num_lists, num_elements_per_list);
    }
}
// Cooperatively copy num_elements u32 values from global `data` into shared
// `sort_tmp`: thread `tid` copies offsets tid, tid+num_lists, ... — the
// same interleaved layout radix_sort2/merge_array1 consume.  Must be
// executed by num_lists threads of one block.
__device__ void copy_data_to_shared(const u32 * const data,
                                    u32 * const sort_tmp,
                                    const u32 num_lists,
                                    const u32 num_elements,
                                    const u32 tid)
{
    // BUG FIX: the original strode by 1, so every thread copied the whole
    // array shifted by tid — racing on sort_tmp and reading data[] up to
    // num_lists-1 elements past the end.  Stride by num_lists instead.
    for (u32 i = 0; i < num_elements; i += num_lists)
    {
        sort_tmp[i + tid] = data[i + tid];
    }
    __syncthreads();
}
// Uses a single thread for merge.
// Merges the num_lists sorted interleaved sub-lists in src_array into
// dest_array: all threads clear the shared per-list counters, then thread 0
// alone repeatedly scans every list head and emits the global minimum.
// NOTE(review): every thread writes list_indexes[tid] (MAX_NUM_LISTS
// slots), so this appears to require blockDim.x == num_lists <=
// MAX_NUM_LISTS — confirm at the launch site.
__device__ void merge_array1(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
__shared__ u32 list_indexes[MAX_NUM_LISTS];
// Multiple threads
list_indexes[tid] = 0;
__syncthreads();
// Single threaded
if(tid == 0)
{
const u32 num_elements_per_list = (num_elements / num_lists);
for (u32 i = 0; i < num_elements; i++)
{
u32 min_val = 0xFFFFFFFF;
u32 min_idx = 0;
// Iterate over each of the lists
for(u32 list=0; list<num_lists;list++)
{
//If the current list has already been emptied then ignored it
if(list_indexes[list] < num_elements_per_list)
{
// Interleaved layout: element k of list `list` lives at list + k*num_lists.
const u32 src_idx = list + (list_indexes[list] * num_lists);
const u32 data = src_array[src_idx];
if(data <= min_val)
{
min_val = data;
min_idx = list;
}
}
}
// Consume the winning element and emit it as the next sorted value.
list_indexes[min_idx]++;
dest_array[i] = min_val;
}
}
}
// Kernel: sorts num_elements u32 values in `data` by (1) staging them into
// shared memory, (2) radix-sorting num_lists interleaved sub-lists in
// parallel, and (3) merging the sub-lists back into `data`.
// Shared-memory footprint: 3 * NUM_ELEMENTS u32 values.
// NOTE(review): merge_array1 indexes list_indexes[tid] (MAX_NUM_LISTS
// slots), so this kernel appears to require gridDim.x == 1 and
// blockDim.x == num_lists <= MAX_NUM_LISTS; the launch in
// execute_gpu_functions passes NUM_ELEMENTS threads — verify.
__global__ void gpu_sort_array_array(u32 * const data,
const u32 num_lists,
const u32 num_elements)
{
const u32 tid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ u32 sort_tmp[NUM_ELEMENTS];
__shared__ u32 sort_tmp_0[NUM_ELEMENTS];
__shared__ u32 sort_tmp_1[NUM_ELEMENTS];
copy_data_to_shared(data, sort_tmp, num_lists,
num_elements, tid);
radix_sort2(sort_tmp, num_lists, num_elements, tid, sort_tmp_0, sort_tmp_1);
merge_array1(sort_tmp, data, num_lists, num_elements, tid);
}
// Uses multiple threads for merge.
// Deals with multiple identical entries in the data.
// Every iteration each thread offers its list's head element; an atomicMin
// tournament on min_val picks the smallest, then a second atomicMin on the
// thread id breaks ties so exactly one thread commits the result and
// advances its list.
// NOTE(review): every thread indexes list_indexes[tid] (MAX_NUM_LISTS
// slots), so this appears to require blockDim.x == num_lists <=
// MAX_NUM_LISTS — confirm at the call site.
__device__ void merge_array6(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
list_indexes[tid] = 0;
//Wait for list_indexes[tid] to be cleared
__syncthreads();
//Iterate over all elements
for(u32 i=0; i<num_elements; i++)
{
//Create a value shared with other threads
__shared__ u32 min_val;
__shared__ u32 min_tid;
// Use a temp register for work purposes
u32 data;
//If the current list has not already been
//emptied then read from it, else ignore it
if(list_indexes[tid] < num_elements_per_list)
{
//Work out from the list_index, the index into
// the linear array
const u32 src_idx = tid + (list_indexes[tid] * num_lists);
//Read the data from the list for the given
// thread
data = src_array[src_idx];
}
else
{
// Exhausted list: offer the maximum value so this thread never wins.
data = 0xFFFFFFFF;
}
//Have thread zero clear the min values
if(tid == 0)
{
// Write a very large value so the first
// thread wins with the min
min_val = 0xFFFFFFFF;
min_tid = 0xFFFFFFFF;
}
// Wait for all threads
__syncthreads();
// Have every thread try to store it's value into
// min_val. Only the thread with the lowest value
// will win.
atomicMin(&min_val, data);
//Make sure all threads have taken their turn
__syncthreads();
// If this thread was the one with the minimum
if(min_val == data)
{
// Check for equal values
// Lowest tid wins, and does the write
atomicMin(&min_tid, tid);
}
// Make sure all threads have taken their turn.
__syncthreads();
// If this thread has the lowest tid
if(tid == min_tid)
{
// Increment the list pointer for this thread
list_indexes[tid]++;
// Store the winning value
dest_array[i] = data;
}
}
}
// Uses multiple threads for reduction type merge.
// Every iteration each thread loads its list's head into shared memory and
// a tree reduction (halving tid_max each round) finds the global minimum
// value and its owning list; thread 0 then commits the winner and advances
// that list's counter.
// NOTE(review): the halving reduction only pairs elements correctly when
// num_lists is a power of two, and every thread indexes
// list_indexes[tid] (MAX_NUM_LISTS slots) — confirm callers guarantee
// blockDim.x == num_lists <= MAX_NUM_LISTS and num_lists a power of two.
__device__ void merge_array5(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
__shared__ u32 reduction_val[MAX_NUM_LISTS];
__shared__ u32 reduction_idx[MAX_NUM_LISTS];
//Clear the working sets
list_indexes[tid] = 0;
reduction_val[tid] = 0;
reduction_idx[tid] = 0;
__syncthreads();
for(u32 i=0; i<num_elements; i++)
{
// We need (num_lists / 2) active threads
u32 tid_max = num_lists >> 1;
u32 data;
// If the current list has already been
// emptied then ignore it
if(list_indexes[tid] < num_elements_per_list)
{
const u32 src_idx = tid + (list_indexes[tid] * num_lists);
data = src_array[src_idx];
}
else
{
// Exhausted list: offer the maximum value so it never wins the reduction.
data = 0xFFFFFFFF;
}
reduction_val[tid] = data;
reduction_idx[tid] = tid;
__syncthreads();
// Pairwise min-reduction: each round folds the upper half onto the lower.
while(tid_max != 0)
{
if(tid < tid_max)
{
const u32 val2_idx = tid + tid_max;
const u32 val2 = reduction_val[val2_idx];
if(reduction_val[tid] > val2)
{
reduction_val[tid] = val2;
reduction_idx[tid] = reduction_idx[val2_idx];
}
}
tid_max >>= 1;
// Barrier outside the divergent branch: all threads reach it each round.
__syncthreads();
}
if(tid == 0)
{
// Slot 0 now holds the global minimum and the list it came from.
list_indexes[reduction_idx[0]]++;
dest_array[i] = reduction_val[0];
}
__syncthreads();
}
}
#define REDUCTION_SIZE 8
#define REDUCTION_SIZE_BIT_SHIFT 3
#define MAX_ACTIVE_REDUCTIONS ((MAX_NUM_LISTS) / REDUCTION_SIZE)
// Hierarchical multi-thread merge: threads are grouped into clusters of
// REDUCTION_SIZE (= 8).  Each cluster atomically min-reduces its members'
// current list heads into min_val[s_idx]; the per-cluster minima are then
// reduced into min_val[0].  The owning thread (lowest tid on ties, via
// atomicMin on min_tid) commits the value and refills its `data` register
// from its list.
// NOTE(review): appears to assume blockDim.x == num_lists <= MAX_NUM_LISTS
// (list_indexes[tid] and the initial src_array[tid] load) and num_lists a
// multiple of REDUCTION_SIZE — confirm at the call site.
__device__ void merge_array9(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
// Each thread caches its current list head in a register.
u32 data = src_array[tid];
// Which reduction cluster this thread belongs to.
const u32 s_idx = tid >> REDUCTION_SIZE_BIT_SHIFT;
const u32 num_reductions = num_lists >> REDUCTION_SIZE_BIT_SHIFT;
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
list_indexes[tid] = 0;
for(u32 i=0; i<num_elements; i++)
{
__shared__ u32 min_val[MAX_ACTIVE_REDUCTIONS];
__shared__ u32 min_tid;
if(tid < num_lists)
{
// Reset the tournament slots (several threads write the same value).
min_val[s_idx] = 0xFFFFFFFF;
min_tid = 0xFFFFFFFF;
}
__syncthreads();
// Stage 1: per-cluster minimum.
atomicMin(&min_val[s_idx], data);
if(num_reductions > 0)
{
__syncthreads();
// Stage 2: fold the cluster minima into slot 0.
if(tid < num_reductions)
{
atomicMin(&min_val[0], min_val[tid]);
}
__syncthreads();
}
// Threads holding the winning value race for the lowest tid.
if(min_val[0] == data)
{
atomicMin(&min_tid, tid);
}
__syncthreads();
if(tid == min_tid)
{
// Winner commits its value and advances to its list's next element
// (or the sentinel once the list is exhausted).
list_indexes[tid]++;
dest_array[i] = data;
if(list_indexes[tid] < num_elements_per_list)
{
data = src_array[tid + (list_indexes[tid] * num_lists)];
}
else
{
data = 0xFFFFFFFF;
}
}
__syncthreads();
}
}
void execute_host_functions()
{
}
// Host driver: fills an array with NUM_ELEMENTS..1 (descending, worst case
// for the sort), sorts it on the GPU with gpu_sort_array_array, and prints
// input/output pairs.
void execute_gpu_functions()
{
    u32 *d = NULL;
    unsigned int idata[NUM_ELEMENTS], odata[NUM_ELEMENTS];
    int i;
    for (i = 0; i < NUM_ELEMENTS; i++) {
        // idata[i] = (unsigned int) i;
        idata[i] = (unsigned int)NUM_ELEMENTS - i;
    }
    if (hipMalloc((void **)&d, sizeof(u32) * NUM_ELEMENTS) != hipSuccess) {
        fprintf(stderr, "hipMalloc failed\n");
        return;
    }
    hipMemcpy(d, idata, sizeof(u32) * NUM_ELEMENTS, hipMemcpyHostToDevice);
    // BUG FIX: launch one block of MAX_NUM_LISTS threads, not NUM_ELEMENTS.
    // 2048 threads/block exceeds the hardware limit, and the kernel's merge
    // stage writes list_indexes[tid] into MAX_NUM_LISTS-sized shared
    // arrays, so it requires blockDim.x == num_lists.
    hipLaunchKernelGGL((gpu_sort_array_array), dim3(1), dim3(MAX_NUM_LISTS), 0, 0, d, MAX_NUM_LISTS, NUM_ELEMENTS);
    hipDeviceSynchronize(); // Wait for the GPU launched work to complete
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "kernel failed: %s\n", hipGetErrorString(err));
    }
    hipMemcpy(odata, d, sizeof(u32) * NUM_ELEMENTS, hipMemcpyDeviceToHost);
    for (i = 0; i < NUM_ELEMENTS; i++) {
        printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
    }
    hipFree((void *)d);
    hipDeviceReset();
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
execute_host_functions();
execute_gpu_functions();
return 0;
}
| cffe4b78f7f1487913ebd68e90f77cef2f04acd3.cu | #include <stdio.h>
#include <stdlib.h>
typedef unsigned short int u16;
typedef unsigned int u32;
#define NUM_ELEMENTS 2048
#define MAX_NUM_LISTS 16
__host__ void cpu_sort(u32 * const data, const u32 num_elements)
{
static u32 cpu_tmp_0[NUM_ELEMENTS];
static u32 cpu_tmp_1[NUM_ELEMENTS];
for(u32 bit=0;bit<32;bit++)
{
const u32 bit_mask = (1 << bit);
u32 base_cnt_0 = 0;
u32 base_cnt_1 = 0;
for(u32 i=0; i<num_elements; i++)
{
const u32 d = data[i];
if((d & bit_mask) > 0)
{
cpu_tmp_1[base_cnt_1] = d;
base_cnt_1++;
}
else
{
cpu_tmp_0[base_cnt_0] = d;
base_cnt_0++;
}
}
// Copy data back to the source
// First the zero list, then the one list
for(u32 i=0; i<base_cnt_0; i++)
{
data[i] = cpu_tmp_0[i];
}
for(u32 i = 0; i<base_cnt_1; i++)
{
data[base_cnt_0+i] = cpu_tmp_1[i];
}
}
}
__device__ void radix_sort(u32 * const sort_tmp,
const u32 num_lists,
const u32 num_elements,
const u32 tid,
u32 * const sort_tmp_0,
u32 * const sort_tmp_1)
{
//Sort into num_list, listd
//Apply radix sort on 32 bits of data
for(u32 bit=0;bit<32;bit++)
{
u32 base_cnt_0 = 0;
u32 base_cnt_1 = 0;
for(u32 i=0; i<num_elements; i+=num_lists)
{
const u32 elem = sort_tmp[i+tid];
const u32 bit_mask = (1 << bit);
if((elem & bit_mask) > 0)
{
sort_tmp_1[base_cnt_1+tid] = elem;
base_cnt_1+=num_lists;
}
else
{
sort_tmp_0[base_cnt_0+tid] = elem;
base_cnt_0+=num_lists;
}
}
// Copy data back to source - first the zero list
for(u32 i=0;i<base_cnt_0;i+=num_lists)
{
sort_tmp[i+tid] = sort_tmp_0[i+tid];
}
//Copy data back to source - then the one list
for(u32 i=0;i<base_cnt_1; i+=num_lists)
{
sort_tmp[base_cnt_0+i+tid] = sort_tmp_1[i+tid];
}
}
__syncthreads();
}
// Device radix sort (optimised variant): thread `tid` sorts the interleaved
// sub-list at offsets tid, tid+num_lists, ...  Zero-bit elements are
// compacted in place into sort_tmp; one-bit elements are staged in
// sort_tmp_1 and appended afterwards.  sort_tmp_0 is retained in the
// signature for interface compatibility but is no longer used.
__device__ void radix_sort2(u32 * const sort_tmp,
                            const u32 num_lists,
                            const u32 num_elements,
                            const u32 tid,
                            u32 * const sort_tmp_0,
                            u32 * const sort_tmp_1)
{
    (void)sort_tmp_0; // unused since the in-place zero-list fix below
    for (u32 bit = 0; bit < 32; bit++)
    {
        const u32 bit_mask = (1 << bit);
        u32 base_cnt_0 = 0;
        u32 base_cnt_1 = 0;
        for (u32 i = 0; i < num_elements; i += num_lists)
        {
            const u32 elem = sort_tmp[i + tid];
            if ((elem & bit_mask) > 0)
            {
                sort_tmp_1[base_cnt_1 + tid] = elem;
                base_cnt_1 += num_lists;
            }
            else
            {
                // BUG FIX: the original wrote zero-bit elements into
                // sort_tmp_0 and never copied them back, discarding the
                // entire zero list each pass.  Writing straight into
                // sort_tmp is safe because base_cnt_0 <= i here, so the
                // destination slot of this thread's list was already read.
                sort_tmp[base_cnt_0 + tid] = elem;
                base_cnt_0 += num_lists;
            }
        }
        // Append the staged one-list after the in-place zero-list.
        for (u32 i = 0; i < base_cnt_1; i += num_lists)
        {
            sort_tmp[base_cnt_0 + i + tid] = sort_tmp_1[i + tid];
        }
    }
    __syncthreads();
}
// Host-side merge helper: return the smallest current head element across
// `num_lists` sorted interleaved lists in src_array, and advance that
// list's consumption counter.  list_indexes[i] counts elements of list i
// already consumed; exhausted lists are skipped.  The caller must invoke
// this exactly num_elements times so no counter runs past its list.
u32 find_min(const u32 * const src_array,
             u32 * const list_indexes,
             const u32 num_lists,
             const u32 num_elements_per_list)
{
    // BUG FIX: the sentinel was 0xFFFFFFF (seven F's = 2^28 - 1), so any
    // data value above 0x0FFFFFFF compared greater than the initial
    // minimum and could be skipped, advancing the wrong list.
    u32 min_val = 0xFFFFFFFF;
    u32 min_idx = 0;
    // Iterate over each of the lists
    for (u32 i = 0; i < num_lists; i++)
    {
        // If the current list has already been emptied then ignore it
        if (list_indexes[i] < num_elements_per_list)
        {
            // Interleaved layout: element k of list i lives at i + k*num_lists.
            const u32 src_idx = i + (list_indexes[i] * num_lists);
            const u32 data = src_array[src_idx];
            // <= keeps the last list on ties and still accepts 0xFFFFFFFF data.
            if (data <= min_val)
            {
                min_val = data;
                min_idx = i;
            }
        }
    }
    list_indexes[min_idx]++;
    return min_val;
}
void merge_array(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements)
{
const u32 num_elements_per_list = (num_elements / num_lists);
unsigned int list_indexes[MAX_NUM_LISTS];
for(u32 list=0; list < MAX_NUM_LISTS; list++)
{
list_indexes[list] = 0;
}
for(u32 i=0; i<num_elements; i++)
{
dest_array[i] = find_min(src_array,
list_indexes,
num_lists,
num_elements_per_list);
}
}
// Cooperatively copy num_elements u32 values from global `data` into shared
// `sort_tmp`: thread `tid` copies offsets tid, tid+num_lists, ... — the
// same interleaved layout radix_sort2/merge_array1 consume.  Must be
// executed by num_lists threads of one block.
__device__ void copy_data_to_shared(const u32 * const data,
                                    u32 * const sort_tmp,
                                    const u32 num_lists,
                                    const u32 num_elements,
                                    const u32 tid)
{
    // BUG FIX: the original strode by 1, so every thread copied the whole
    // array shifted by tid — racing on sort_tmp and reading data[] up to
    // num_lists-1 elements past the end.  Stride by num_lists instead.
    for (u32 i = 0; i < num_elements; i += num_lists)
    {
        sort_tmp[i + tid] = data[i + tid];
    }
    __syncthreads();
}
// Uses a single thread for merge
__device__ void merge_array1(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
__shared__ u32 list_indexes[MAX_NUM_LISTS];
// Multiple threads
list_indexes[tid] = 0;
__syncthreads();
// Single threaded
if(tid == 0)
{
const u32 num_elements_per_list = (num_elements / num_lists);
for (u32 i = 0; i < num_elements; i++)
{
u32 min_val = 0xFFFFFFFF;
u32 min_idx = 0;
// Iterate over each of the lists
for(u32 list=0; list<num_lists;list++)
{
//If the current list has already been emptied then ignored it
if(list_indexes[list] < num_elements_per_list)
{
const u32 src_idx = list + (list_indexes[list] * num_lists);
const u32 data = src_array[src_idx];
if(data <= min_val)
{
min_val = data;
min_idx = list;
}
}
}
list_indexes[min_idx]++;
dest_array[i] = min_val;
}
}
}
__global__ void gpu_sort_array_array(u32 * const data,
const u32 num_lists,
const u32 num_elements)
{
const u32 tid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ u32 sort_tmp[NUM_ELEMENTS];
__shared__ u32 sort_tmp_0[NUM_ELEMENTS];
__shared__ u32 sort_tmp_1[NUM_ELEMENTS];
copy_data_to_shared(data, sort_tmp, num_lists,
num_elements, tid);
radix_sort2(sort_tmp, num_lists, num_elements, tid, sort_tmp_0, sort_tmp_1);
merge_array1(sort_tmp, data, num_lists, num_elements, tid);
}
// Uses multiple threads for merge
// Deals with multiple identical entries in the data
__device__ void merge_array6(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
list_indexes[tid] = 0;
//Wait for list_indexes[tid] to be cleared
__syncthreads();
//Iterate over all elements
for(u32 i=0; i<num_elements; i++)
{
//Create a value shared with other threads
__shared__ u32 min_val;
__shared__ u32 min_tid;
// Use a temp register for work purposes
u32 data;
//If the current list has not already been
//emptied then read from it, else ignore it
if(list_indexes[tid] < num_elements_per_list)
{
//Work out from the list_index, the index into
// the linear array
const u32 src_idx = tid + (list_indexes[tid] * num_lists);
//Read the data from the list for the given
// thread
data = src_array[src_idx];
}
else
{
data = 0xFFFFFFFF;
}
//Have thread zero clear the min values
if(tid == 0)
{
// Write a very large value so the first
// thread wins with the min
min_val = 0xFFFFFFFF;
min_tid = 0xFFFFFFFF;
}
// Wait for all threads
__syncthreads();
// Have every thread try to store it's value into
// min_val. Only the thread with the lowest value
// will win.
atomicMin(&min_val, data);
//Make sure all threads have taken their turn
__syncthreads();
// If this thread was the one with the minimum
if(min_val == data)
{
// Check for equal values
// Lowest tid wins, and does the write
atomicMin(&min_tid, tid);
}
// Make sure all threads have taken their turn.
__syncthreads();
// If this thread has the lowest tid
if(tid == min_tid)
{
// Increment the list pointer for this thread
list_indexes[tid]++;
// Store the winning value
dest_array[i] = data;
}
}
}
// Uses multiple threads for reduction type merge
__device__ void merge_array5(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
__shared__ u32 reduction_val[MAX_NUM_LISTS];
__shared__ u32 reduction_idx[MAX_NUM_LISTS];
//Clear the working sets
list_indexes[tid] = 0;
reduction_val[tid] = 0;
reduction_idx[tid] = 0;
__syncthreads();
for(u32 i=0; i<num_elements; i++)
{
// We need (num_lists / 2) active threads
u32 tid_max = num_lists >> 1;
u32 data;
// If the current list has already been
// emptied then ignore it
if(list_indexes[tid] < num_elements_per_list)
{
const u32 src_idx = tid + (list_indexes[tid] * num_lists);
data = src_array[src_idx];
}
else
{
data = 0xFFFFFFFF;
}
reduction_val[tid] = data;
reduction_idx[tid] = tid;
__syncthreads();
while(tid_max != 0)
{
if(tid < tid_max)
{
const u32 val2_idx = tid + tid_max;
const u32 val2 = reduction_val[val2_idx];
if(reduction_val[tid] > val2)
{
reduction_val[tid] = val2;
reduction_idx[tid] = reduction_idx[val2_idx];
}
}
tid_max >>= 1;
__syncthreads();
}
if(tid == 0)
{
list_indexes[reduction_idx[0]]++;
dest_array[i] = reduction_val[0];
}
__syncthreads();
}
}
#define REDUCTION_SIZE 8
#define REDUCTION_SIZE_BIT_SHIFT 3
#define MAX_ACTIVE_REDUCTIONS ((MAX_NUM_LISTS) / REDUCTION_SIZE)
__device__ void merge_array9(const u32 * const src_array,
u32 * const dest_array,
const u32 num_lists,
const u32 num_elements,
const u32 tid)
{
u32 data = src_array[tid];
const u32 s_idx = tid >> REDUCTION_SIZE_BIT_SHIFT;
const u32 num_reductions = num_lists >> REDUCTION_SIZE_BIT_SHIFT;
const u32 num_elements_per_list = (num_elements / num_lists);
__shared__ u32 list_indexes[MAX_NUM_LISTS];
list_indexes[tid] = 0;
for(u32 i=0; i<num_elements; i++)
{
__shared__ u32 min_val[MAX_ACTIVE_REDUCTIONS];
__shared__ u32 min_tid;
if(tid < num_lists)
{
min_val[s_idx] = 0xFFFFFFFF;
min_tid = 0xFFFFFFFF;
}
__syncthreads();
atomicMin(&min_val[s_idx], data);
if(num_reductions > 0)
{
__syncthreads();
if(tid < num_reductions)
{
atomicMin(&min_val[0], min_val[tid]);
}
__syncthreads();
}
if(min_val[0] == data)
{
atomicMin(&min_tid, tid);
}
__syncthreads();
if(tid == min_tid)
{
list_indexes[tid]++;
dest_array[i] = data;
if(list_indexes[tid] < num_elements_per_list)
{
data = src_array[tid + (list_indexes[tid] * num_lists)];
}
else
{
data = 0xFFFFFFFF;
}
}
__syncthreads();
}
}
void execute_host_functions()
{
}
// Host driver: fills an array with NUM_ELEMENTS..1 (descending, worst case
// for the sort), sorts it on the GPU with gpu_sort_array_array, and prints
// input/output pairs.
void execute_gpu_functions()
{
    u32 *d = NULL;
    unsigned int idata[NUM_ELEMENTS], odata[NUM_ELEMENTS];
    int i;
    for (i = 0; i < NUM_ELEMENTS; i++) {
        // idata[i] = (unsigned int) i;
        idata[i] = (unsigned int)NUM_ELEMENTS - i;
    }
    if (cudaMalloc((void **)&d, sizeof(u32) * NUM_ELEMENTS) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return;
    }
    cudaMemcpy(d, idata, sizeof(u32) * NUM_ELEMENTS, cudaMemcpyHostToDevice);
    // BUG FIX: launch one block of MAX_NUM_LISTS threads, not NUM_ELEMENTS.
    // 2048 threads/block exceeds the 1024-thread hardware limit, and the
    // kernel's merge stage writes list_indexes[tid] into MAX_NUM_LISTS-sized
    // shared arrays, so it requires blockDim.x == num_lists.
    gpu_sort_array_array<<<1, MAX_NUM_LISTS>>>(d, MAX_NUM_LISTS, NUM_ELEMENTS);
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize(); // Wait for the GPU launched work to complete
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    }
    cudaMemcpy(odata, d, sizeof(u32) * NUM_ELEMENTS, cudaMemcpyDeviceToHost);
    for (i = 0; i < NUM_ELEMENTS; i++) {
        printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
    }
    cudaFree((void *)d);
    cudaDeviceReset();
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
execute_host_functions();
execute_gpu_functions();
return 0;
}
|
06a99e4000390297611fe997082c5ab9d5a91fbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_fp16.h>
#include "fp16_conversion.h"
#define A1 (__float2half2_rn(0.31938153))
#define A2 (__float2half2_rn(-0.356563782))
#define A3 (__float2half2_rn(1.781477937))
#define A4 (__float2half2_rn(-1.821255978))
#define A5 (__float2half2_rn(1.330274429))
#define RSQRT2PI (__float2half2_rn(0.3989422804))
// Lane-wise absolute value of a half2: clears the sign bit (bit 15) of
// each 16-bit half via the mask 0x7FFF7FFF.
// NOTE(review): the pointer-cast type punning technically violates strict
// aliasing; __habs2() (or memcpy-based punning) would be safer — confirm
// the target compilers tolerate this form.
__device__ half2 absh2(half2 val)
{
unsigned int val_s = *((unsigned int *)&val);
val_s = val_s & 0x7FFF7FFF;
return *((half2 *)&val_s);
}
// Cumulative normal distribution approximation, two values per thread in a
// half2: cnd(d) = RSQRT2PI * exp(-d^2/2) * K * poly(K) with
// K = 1 / (1 + 0.2316419*|d|) and polynomial coefficients A1..A5 (the
// polynomial expansion used by the classic CUDA Black-Scholes sample).
// For lanes where d > 0 the symmetry cnd = 1 - cnd is applied; because
// half2 has no lane-select, each half is tested and adjusted separately.
__device__ half2 cndGPU(half2 d)
{
half2 c1 = __float2half2_rn(1.0f);
half2 c2 = __float2half2_rn(0.2316419f);
half2 c3 = __float2half2_rn(-0.5f);
half2 c4 = __float2half2_rn(0.0);
half2
K = h2rcp(__hfma2(c2, absh2(d), c1));
half2
cnd = __hmul2(RSQRT2PI, __hmul2(h2exp(__hmul2(c3, __hmul2(d, d))),
(__hmul2(K, __hfma2(K,__hfma2(K, __hfma2(K, __hfma2(K, A5, A4), A3), A2), A1)))));
// if(d > 0)
// cnd = 1.0 - cnd;
// __hgt2 yields 1.0 in each lane where d > 0, else 0.0.
half2 val_gt = __hgt2(d, c4);
//unsigned int temp = ((*((unsigned int *)&val_gt) & 0x2FFF2FFF) << 2) ^ *((unsigned int *)&cnd);
//cnd = *((half2 *)&temp);
//cnd = __hadd2(val_gt, cnd);
// Per-lane application of cnd = 1 - cnd where the predicate lane is 1.0.
half one = __low2half(c1);
half cnd_l = __low2half(cnd);
half cnd_h = __high2half(cnd);
if(__heq(__low2half(val_gt), one))
{
cnd_l = __hsub(one,cnd_l);
}
if(__heq(__high2half(val_gt), one))
{
cnd_h = __hsub(one,cnd_h);
}
return __halves2half2(cnd_l, cnd_h);
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
// Computes, per half2 lane:
//   d1 = (log(S/X) + (R + V^2/2)*T) / (V*sqrt(T)),  d2 = d1 - V*sqrt(T)
//   Call = S*CND(d1) - X*exp(-R*T)*CND(d2)
//   Put  = X*exp(-R*T)*(1 - CND(d2)) - S*(1 - CND(d1))
// entirely in half precision via fused half2 intrinsics; CallResult and
// PutResult are written through the output references.
__device__ void BlackScholesBodyGPU
(
half2& CallResult,
half2& PutResult,
half2 S, //Stock price
half2 X, //Option strike
half2 T, //Option years
float _R, //Riskless rate
float _V //Volatility rate
)
{
half2 R = __float2half2_rn(_R);
half2 V = __float2half2_rn(_V);
half2 sqrtT, expRT;
half2 d1, d2, CNDD1, CNDD2;
half2 c1 = __float2half2_rn(1.0f);
half2 c2 = __float2half2_rn(0.5f);
sqrtT = h2sqrt(T);
// d1 = (T*(0.5*V*V + R) + log(S/X)) * rcp(V*sqrt(T))
d1 = __hmul2(__hfma2(T, __hfma2(c2, __hmul2(V, V), R), h2log(__hmul2(S, h2rcp(X)))), h2rcp(__hmul2(V, sqrtT)));
d2 = __hsub2(d1, __hmul2(V, sqrtT));
CNDD1 = cndGPU(d1);
CNDD2 = cndGPU(d2);
//printf("%.15f,%.15f,", __half22float2(CNDD1).x, __half22float2(CNDD1).y);
//Calculate Call and Put simultaneously
expRT = h2exp(__hmul2(__hneg2(R), T));
CallResult = __hsub2(__hmul2(S, CNDD1), __hmul2(X, __hmul2(expRT, CNDD2)));
PutResult = __hsub2(__hmul2(X, __hmul2(expRT , __hsub2(c1, CNDD2))), __hmul2(S, __hsub2(c1, CNDD1)));
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
// Each half2 element packs two options, so the arrays are traversed as
// optN/2 half2 values.  Grid-stride loop: any launch configuration covers
// all elements, and consecutive threads touch consecutive half2 elements
// (coalesced).
// NOTE(review): optN/2 truncates — an odd optN would drop the final
// option; confirm callers always pass an even count.
__global__ void BlackScholesGPU(
half2 *d_CallResult,
half2 *d_PutResult,
half2 *d_StockPrice,
half2 *d_OptionStrike,
half2 *d_OptionYears,
float Riskfree,
float Volatility,
int optN
)
{
//Thread index
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Total number of threads in execution grid
const int THREAD_N = blockDim.x * gridDim.x;
//No matter how small is execution grid or how large OptN is,
//exactly OptN indices will be processed with perfect memory coalescing
for(int opt = tid; opt < optN/2; opt += THREAD_N)
BlackScholesBodyGPU(
d_CallResult[opt],
d_PutResult[opt],
d_StockPrice[opt],
d_OptionStrike[opt],
d_OptionYears[opt],
Riskfree,
Volatility
);
}
// Pseudo-random float drawn uniformly from [low, high], derived from the C
// library rand() (seed controlled by the caller via srand()).
float RandFloat(float low, float high){
    const float frac = (float)rand() / (float)RAND_MAX;
    return (1.0f - frac) * low + frac * high;
}
#define OPT_N 400000
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(half);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
// Host driver for the half2 Black-Scholes kernel: generates OPT_N random
// options, prices them NUM_ITERATIONS times on the GPU, and copies the
// call/put results back to the host.
int main()
{
    half * h_CallResultGPU = (half *)malloc(OPT_SZ);
    half * h_PutResultGPU = (half *)malloc(OPT_SZ);
    half * h_StockPrice = (half *)malloc(OPT_SZ);
    half * h_OptionStrike = (half *)malloc(OPT_SZ);
    half * h_OptionYears = (half *)malloc(OPT_SZ);
    half
    //Results calculated by GPU
    *d_CallResult,
    *d_PutResult,
    //GPU instance of input data
    *d_StockPrice,
    *d_OptionStrike,
    *d_OptionYears;
    hipMalloc((void **)&d_CallResult, OPT_SZ);
    hipMalloc((void **)&d_PutResult, OPT_SZ);
    hipMalloc((void **)&d_StockPrice, OPT_SZ);
    hipMalloc((void **)&d_OptionStrike, OPT_SZ);
    hipMalloc((void **)&d_OptionYears, OPT_SZ);
    srand(5347); // fixed seed: reproducible option set
    //Generate options set
    int i;
    for(i = 0; i < OPT_N; i++)
    {
        h_CallResultGPU[i] = approx_float_to_half(0.0f);
        h_PutResultGPU[i] = approx_float_to_half(-1.0f);
        h_StockPrice[i] = approx_float_to_half(RandFloat(5.0f, 30.0f));
        h_OptionStrike[i] = approx_float_to_half(RandFloat(1.0f, 100.0f));
        h_OptionYears[i] = approx_float_to_half(RandFloat(0.25f, 10.0f));
    }
    hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice);
    hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice);
    hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice);
    for(i = 0; i < NUM_ITERATIONS; i++){
        // BUG FIX: the kernel's parameter order is (..., d_StockPrice,
        // d_OptionStrike, ...); the original passed strike before stock
        // price, silently swapping S and X in the pricing formula.
        hipLaunchKernelGGL(( BlackScholesGPU), dim3(256), dim3(128), 0, 0,
            (half2 *)d_CallResult,
            (half2 *)d_PutResult,
            (half2 *)d_StockPrice,
            (half2 *)d_OptionStrike,
            (half2 *)d_OptionYears,
            RISKFREE,
            VOLATILITY,
            OPT_N
        );
    }
    hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost);
    hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost);
    //for(i = 0; i < OPT_N; i++)
    // printf("%.15f,", half_to_float(h_CallResultGPU[i]));
    // Release device and host buffers (the original leaked all of them).
    hipFree(d_CallResult);
    hipFree(d_PutResult);
    hipFree(d_StockPrice);
    hipFree(d_OptionStrike);
    hipFree(d_OptionYears);
    free(h_CallResultGPU);
    free(h_PutResultGPU);
    free(h_StockPrice);
    free(h_OptionStrike);
    free(h_OptionYears);
    return 0;
} | 06a99e4000390297611fe997082c5ab9d5a91fbd.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda_fp16.h>
#include "fp16_conversion.h"
#define A1 (__float2half2_rn(0.31938153))
#define A2 (__float2half2_rn(-0.356563782))
#define A3 (__float2half2_rn(1.781477937))
#define A4 (__float2half2_rn(-1.821255978))
#define A5 (__float2half2_rn(1.330274429))
#define RSQRT2PI (__float2half2_rn(0.3989422804))
__device__ half2 absh2(half2 val)
{
unsigned int val_s = *((unsigned int *)&val);
val_s = val_s & 0x7FFF7FFF;
return *((half2 *)&val_s);
}
__device__ half2 cndGPU(half2 d)
{
half2 c1 = __float2half2_rn(1.0f);
half2 c2 = __float2half2_rn(0.2316419f);
half2 c3 = __float2half2_rn(-0.5f);
half2 c4 = __float2half2_rn(0.0);
half2
K = h2rcp(__hfma2(c2, absh2(d), c1));
half2
cnd = __hmul2(RSQRT2PI, __hmul2(h2exp(__hmul2(c3, __hmul2(d, d))),
(__hmul2(K, __hfma2(K,__hfma2(K, __hfma2(K, __hfma2(K, A5, A4), A3), A2), A1)))));
// if(d > 0)
// cnd = 1.0 - cnd;
half2 val_gt = __hgt2(d, c4);
//unsigned int temp = ((*((unsigned int *)&val_gt) & 0x2FFF2FFF) << 2) ^ *((unsigned int *)&cnd);
//cnd = *((half2 *)&temp);
//cnd = __hadd2(val_gt, cnd);
half one = __low2half(c1);
half cnd_l = __low2half(cnd);
half cnd_h = __high2half(cnd);
if(__heq(__low2half(val_gt), one))
{
cnd_l = __hsub(one,cnd_l);
}
if(__heq(__high2half(val_gt), one))
{
cnd_h = __hsub(one,cnd_h);
}
return __halves2half2(cnd_l, cnd_h);
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
__device__ void BlackScholesBodyGPU
(
half2& CallResult,
half2& PutResult,
half2 S, //Stock price
half2 X, //Option strike
half2 T, //Option years
float _R, //Riskless rate
float _V //Volatility rate
)
{
half2 R = __float2half2_rn(_R);
half2 V = __float2half2_rn(_V);
half2 sqrtT, expRT;
half2 d1, d2, CNDD1, CNDD2;
half2 c1 = __float2half2_rn(1.0f);
half2 c2 = __float2half2_rn(0.5f);
sqrtT = h2sqrt(T);
d1 = __hmul2(__hfma2(T, __hfma2(c2, __hmul2(V, V), R), h2log(__hmul2(S, h2rcp(X)))), h2rcp(__hmul2(V, sqrtT)));
d2 = __hsub2(d1, __hmul2(V, sqrtT));
CNDD1 = cndGPU(d1);
CNDD2 = cndGPU(d2);
//printf("%.15f,%.15f,", __half22float2(CNDD1).x, __half22float2(CNDD1).y);
//Calculate Call and Put simultaneously
expRT = h2exp(__hmul2(__hneg2(R), T));
CallResult = __hsub2(__hmul2(S, CNDD1), __hmul2(X, __hmul2(expRT, CNDD2)));
PutResult = __hsub2(__hmul2(X, __hmul2(expRT , __hsub2(c1, CNDD2))), __hmul2(S, __hsub2(c1, CNDD1)));
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
__global__ void BlackScholesGPU(
half2 *d_CallResult,
half2 *d_PutResult,
half2 *d_StockPrice,
half2 *d_OptionStrike,
half2 *d_OptionYears,
float Riskfree,
float Volatility,
int optN
)
{
//Thread index
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Total number of threads in execution grid
const int THREAD_N = blockDim.x * gridDim.x;
//No matter how small is execution grid or how large OptN is,
//exactly OptN indices will be processed with perfect memory coalescing
for(int opt = tid; opt < optN/2; opt += THREAD_N)
BlackScholesBodyGPU(
d_CallResult[opt],
d_PutResult[opt],
d_StockPrice[opt],
d_OptionStrike[opt],
d_OptionYears[opt],
Riskfree,
Volatility
);
}
float RandFloat(float low, float high){
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
#define OPT_N 400000
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(half);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
// Host driver for the half2 Black-Scholes kernel: generates OPT_N random
// options, prices them NUM_ITERATIONS times on the GPU, and copies the
// call/put results back to the host.
int main()
{
    half * h_CallResultGPU = (half *)malloc(OPT_SZ);
    half * h_PutResultGPU = (half *)malloc(OPT_SZ);
    half * h_StockPrice = (half *)malloc(OPT_SZ);
    half * h_OptionStrike = (half *)malloc(OPT_SZ);
    half * h_OptionYears = (half *)malloc(OPT_SZ);
    half
    //Results calculated by GPU
    *d_CallResult,
    *d_PutResult,
    //GPU instance of input data
    *d_StockPrice,
    *d_OptionStrike,
    *d_OptionYears;
    cudaMalloc((void **)&d_CallResult, OPT_SZ);
    cudaMalloc((void **)&d_PutResult, OPT_SZ);
    cudaMalloc((void **)&d_StockPrice, OPT_SZ);
    cudaMalloc((void **)&d_OptionStrike, OPT_SZ);
    cudaMalloc((void **)&d_OptionYears, OPT_SZ);
    srand(5347); // fixed seed: reproducible option set
    //Generate options set
    int i;
    for(i = 0; i < OPT_N; i++)
    {
        h_CallResultGPU[i] = approx_float_to_half(0.0f);
        h_PutResultGPU[i] = approx_float_to_half(-1.0f);
        h_StockPrice[i] = approx_float_to_half(RandFloat(5.0f, 30.0f));
        h_OptionStrike[i] = approx_float_to_half(RandFloat(1.0f, 100.0f));
        h_OptionYears[i] = approx_float_to_half(RandFloat(0.25f, 10.0f));
    }
    cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice);
    for(i = 0; i < NUM_ITERATIONS; i++){
        // BUG FIX: the kernel's parameter order is (..., d_StockPrice,
        // d_OptionStrike, ...); the original passed strike before stock
        // price, silently swapping S and X in the pricing formula.
        BlackScholesGPU<<<256, 128>>>(
            (half2 *)d_CallResult,
            (half2 *)d_PutResult,
            (half2 *)d_StockPrice,
            (half2 *)d_OptionStrike,
            (half2 *)d_OptionYears,
            RISKFREE,
            VOLATILITY,
            OPT_N
        );
    }
    cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost);
    //for(i = 0; i < OPT_N; i++)
    // printf("%.15f,", half_to_float(h_CallResultGPU[i]));
    // Release device and host buffers (the original leaked all of them).
    cudaFree(d_CallResult);
    cudaFree(d_PutResult);
    cudaFree(d_StockPrice);
    cudaFree(d_OptionStrike);
    cudaFree(d_OptionYears);
    free(h_CallResultGPU);
    free(h_PutResultGPU);
    free(h_StockPrice);
    free(h_OptionStrike);
    free(h_OptionYears);
    return 0;
} |
3604575473f5f038374be92a6df21fb8eefce692.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// log.cpp
#include <nbla/cuda/function/log.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(Log, ::log(x), dy / x, false);
}
| 3604575473f5f038374be92a6df21fb8eefce692.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// log.cpp
#include <nbla/cuda/function/log.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(Log, std::log(x), dy / x, false);
}
|
1f3c253ca53a405338a0058857e7baadd8010e0c.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <vector>
#include <hip/hip_runtime.h>
#include "NPP_staging.hpp"
#include "opencv2/gpu/device/warp.hpp"
#include "opencv2/gpu/device/warp_shuffle.hpp"
texture<Ncv8u, 1, hipReadModeElementType> tex8u;
texture<Ncv32u, 1, hipReadModeElementType> tex32u;
texture<uint2, 1, hipReadModeElementType> tex64u;
//==============================================================================
//
// CUDA streams handling
//
//==============================================================================
static hipStream_t nppStream = 0;
hipStream_t nppStGetActiveCUDAstream(void)
{
return nppStream;
}
hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream)
{
hipStream_t tmp = nppStream;
nppStream = cudaStream;
return tmp;
}
//==============================================================================
//
// BlockScan.cuh
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::gpu::device::Warp::laneId();
// scan on shuffl functions
#pragma unroll
for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
{
const T n = cv::gpu::device::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}
return idata;
#else
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
#endif
}
inline __device__ Ncv64u warpScanInclusive(Ncv64u idata, volatile Ncv64u *s_Data)
{
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// IntegralImage.cu
//
//==============================================================================
const Ncv32u NUM_SCAN_THREADS = 256;
const Ncv32u LOG2_NUM_SCAN_THREADS = 8;
template<class T_in, class T_out>
struct _scanElemOp
{
template<bool tbDoSqr>
static inline __host__ __device__ T_out scanElemOp(T_in elem)
{
return scanElemOp( elem, Int2Type<(int)tbDoSqr>() );
}
private:
template <int v> struct Int2Type { enum { value = v }; };
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>)
{
return (T_out)elem;
}
static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>)
{
return (T_out)(elem*elem);
}
};
template<class T>
inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs);
template<>
inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs);
}
template<>
inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
template<>
inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
return d_src[curElemOffs];
}
/**
* \brief Segmented scan kernel
*
* Calculates per-row prefix scans of the input image.
* Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements
*
* \tparam T_in Type of input image elements
* \tparam T_out Type of output image elements
* \tparam T_op Defines an operation to be performed on the input image pixels
*
* \param d_src [IN] Source image pointer
* \param srcWidth [IN] Source image width
* \param srcStride [IN] Source image stride
* \param d_II [OUT] Output image pointer
* \param IIstride [IN] Output image stride
*
* \return None
*/
template <class T_in, class T_out, bool tbDoSqr>
__global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride,
T_out *d_II, Ncv32u IIstride)
{
//advance pointers to the current line
if (sizeof(T_in) != 1)
{
d_src += srcStride * blockIdx.x;
}
//for initial image 8bit source we use texref tex8u
d_II += IIstride * blockIdx.x;
Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS;
Ncv32u offsetX = 0;
__shared__ T_out shmem[NUM_SCAN_THREADS * 2];
__shared__ T_out carryElem;
if (threadIdx.x == 0)
carryElem = 0;
__syncthreads();
while (numBuckets--)
{
Ncv32u curElemOffs = offsetX + threadIdx.x;
T_out curScanElem;
T_in curElem = 0;
T_out curElemMod;
if (curElemOffs < srcWidth)
{
//load elements
curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs);
}
curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem);
//inclusive scan
curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem);
if (curElemOffs <= srcWidth)
{
//make scan exclusive and write the bucket to the output buffer
d_II[curElemOffs] = carryElem + curScanElem - curElemMod;
offsetX += NUM_SCAN_THREADS;
}
//remember last element for subsequent buckets adjustment
__syncthreads();
if (threadIdx.x == NUM_SCAN_THREADS-1)
{
carryElem += curScanElem;
}
__syncthreads();
}
if (offsetX == srcWidth && !threadIdx.x)
{
d_II[offsetX] = carryElem;
}
}
template <bool tbDoSqr, class T_in, class T_out>
NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride,
T_out *d_dst, Ncv32u dstStride, NcvSize32u roi)
{
hipChannelFormatDesc cfdTex;
size_t alignmentOffset = 0;
if (sizeof(T_in) == 1)
{
cfdTex = hipCreateChannelDesc<Ncv8u>();
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
if (alignmentOffset > 0)
{
ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
}
}
hipLaunchKernelGGL(( scanRows
<T_in, T_out, tbDoSqr>)
, dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(),
d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment)
{
Ncv32u alignMask = allocatorAlignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u dimBytes = dim * elemTypeSize;
Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask;
Ncv32u PaddedDim = pitch / elemTypeSize;
return PaddedDim;
}
template <class T_in, class T_out>
NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep,
T_out *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) &&
dstStep >= (roi.width + 1) * sizeof(T_out) &&
srcStep % sizeof(T_in) == 0 &&
dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T_in);
dstStep /= sizeof(T_out);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32);
ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<false>
(d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u),
(Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false>
(Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u),
(Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roi.width &&
dstStep >= (roi.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roi.width + 1;
Ncv32u HeightII = roi.height + 1;
Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment());
Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64;
Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64;
NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax);
ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32);
ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64);
ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
ncvStat = scanRowsWrapperDevice
<true, Ncv8u, Ncv32u>
(d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u),
Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = scanRowsWrapperDevice
<false, Ncv32u, Ncv64u>
(Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII));
ncvAssertReturnNcvStat(ncvStat);
ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u),
d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII));
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width,
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f),
(Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width,
NULL, (roiSize.width+1) * sizeof(Ncv64u),
roiSize, gpuCounter);
ncvAssertReturnNcvStat(ncvStat);
*pBufsize = (Ncv32u)gpuCounter.maxSize();
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv32u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep,
Ncv32f *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep,
Ncv64u *d_dst, Ncv32u dstStep,
NcvSize32u roiSize, Ncv8u *pBuffer,
Ncv32u bufSize, hipDeviceProp_t &devProp)
{
NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
ncvAssertReturnNcvStat(ncvStat);
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv32u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) &&
dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv32u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32u top = h_dst[(i-1) * dstStep + j];
Ncv32u left = h_dst[i * dstStep + (j - 1)];
Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep,
Ncv32f *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) &&
srcStep % sizeof(Ncv32f) == 0 &&
dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(Ncv32f);
dstStep /= sizeof(Ncv32f);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv32u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0.0f;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv32f top = h_dst[(i-1) * dstStep + j];
Ncv32f left = h_dst[i * dstStep + (j - 1)];
Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
Ncv64u *h_dst, Ncv32u dstStep,
NcvSize32u roiSize)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(srcStep >= roiSize.width &&
dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) &&
dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
dstStep /= sizeof(Ncv64u);
Ncv32u WidthII = roiSize.width + 1;
Ncv32u HeightII = roiSize.height + 1;
memset(h_dst, 0, WidthII * sizeof(Ncv64u));
for (Ncv32u i=1; i<HeightII; i++)
{
h_dst[i * dstStep] = 0;
for (Ncv32u j=1; j<WidthII; j++)
{
Ncv64u top = h_dst[(i-1) * dstStep + j];
Ncv64u left = h_dst[i * dstStep + (j - 1)];
Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)];
Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)];
h_dst[i * dstStep + j] = elem*elem + left - topleft + top;
}
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// Decimate.cu
//
//==============================================================================
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32;
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8;
template<class T, NcvBool tbCacheTexture>
__device__ T getElem_Decimate(Ncv32u x, T *d_src);
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src)
{
return tex1Dfetch(tex32u, x);
}
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src)
{
return d_src[x];
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src)
{
return d_src[x];
}
template <class T, NcvBool tbCacheTexture>
__global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep,
NcvSize32u dstRoi, Ncv32u scale)
{
int curX = blockIdx.x * blockDim.x + threadIdx.x;
int curY = blockIdx.y * blockDim.y + threadIdx.y;
if (curX >= dstRoi.width || curY >= dstRoi.height)
{
return;
}
d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src);
}
template <class T>
static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep,
T *d_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale,
NcvBool readThruTexture)
{
ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X,
(dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
if (!readThruTexture)
{
hipLaunchKernelGGL(( decimate_C1R
<T, false>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
else
{
hipChannelFormatDesc cfdTexSrc;
if (sizeof(T) == sizeof(Ncv32u))
{
cfdTexSrc = hipCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
else
{
cfdTexSrc = hipCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
}
hipLaunchKernelGGL(( decimate_C1R
<T, true>)
, dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
d_src, srcStep, d_dst, dstStep, dstRoi, scale);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
template <class T>
static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep,
T *h_dst, Ncv32u dstStep,
NcvSize32u srcRoi, Ncv32u scale)
{
ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI);
ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);
srcStep /= sizeof(T);
dstStep /= sizeof(T);
NcvSize32u dstRoi;
dstRoi.width = srcRoi.width / scale;
dstRoi.height = srcRoi.height / scale;
for (Ncv32u i=0; i<dstRoi.height; i++)
{
for (Ncv32u j=0; j<dstRoi.width; j++)
{
h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale];
}
}
return NPPST_SUCCESS;
}
#define implementNppDecimate(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
{ \
return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, \
srcRoi, scale, readThruTexture); \
}
#define implementNppDecimateHost(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale) \
{ \
return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, \
srcRoi, scale); \
}
implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)
//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================
const Ncv32u NUM_RECTSTDDEV_THREADS = 128;
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
if (tbCacheTexture)
{
return tex1Dfetch(tex32u, x);
}
else
{
return d_sum[x];
}
}
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
if (tbCacheTexture)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
else
{
return d_sqsum[x];
}
}
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
if (x_offs >= roi.width)
{
return;
}
Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;
//OPT: try swapping order (could change cache hit/miss ratio)
Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;
Ncv32f mean = sum_val * invRectArea;
//////////////////////////////////////////////////////////////////////////
// sqsum_val_res = sqsum_val / rectArea
//////////////////////////////////////////////////////////////////////////
Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
sqsum_val_1 *= invRectArea;
sqsum_val_4 *= invRectArea;
Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;
//////////////////////////////////////////////////////////////////////////
// variance = sqsum_val_res - mean * mean
//////////////////////////////////////////////////////////////////////////
#if defined DISABLE_MAD_SELECTIVELY
Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean);
#else
Ncv32f variance = sqsum_val_res - mean * mean;
#endif
//////////////////////////////////////////////////////////////////////////
// stddev = sqrtf(variance)
//////////////////////////////////////////////////////////////////////////
//Ncv32f stddev = sqrtf(variance);
Ncv32f stddev = __fsqrt_rn(variance);
d_norm[blockIdx.y * normStep + x_offs] = stddev;
}
/**
 * Computes the per-pixel standard deviation of a sliding rectangle over an
 * image, given its integral image (d_sum) and squared integral image
 * (d_sqsum). Results go to d_norm, one Ncv32f per ROI pixel.
 *
 * \param d_sum           [IN]  Integral image (32-bit), device pointer
 * \param sumStep         [IN]  d_sum row step, bytes
 * \param d_sqsum         [IN]  Squared integral image (64-bit), device pointer
 * \param sqsumStep       [IN]  d_sqsum row step, bytes
 * \param d_norm          [OUT] Per-pixel stddev, device pointer
 * \param normStep        [IN]  d_norm row step, bytes
 * \param roi             [IN]  Output region size
 * \param rect            [IN]  Rectangle (offset + size) evaluated at each pixel
 * \param scaleArea       [IN]  Area scale factor, must be >= 1.0f
 * \param readThruTexture [IN]  If true, read the integral images through textures
 */
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect,
                                   Ncv32f scaleArea, NcvBool readThruTexture)
{
    ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
    // Convert byte strides to element strides.
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);
    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;
    // One thread per output pixel along x; one grid row per image row.
    dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
    dim3 block(NUM_RECTSTDDEV_THREADS);
    if (!readThruTexture)
    {
        // Plain global-memory path.
        hipLaunchKernelGGL((rectStdDev_32f_C1R<false>),
                           dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
                           d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }
    else
    {
        // Texture path: bind both integral images, then pass NULL data
        // pointers so the kernel fetches through tex32u/tex64u instead.
        hipChannelFormatDesc cfdTexSrc;
        hipChannelFormatDesc cfdTexSqr;
        cfdTexSrc = hipCreateChannelDesc<Ncv32u>();
        cfdTexSqr = hipCreateChannelDesc<uint2>();  // 64-bit elements fetched as uint2
        size_t alignmentOffset;
        // A non-zero alignment offset would silently shift every fetch, so it is rejected.
        ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        hipLaunchKernelGGL((rectStdDev_32f_C1R<true>),
                           dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
                           NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
/**
 * CPU reference implementation of the rectangular standard-deviation
 * computation (see nppiStRectStdDev_32f_C1R for the device version).
 * Reads the integral image h_sum and squared integral image h_sqsum and
 * writes the per-pixel stddev of the window `rect` into h_norm.
 * All *_Step parameters are byte strides.
 */
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
                                        Ncv64u *h_sqsum, Ncv32u sqsumStep,
                                        Ncv32f *h_norm, Ncv32u normStep,
                                        NcvSize32u roi, NcvRect32u rect,
                                        Ncv32f scaleArea)
{
    ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
    // Convert byte strides to element strides.
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);
    const Ncv32f rectArea = rect.width * rect.height * scaleArea;
    const Ncv32f invRectArea = 1.0f / rectArea;
    // Offsets of the four window corners relative to the current pixel.
    const Ncv32u sumTL = rect.y * sumStep + rect.x;
    const Ncv32u sumBL = (rect.y + rect.height) * sumStep + rect.x;
    const Ncv32u sumTR = sumTL + rect.width;
    const Ncv32u sumBR = sumBL + rect.width;
    const Ncv32u sqTL = rect.y * sqsumStep + rect.x;
    const Ncv32u sqBL = (rect.y + rect.height) * sqsumStep + rect.x;
    const Ncv32u sqTR = sqTL + rect.width;
    const Ncv32u sqBR = sqBL + rect.width;
    for (Ncv32u y = 0; y < roi.height; y++)
    {
        for (Ncv32u x = 0; x < roi.width; x++)
        {
            const Ncv32u base = y * sumStep + x;
            const Ncv32u sqBase = y * sqsumStep + x;
            // Window sums via the integral-image identity: br + tl - tr - bl.
            Ncv64f sum_val = h_sum[base + sumBR] + h_sum[base + sumTL]
                           - h_sum[base + sumTR] - h_sum[base + sumBL];
            Ncv64f sqsum_val = (Ncv64f)(h_sqsum[sqBase + sqBR] + h_sqsum[sqBase + sqTL]
                                      - h_sqsum[sqBase + sqTR] - h_sqsum[sqBase + sqBL]);
            Ncv64f mean = sum_val * invRectArea;
            // variance = E[x^2] - E[x]^2
            Ncv64f variance = sqsum_val / rectArea - mean * mean;
            h_norm[y * normStep + x] = (Ncv32f)sqrt(variance);
        }
    }
    return NPPST_SUCCESS;
}
//==============================================================================
//
// Transpose.cu
//
//==============================================================================
// Transpose tile edge length and the row step each thread loop advances by.
// The two are equal here, so each thread moves exactly one element per tile.
const Ncv32u TRANSPOSE_TILE_DIM = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;
/**
 * \brief Matrix transpose kernel
 *
 * Calculates transpose of the input image
 * \see TRANSPOSE_TILE_DIM
 *
 * \tparam T Element type of both the input and output images
 *
 * \param d_src [IN] Source image pointer
 * \param srcStride [IN] Source image stride (in elements)
 * \param d_dst [OUT] Output image pointer
 * \param dstStride [IN] Output image stride (in elements)
 * \param srcRoi [IN] Size of the source region to transpose
 *
 * \return None
 */
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
                          T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    // +1 column of padding: the write phase reads tile columns, and the
    // padding avoids shared-memory bank conflicts for those accesses.
    __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
    Ncv32u blockIdx_x, blockIdx_y;
    // do diagonal reordering of block indices
    // NOTE(review): presumably to spread concurrent blocks across memory
    // partitions (as in the classic CUDA SDK transpose sample) — confirm.
    if (gridDim.x == gridDim.y)
    {
        blockIdx_y = blockIdx.x;
        blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    }
    else
    {
        Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
        blockIdx_y = bid % gridDim.y;
        blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
    }
    // Phase 1: load a tile of the source into shared memory (bounds-checked).
    Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
    Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
    Ncv32u index_gmem = xIndex + yIndex * srcStride;
    if (xIndex < srcRoi.width)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.height)
            {
                tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
            }
        }
    }
    // All loads must complete before any thread reads the transposed tile.
    __syncthreads();
    // Phase 2: write the tile out with block x/y roles swapped; each thread
    // reads the transposed element tile[x][y].
    xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
    yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
    index_gmem = xIndex + yIndex * dstStride;
    if (xIndex < srcRoi.height)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.width)
            {
                d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
            }
        }
    }
}
/**
 * Validates arguments and launches the transpose kernel on the active
 * stream. Strides are byte strides on input and are converted to element
 * strides before the launch. Returns NPPST_SUCCESS on success.
 */
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
                                 T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
    // Byte strides -> element strides.
    srcStride /= sizeof(T);
    dstStride /= sizeof(T);
    // One TILE x TILE thread block per tile of the source ROI (ceil division).
    dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM,
              (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM);
    dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
    hipLaunchKernelGGL((transpose<T>),
                       dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
                       d_src, srcStride, d_dst, dstStride, srcRoi);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
/**
 * CPU transpose of an srcRoi-sized matrix: element (row, col) of h_src
 * lands at (col, row) of h_dst. Strides are byte strides on input and are
 * converted to element strides after validation.
 */
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
                                      T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
    srcStride /= sizeof(T);
    dstStride /= sizeof(T);
    // Walk the source row by row, scattering each row into a destination column.
    for (Ncv32u row = 0; row < srcRoi.height; row++)
    {
        const T *srcRow = h_src + row * srcStride;
        for (Ncv32u col = 0; col < srcRoi.width; col++)
        {
            h_dst[col * dstStride + row] = srcRow[col];
        }
    }
    return NPPST_SUCCESS;
}
// Generates a public transpose entry point for the given element width and
// type suffix by forwarding to the templated device wrapper instantiated
// with the same-width unsigned type (transposition only moves bits, so the
// bit pattern of signed/float elements is preserved).
#define implementNppTranspose(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
                                               Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
    { \
        return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
                                                   (Ncv##bit##u *)d_dst, dstStep, srcRoi); \
    }
// Host-side counterpart of implementNppTranspose.
#define implementNppTransposeHost(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
                                                    Ncv##bit##typ *h_dst, Ncv32u dstStep, \
                                                    NcvSize32u srcRoi) \
    { \
        return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
                                                 (Ncv##bit##u *)h_dst, dstStep, srcRoi); \
    }
// Device transpose entry points for all 32/64-bit u/s/f element types.
implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)
// Host transpose entry points for the same element types.
implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)
// 128-bit-per-element device transpose: reuse the templated wrapper with
// uint4 elements (raw bits are moved, element interpretation is irrelevant).
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
                                  void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    uint4 *src = (uint4 *)d_src;
    uint4 *dst = (uint4 *)d_dst;
    return transposeWrapperDevice<uint4>(src, srcStep, dst, dstStep, srcRoi);
}
// Host version of the 128-bit-per-element transpose.
NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
                                       void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    uint4 *src = (uint4 *)d_src;
    uint4 *dst = (uint4 *)d_dst;
    return transposeWrapperHost<uint4>(src, srcStep, dst, dstStep, srcRoi);
}
//==============================================================================
//
// Compact.cu
//
//==============================================================================
// Threads per block used by every stream-compaction (remove*) kernel below.
const Ncv32u NUM_REMOVE_THREADS = 256;
/**
 * Pass 1 of stream compaction: per-block inclusive scan.
 *
 * \tparam bRemove       true: scan the keep-predicate (d_src[i] != elemRemove)
 *                       and write each element's exclusive offset to d_offsets;
 *                       false: scan d_src values in place (used for the
 *                       hierarchical partial-sum levels; d_offsets unused).
 * \tparam bWritePartial true: the last thread of each block writes the block
 *                       total to d_blockSums for the next scan level.
 */
template <bool bRemove, bool bWritePartial>
__global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen,
                                Ncv32u *d_offsets, Ncv32u *d_blockSums,
                                Ncv32u elemRemove)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
    // NOTE(review): the loose guard keeps threads up to one block past srcLen
    // alive (with scanElem == 0) so the whole block still reaches the barriers
    // inside blockScanInclusive; only threads far past the end exit here.
    if (elemAddrIn > srcLen + blockDim.x)
    {
        return;
    }
    __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2];
    Ncv32u scanElem = 0;
    if (elemAddrIn < srcLen)
    {
        if (bRemove)
        {
            // 1 for survivors, 0 for elements equal to elemRemove.
            scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0;
        }
        else
        {
            scanElem = d_src[elemAddrIn];
        }
    }
    Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem);
    __syncthreads();
    if (elemAddrIn < srcLen)
    {
        if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial)
        {
            // Per-block total feeds the next level of the hierarchy.
            d_blockSums[blockId] = localScanInc;
        }
        if (bRemove)
        {
            // Inclusive -> exclusive scan: drop this element's own contribution.
            d_offsets[elemAddrIn] = localScanInc - scanElem;
        }
        else
        {
            d_src[elemAddrIn] = localScanInc - scanElem;
        }
    }
}
/**
 * Pass 2 of stream compaction: add each block's prefix (its scanned block
 * sum) to all of the block's per-element offsets, turning the independent
 * per-block scans into one global exclusive scan.
 */
__global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
    if (elemAddrIn >= srcLen)
    {
        return;
    }
    __shared__ Ncv32u valOffs;
    // Every surviving thread writes the same value, so this race is benign.
    // NOTE(review): threads that returned above never reach the barrier —
    // exited threads are not counted by __syncthreads, so this is OK.
    valOffs = d_blockSums[blockId];
    __syncthreads();
    d_offsets[elemAddrIn] += valOffs;
}
/**
 * Pass 3 of stream compaction: scatter surviving elements of d_src into
 * d_dst using the global exclusive offsets from the scan passes, and have
 * the thread owning the last source element publish the output length.
 */
__global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen,
                                   Ncv32u *d_offsets, Ncv32u *d_dst,
                                   Ncv32u elemRemove, Ncv32u *dstLenValue)
{
    const Ncv32u linearBlock = blockIdx.y * 65535 + blockIdx.x;
    const Ncv32u srcIdx = linearBlock * NUM_REMOVE_THREADS + threadIdx.x;
    if (srcIdx >= srcLen)
    {
        return;
    }
    const Ncv32u value = d_src[srcIdx];
    const Ncv32u dstIdx = d_offsets[srcIdx];
    const bool keep = (value != elemRemove);
    if (keep)
    {
        d_dst[dstIdx] = value;
    }
    // The owner of the final source element knows the compacted length.
    if (srcIdx == srcLen - 1)
    {
        *dstLenValue = keep ? (dstIdx + 1) : dstIdx;
    }
}
/**
 * Removes every occurrence of elemRemove from d_src, writing the survivors
 * (in stable order) to d_dst, via a three-pass GPU stream compaction:
 *   1. per-block inclusive scans of the keep-predicate (hierarchical when
 *      the vector spans more than one block),
 *   2. top-down adjustment turning per-block scans into global offsets,
 *   3. scatter of surviving elements to d_dst.
 * All scratch memory comes from gpuAllocator; when the allocator is in
 * counting mode the kernel launches are skipped and only allocation sizes
 * are recorded (used by nppsStCompactGetSize_32u).
 * dstLenPinned, if non-NULL, receives the compacted length (pinned host mem).
 */
NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
                                   Ncv32u *d_dst, Ncv32u *dstLenPinned,
                                   Ncv32u elemRemove,
                                   INCVMemAllocator &gpuAllocator)
{
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
    if (srcLen == 0)
    {
        if (dstLenPinned != NULL)
        {
            *dstLenPinned = 0;
        }
        return NPPST_SUCCESS;
    }
    // Element counts and offsets (in elements) of each scan-hierarchy level:
    // level 0 holds srcLen offsets, each higher level one sum per block below.
    std::vector<Ncv32u> partSumNums;
    std::vector<Ncv32u> partSumOffsets;
    Ncv32u partSumLastNum = srcLen;
    Ncv32u partSumLastOffs = 0;
    do
    {
        partSumNums.push_back(partSumLastNum);
        partSumOffsets.push_back(partSumLastOffs);
        // Each level is padded up to the allocator's alignment.
        Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
                                                 gpuAllocator.alignment()) / sizeof(Ncv32u);
        partSumLastOffs += curPartSumAlignedLength;
        partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
    }
    while (partSumLastNum>1);
    partSumNums.push_back(partSumLastNum);
    partSumOffsets.push_back(partSumLastOffs);
    // One flat buffer holds every hierarchy level (+1 spare element).
    NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCV_SET_SKIP_COND(gpuAllocator.isCounting());
    NCV_SKIP_COND_BEGIN
    dim3 block(NUM_REMOVE_THREADS);
    //calculate zero-level partial sums for indices calculation
    if (partSumNums.size() > 2)
    {
        // Multi-level case: the vector spans more than one block.
        dim3 grid(partSumNums[1]);
        if (grid.x > 65535)
        {
            // Fold oversized 1D grids into 2D to stay under the per-dimension limit.
            grid.y = (grid.x + 65534) / 65535;
            grid.x = 65535;
        }
        hipLaunchKernelGGL((removePass1Scan<true, true>),
                           dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
                           d_src, srcLen,
                           d_hierSums.ptr(),
                           d_hierSums.ptr() + partSumOffsets[1],
                           elemRemove);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        //calculate hierarchical partial sums
        for (Ncv32u i=1; i<partSumNums.size()-1; i++)
        {
            dim3 grid_partial(partSumNums[i+1]);
            if (grid_partial.x > 65535)
            {
                grid_partial.y = (grid_partial.x + 65534) / 65535;
                grid_partial.x = 65535;
            }
            if (grid_partial.x != 1)
            {
                // More levels above: record block sums for further scanning.
                hipLaunchKernelGGL((removePass1Scan<false, true>),
                                   dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
                                   d_hierSums.ptr() + partSumOffsets[i],
                                   partSumNums[i], NULL,
                                   d_hierSums.ptr() + partSumOffsets[i+1],
                                   0);
            }
            else
            {
                // Topmost level fits in one block: no block sums needed.
                hipLaunchKernelGGL((removePass1Scan<false, false>),
                                   dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
                                   d_hierSums.ptr() + partSumOffsets[i],
                                   partSumNums[i], NULL,
                                   NULL,
                                   0);
            }
            ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        }
        //adjust hierarchical partial sums: propagate prefixes top-down
        for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--)
        {
            dim3 grid_local(partSumNums[i+1]);
            if (grid_local.x > 65535)
            {
                grid_local.y = (grid_local.x + 65534) / 65535;
                grid_local.x = 65535;
            }
            hipLaunchKernelGGL((removePass2Adjust),
                               dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
                               d_hierSums.ptr() + partSumOffsets[i], partSumNums[i],
                               d_hierSums.ptr() + partSumOffsets[i+1]);
            ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        }
    }
    else
    {
        // Single-block case: one scan produces the final offsets directly.
        dim3 grid_local(partSumNums[1]);
        hipLaunchKernelGGL((removePass1Scan<true, false>),
                           dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
                           d_src, srcLen,
                           d_hierSums.ptr(),
                           NULL, elemRemove);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    }
    //compact source vector using indices
    dim3 grid(partSumNums[1]);
    if (grid.x > 65535)
    {
        grid.y = (grid.x + 65534) / 65535;
        grid.x = 65535;
    }
    hipLaunchKernelGGL((removePass3Compact),
                       dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
                       d_src, srcLen, d_hierSums.ptr(), d_dst,
                       elemRemove, d_numDstElements.ptr());
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    //get number of dst elements
    if (dstLenPinned != NULL)
    {
        // Async copy into pinned host memory, then block until it has landed.
        ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u),
                                           hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
        ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
    }
    NCV_SKIP_COND_END
    return NPPST_SUCCESS;
}
/**
 * Reports the scratch-buffer size (bytes) that nppsStCompact_32u needs for
 * a source vector of srcLen elements, by dry-running the compaction against
 * a counting allocator that records peak usage without allocating.
 */
NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
    if (srcLen == 0)
    {
        *pBufsize = 0;
        return NPPST_SUCCESS;
    }
    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    // The removed-element value is irrelevant for sizing; any marker works.
    NCVStatus sizingStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE,
                                                    gpuCounter);
    ncvAssertReturnNcvStat(sizingStat);
    *pBufsize = (Ncv32u)gpuCounter.maxSize();
    return NPPST_SUCCESS;
}
// Signed 32-bit variant: sizing is identical to the unsigned case.
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    NCVStatus status = nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
    return status;
}
// Float variant: sizing is identical to the unsigned case.
NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    NCVStatus status = nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
    return status;
}
/**
 * Public 32-bit unsigned compaction entry point: wraps the caller-provided
 * scratch buffer in a stack allocator and runs the device compaction.
 * p_dstLen (pinned host memory) receives the number of surviving elements.
 */
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                            Ncv32u *d_dst, Ncv32u *p_dstLen,
                            Ncv32u elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    // All temporary device allocations are served out of pBuffer.
    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize,
                                      static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    NCVStatus compactStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen,
                                                     elemRemove, gpuAllocator);
    ncvAssertReturnNcvStat(compactStat);
    return NPPST_SUCCESS;
}
// Signed 32-bit compaction: compaction only compares bit patterns, so
// delegate to the unsigned implementation (signed/unsigned variants of the
// same width may legally alias).
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
                            Ncv32s *d_dst, Ncv32u *p_dstLen,
                            Ncv32s elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    Ncv32u removeBits = *(Ncv32u *)&elemRemove;
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             removeBits, pBuffer, bufSize, devProp);
}
// Ncv32u with may_alias on newer GCC, so that float values can be
// reinterpreted as raw 32-bit patterns without violating strict aliasing.
// NOTE(review): the minor-version test also rejects e.g. GCC 3.2
// (__GNUC_MINOR__ <= 4 of any major) — confirm this is intentional.
#if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4
typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
#else
typedef Ncv32u Ncv32u_a;
#endif
// Float compaction: reinterpret the removal marker as its raw bits
// (Ncv32u_a carries may_alias, keeping the cast legal) and delegate to the
// unsigned implementation.
NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
                            Ncv32f *d_dst, Ncv32u *p_dstLen,
                            Ncv32f elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    Ncv32u removeBits = *(Ncv32u_a *)&elemRemove;
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             removeBits, pBuffer, bufSize, devProp);
}
/**
 * CPU stream compaction: copies every element of h_src that differs from
 * elemRemove into h_dst, preserving order, and optionally reports the
 * number of survivors via dstLen.
 */
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
                                 Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    Ncv32u kept = 0;
    Ncv32u i = 0;
    while (i < srcLen)
    {
        const Ncv32u value = h_src[i];
        if (value != elemRemove)
        {
            h_dst[kept] = value;
            kept++;
        }
        i++;
    }
    if (dstLen != NULL)
    {
        *dstLen = kept;
    }
    return NPPST_SUCCESS;
}
// Signed host compaction: compare raw bits via the unsigned implementation.
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
                                 Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
    Ncv32u removeBits = *(Ncv32u_a *)&elemRemove;
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, removeBits);
}
// Float host compaction: compare raw bits via the unsigned implementation.
NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
                                 Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
    Ncv32u removeBits = *(Ncv32u_a *)&elemRemove;
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, removeBits);
}
//==============================================================================
//
// Filter.cu
//
//==============================================================================
// 1-D float textures used by the separable filter kernels below:
// texSrc holds the source image, texKernel the convolution taps.
texture <float, 1, hipReadModeElementType> texSrc;
texture <float, 1, hipReadModeElementType> texKernel;
// Fetch row sample at column i from texSrc, mirror-reflecting indices that
// fall outside [0, w).
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
                                                   int i,
                                                   int w)
{
    int col = i;
    if (col < 0) col = 1 - col;
    if (col >= w) col = 2 * w - col - 1;
    return tex1Dfetch (texSrc, rowOffset + col);
}
// Fetch column sample at row j from texSrc, mirror-reflecting indices that
// fall outside [0, h).
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
                                                      const int rowStep,
                                                      int j,
                                                      int h)
{
    int row = j;
    if (row < 0) row = 1 - row;
    if (row >= h) row = 2 * h - row - 1;
    return tex1Dfetch (texSrc, offset + row * rowStep);
}
/**
 * 1-D row convolution with mirrored borders. Each thread produces one
 * output pixel inside the ROI; source pixels are read through texSrc and
 * kernel taps through texKernel. The result is scaled by multiplier.
 */
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
                                              Ncv32f *pDst,
                                              NcvSize32u dstSize,
                                              Ncv32u dstStep,
                                              NcvRect32u roi,
                                              Ncv32s nKernelSize,
                                              Ncv32s nAnchor,
                                              Ncv32f multiplier)
{
    // position within ROI
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }
    // Offset of the leftmost tap relative to the output pixel.
    const int shift = nKernelSize - nAnchor - 1;
    const int rowOffset = (roi.y + iy) * srcStep + roi.x;
    float acc = 0.0f;
    for (int k = 0; k < nKernelSize; ++k)
    {
        const float sample = getValueMirrorRow (rowOffset, ix + k - shift, roi.width);
        acc += sample * tex1Dfetch (texKernel, k);
    }
    pDst[iy * dstStep + ix] = acc * multiplier;
}
/**
 * 1-D column convolution with mirrored borders; the vertical counterpart
 * of FilterRowBorderMirror_32f_C1R. Source pixels are read through texSrc
 * and kernel taps through texKernel; the result is scaled by multiplier.
 */
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
                                                 Ncv32f *pDst,
                                                 NcvSize32u dstSize,
                                                 Ncv32u dstStep,
                                                 NcvRect32u roi,
                                                 Ncv32s nKernelSize,
                                                 Ncv32s nAnchor,
                                                 Ncv32f multiplier)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }
    // Offset of the topmost tap relative to the output pixel.
    const int shift = nKernelSize - nAnchor - 1;
    const int colOffset = (roi.x + ix) + roi.y * srcStep;
    float acc = 0.0f;
    for (int k = 0; k < nKernelSize; ++k)
    {
        const float sample = getValueMirrorColumn (colOffset, srcStep, iy + k - shift, roi.height);
        acc += sample * tex1Dfetch (texKernel, k);
    }
    pDst[ix + iy * dstStep] = acc * multiplier;
}
/**
 * Applies a 1-D row convolution with the given kernel over oROI of pSrc,
 * writing to pDst. Only nppStBorderMirror is implemented; every other
 * border mode returns NPPST_ERROR. Step parameters are byte strides.
 */
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
                                        NcvSize32u srcSize,
                                        Ncv32u nSrcStep,
                                        Ncv32f *pDst,
                                        NcvSize32u dstSize,
                                        Ncv32u nDstStep,
                                        NcvRect32u oROI,
                                        NppStBorderType borderType,
                                        const Ncv32f *pKernel,
                                        Ncv32s nKernelSize,
                                        Ncv32s nAnchor,
                                        Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL &&
                     pDst != NULL &&
                     pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
    // Byte strides -> element strides.
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }
    hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;
    // NOTE(review): hipBindTexture return codes are ignored here; a failed
    // bind would only surface as a kernel-execution error below.
    hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);
    switch (borderType)
    {
    case nppStBorderNone:
        return NPPST_ERROR;
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        hipLaunchKernelGGL((FilterRowBorderMirror_32f_C1R), dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
                           srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }
    return NPPST_SUCCESS;
}
/**
 * Applies a 1-D column convolution with the given kernel over oROI of pSrc,
 * writing to pDst. Only nppStBorderMirror is implemented; every other
 * border mode returns NPPST_ERROR. Step parameters are byte strides.
 *
 * Consistency fix: the nppStBorderNone case is now listed explicitly, as in
 * nppiStFilterRowBorder_32f_C1R (behavior unchanged — the default branch
 * already returned NPPST_ERROR for it).
 */
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
                                           NcvSize32u srcSize,
                                           Ncv32u nSrcStep,
                                           Ncv32f *pDst,
                                           NcvSize32u dstSize,
                                           Ncv32u nDstStep,
                                           NcvRect32u oROI,
                                           NppStBorderType borderType,
                                           const Ncv32f *pKernel,
                                           Ncv32s nKernelSize,
                                           Ncv32s nAnchor,
                                           Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL &&
                     pDst != NULL &&
                     pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
    // Byte strides -> element strides.
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }
    hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;
    // NOTE(review): hipBindTexture return codes are ignored here; a failed
    // bind would only surface as a kernel-execution error below.
    hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));
    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);
    switch (borderType)
    {
    case nppStBorderNone:   // explicit, mirroring nppiStFilterRowBorder_32f_C1R
        return NPPST_ERROR;
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        hipLaunchKernelGGL((FilterColumnBorderMirror_32f_C1R), dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
                           srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }
    return NPPST_SUCCESS;
}
//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================
// Integer ceiling division: smallest count of denom-sized chunks covering num.
// Rewritten overflow-safely: the previous (num + denom - 1) / denom form
// could wrap around for num close to the Ncv32u maximum.
inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
    Ncv32u quot = num / denom;
    return (num % denom != 0) ? quot + 1 : quot;
}
// 2-D float textures holding the two input frames for frame interpolation;
// configured with clamped addressing and bilinear filtering in BlendFrames.
texture<float, 2, hipReadModeElementType> tex_src1;
texture<float, 2, hipReadModeElementType> tex_src0;
/**
 * Blends the two frames bound to tex_src0/tex_src1 at interpolation
 * position theta, using forward flow (u, v), backward flow (ur, vr) and
 * per-pixel coverage masks (o0, o1). One thread per output pixel.
 */
__global__ void BlendFramesKernel(const float *u, const float *v, // forward flow
                                  const float *ur, const float *vr, // backward flow
                                  const float *o0, const float *o1, // coverage masks
                                  int w, int h, int s,
                                  float theta, float *out)
{
    const int ix = threadIdx.x + blockDim.x * blockIdx.x;
    const int iy = threadIdx.y + blockDim.y * blockIdx.y;
    if (ix >= w || iy >= h) return;
    const int pos = ix + s * iy;
    const float fu = u[pos];
    const float fv = v[pos];
    const float bu = ur[pos];
    const float bv = vr[pos];
    // Texture coordinates address pixel centers.
    const float x = (float)ix + 0.5f;
    const float y = (float)iy + 0.5f;
    const bool visible0 = o0[pos] > 1e-4f;
    const bool visible1 = o1[pos] > 1e-4f;
    if (visible0 && visible1)
    {
        // Visible in both frames: blend samples pulled along the forward flow.
        const float from0 = tex2D(tex_src0, x - fu * theta, y - fv * theta);
        const float from1 = tex2D(tex_src1, x + fu * (1.0f - theta), y + fv * (1.0f - theta));
        out[pos] = from0 * (1.0f - theta) + from1 * theta;
    }
    else if (visible0)
    {
        // Visible on the first frame only.
        out[pos] = tex2D(tex_src0, x - fu * theta, y - fv * theta);
    }
    else
    {
        // Visible on the second frame only: sample it along the backward flow.
        out[pos] = tex2D(tex_src1, x - bu * (1.0f - theta), y - bv * (1.0f - theta));
    }
}
/**
 * Configures the two frame textures (clamped addressing, bilinear
 * filtering) and launches BlendFramesKernel to synthesize the frame at
 * position theta between src0 and src1. stride is given in elements.
 */
NCVStatus BlendFrames(const Ncv32f *src0,
                      const Ncv32f *src1,
                      const Ncv32f *ufi,
                      const Ncv32f *vfi,
                      const Ncv32f *ubi,
                      const Ncv32f *vbi,
                      const Ncv32f *o1,
                      const Ncv32f *o2,
                      Ncv32u width,
                      Ncv32u height,
                      Ncv32u stride,
                      Ncv32f theta,
                      Ncv32f *out)
{
    // Both frame textures use clamped addressing with bilinear filtering;
    // coordinates are unnormalized (pixel units).
    tex_src1.addressMode[0] = hipAddressModeClamp;
    tex_src1.addressMode[1] = hipAddressModeClamp;
    tex_src1.filterMode = hipFilterModeLinear;
    tex_src1.normalized = false;
    tex_src0.addressMode[0] = hipAddressModeClamp;
    tex_src0.addressMode[1] = hipAddressModeClamp;
    tex_src0.filterMode = hipFilterModeLinear;
    tex_src0.normalized = false;
    hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
    const Ncv32u pitch = stride * sizeof (float);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
    dim3 threads (32, 4);
    dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));
    hipLaunchKernelGGL((BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (),
                       ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Frame interpolation needs exactly the scratch space of the vector-warp stage.
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
                                           Ncv32u nStep,
                                           Ncv32u *hpSize)
{
    return nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
}
/**
 * Synthesizes the intermediate frame at fractional position pState->pos:
 * forward-warps the forward flow (pFU/pFV) to position pos, forward-warps
 * the backward flow (pBU/pBV) to position 1-pos, then blends the two source
 * frames with BlendFrames using the warped flows and coverage masks.
 *
 * BUGFIX: the fourth warp call (backward V component) previously wrote its
 * result into bwdU, clobbering the already-warped backward U plane and
 * leaving bwdV with stale data; it now writes to bwdV as intended.
 */
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
    // check state validity
    ncvAssertReturn (pState->pSrcFrame0 != 0 &&
                     pState->pSrcFrame1 != 0 &&
                     pState->pFU != 0 &&
                     pState->pFV != 0 &&
                     pState->pBU != 0 &&
                     pState->pBV != 0 &&
                     pState->pNewFrame != 0 &&
                     pState->ppBuffers[0] != 0 &&
                     pState->ppBuffers[1] != 0 &&
                     pState->ppBuffers[2] != 0 &&
                     pState->ppBuffers[3] != 0 &&
                     pState->ppBuffers[4] != 0 &&
                     pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (pState->size.width > 0 &&
                     pState->size.height > 0, NPPST_ERROR);
    ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
                     pState->nStep > 0 &&
                     pState->nStep % sizeof (Ncv32f) == 0,
                     NPPST_INVALID_STEP);
    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v
    // warp forward flow to position pos (cov0 collects coverage)
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdV) );
    // warp backward flow to position 1 - pos (cov1 collects coverage)
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdV) ); // BUGFIX: was bwdU
    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0,
                     pState->pSrcFrame1,
                     fwdU,
                     fwdV,
                     bwdU,
                     bwdV,
                     cov0,
                     cov1,
                     pState->size.width,
                     pState->size.height,
                     pState->nStep / sizeof (Ncv32f),
                     pState->pos,
                     pState->pNewFrame) );
    return NPPST_SUCCESS;
}
//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
// FP32 atomic add emulation for pre-Fermi (sm_1x) devices, built on a
// 32-bit compare-and-swap loop over the float's bit pattern.
// NOTE(review): int_as_float / float_as_int / __iAtomicCAS are legacy
// spellings; this branch only compiles when targeting __CUDA_ARCH__ < 200,
// so it is effectively dead on modern toolchains — confirm before relying on it.
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
    float old = *addr, assumed;
    do {
        assumed = old;
        // CAS succeeds only if *addr still holds 'assumed'; otherwise retry
        // with the freshly observed value.
        old = int_as_float(__iAtomicCAS((int*)addr,
                                        float_as_int(assumed),
                                        float_as_int(val+assumed)));
    } while( assumed!=old );
    return old;
}
#else
// Native float atomicAdd exists on SM20+.
#define _atomicAdd atomicAdd
#endif
/**
 * Forward-warps src with a 2x2 bilinear point-spread function: each source
 * pixel's value is splatted into the four destination pixels surrounding
 * its flow-displaced position, with bilinear weights. The weights are also
 * accumulated into normalization_factor so a later NormalizeKernel pass can
 * divide them out. One thread per source pixel; writes use atomics because
 * multiple source pixels may land on the same destination pixel.
 */
__global__ void ForwardWarpKernel_PSF2x2(const float *u,
                                         const float *v,
                                         const float *src,
                                         const int w,
                                         const int h,
                                         const int flow_stride,
                                         const int image_stride,
                                         const float time_scale,
                                         float *normalization_factor,
                                         float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    if (i >= h || j >= w) return;
    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;
    //bottom left corner of a target pixel
    float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
    float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;
    // pixel containing bottom left corner
    float px;
    float py;
    // modff splits into fractional (bilinear weights) and integer parts.
    float dx = modff (cx, &px);
    float dy = modff (cy, &py);
    // target pixel integer coords
    int tx;
    int ty;
    tx = (int) px;
    ty = (int) py;
    float value = src[image_row_offset + j];
    float weight;
    // fill pixel containing bottom right corner
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing bottom left corner
    tx -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper left corner
    ty -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper right corner
    tx += 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
}
/**
 * Forward-warps src with a 1x1 point-spread function: each source pixel is
 * pushed along its scaled flow vector and accumulated (atomically) into the
 * single nearest destination pixel. Unlike the 2x2 variant, no
 * normalization pass is needed afterwards.
 */
__global__ void ForwardWarpKernel_PSF1x1(const float *u,
                                         const float *v,
                                         const float *src,
                                         const int w,
                                         const int h,
                                         const int flow_stride,
                                         const int image_stride,
                                         const float time_scale,
                                         float *dst)
{
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    if (y >= h || x >= w) return;
    const int flowIdx = y * flow_stride + x;
    // Bottom-left corner of the target pixel (the +1 offsets match the
    // convention of the 2x2 PSF kernel above).
    const float cx = u[flowIdx] * time_scale + (float)x + 1.0f;
    const float cy = v[flowIdx] * time_scale + (float)y + 1.0f;
    // Round to the nearest destination pixel.
    const int tx = __float2int_rn (cx);
    const int ty = __float2int_rn (cy);
    const float value = src[y * image_stride + x];
    // Accumulate only when the target lands inside the image.
    if (tx >= 0 && tx < w && ty >= 0 && ty < h)
    {
        _atomicAdd (dst + ty * image_stride + tx, value);
    }
}
// Divides each pixel of `image` by its accumulated splat weight from
// `normalization_factor`. Pixels whose weight is exactly 0.0f received
// no contribution and are left unchanged (multiplied by 1).
// s is the element pitch shared by both buffers.
__global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image)
{
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    const int x = threadIdx.x + blockDim.x * blockIdx.x;

    if (y >= h || x >= w) return;

    const int idx = y * s + x;
    const float weight = normalization_factor[idx];

    // Exact-zero test is intentional: an untouched pixel accumulated
    // exactly 0.0f, and dividing by it would produce inf/NaN.
    image[idx] *= (weight == 0.0f) ? 1.0f : (1.0f / weight);
}
// Fills a w x h float image with a constant value, one thread per pixel.
// NOTE(review): rows are addressed with pitch == w (pos = i * w + j),
// i.e. this kernel assumes a tightly packed image with no row padding.
// The PSF2x2 wrapper later reads the same buffer with the source's
// element pitch -- if that pitch differs from w, padding bytes stay
// uninitialized. Verify callers always use pitch == width here.
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= h || j >= w) return;
    const int pos = i * w + j;
    image[pos] = value;
}
// Reports the scratch-buffer size (in bytes) required by the
// normalized vector-warp routine (nppiStVectorWarp_PSF2x2_32f_C1):
// one full pitched image of per-pixel weights.
//
//  srcSize  - source image dimensions
//  nSrcStep - source row pitch in bytes; must cover the row width
//  hpSize   - out: required buffer size in bytes
NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize)
{
    ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep,
                     NPPST_INVALID_STEP);
    // One float weight per pixel, same pitch as the source image.
    *hpSize = nSrcStep * srcSize.height;
    return NPPST_SUCCESS;
}
// does not require normalization
// Host wrapper: forward-warps pSrc by the flow field (pU, pV) scaled by
// timeScale using the 1x1 PSF kernel. The result needs no
// normalization pass. nSrcStep/nVFStep are byte pitches and must cover
// the row width. Launches on the active NPP_staging stream.
NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
                     pU != NULL &&
                     pV != NULL &&
                     pDst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     srcSize.width * sizeof (Ncv32f) <= nVFStep,
                     NPPST_INVALID_STEP);

    // Convert byte pitches to element pitches for the kernel.
    const Ncv32u imageStride = nSrcStep / sizeof (Ncv32f);
    const Ncv32u flowStride  = nVFStep / sizeof (Ncv32f);

    // One thread per source pixel.
    const dim3 block (32, 6);
    const dim3 grid (iDivUp (srcSize.width, block.x), iDivUp (srcSize.height, block.y));

    hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        pU, pV, pSrc, srcSize.width, srcSize.height, flowStride, imageStride, timeScale, pDst);

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Host wrapper: forward-warps pSrc by the flow field using the 2x2 PSF
// kernel. Contributions are bilinearly splatted into pDst while the
// per-pixel weights accumulate in pBuffer; a final pass divides pDst
// by those weights. pBuffer must hold at least the size reported by
// nppiStVectorWarpGetBufferSize.
NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f *pBuffer,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
                     pU != NULL &&
                     pV != NULL &&
                     pDst != NULL &&
                     pBuffer != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP);

    // Byte pitches -> element pitches.
    const Ncv32u imageStride = nSrcStep / sizeof (Ncv32f);
    const Ncv32u flowStride  = nVFStep / sizeof (Ncv32f);

    const dim3 block (32, 6);
    const dim3 grid (iDivUp (srcSize.width, block.x), iDivUp (srcSize.height, block.y));

    // Pass 1: clear the weight accumulator.
    hipLaunchKernelGGL(( MemsetKernel) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        0, srcSize.width, srcSize.height, pBuffer);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    // Pass 2: splat warped pixel values into pDst and their bilinear
    // weights into pBuffer.
    hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        pU, pV, pSrc, srcSize.width, srcSize.height, flowStride, imageStride, timeScale, pBuffer, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    // Pass 3: normalize pDst by the accumulated weights.
    hipLaunchKernelGGL(( NormalizeKernel) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        pBuffer, srcSize.width, srcSize.height, imageStride, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}
//==============================================================================
//
// Resize.cu
//
//==============================================================================
texture <float, 2, hipReadModeElementType> texSrc2D;
// Accumulates one source row's weighted contribution to a supersampled
// output pixel. Samples run from ixmin to ixmax (inclusive) starting at
// linear texture offset spos; the first and last samples receive
// fractional weights so the window covers exactly [xmin, xmax].
//
//  spos  - linear offset of the first sample in the texSrc texture
//  xmin  - left edge of the sampling window (fractional)
//  xmax  - right edge of the sampling window (fractional)
//  ixmin - integer column of the first sample
//  ixmax - integer column of the last sample
//  fxmin - floorf of the window's left edge (first sample position)
//  cxmax - ceilf of the window's right edge (last sample position)
//
// Returns the weighted average of the sampled values.
__forceinline__
__device__ float processLine(int spos,
                             float xmin,
                             float xmax,
                             int ixmin,
                             int ixmax,
                             float fxmin,
                             float cxmax)
{
    // first element: partial coverage weight 1 - (xmin - fxmin)
    float wsum = 1.0f - xmin + fxmin;
    float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin);
    spos++;
    // interior elements are fully covered (weight 1 each)
    for (int ix = ixmin + 1; ix < ixmax; ++ix)
    {
        sum += tex1Dfetch(texSrc, spos);
        spos++;
        wsum += 1.0f;
    }
    // last element: partial coverage weight (cxmax - xmax)
    sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax);
    wsum += cxmax - xmax;
    return sum / wsum;
}
// Area-averaging (supersampling) resize kernel for 32f images.
// Each output pixel averages all source samples in a +/-scaleX by
// +/-scaleY window around its back-projected position, with fractional
// weights on the window borders (rows via processLine, columns here).
// Source pixels are read through the 1D texture texSrc bound by the
// host wrapper. Launch: 2D grid, one thread per destination-ROI pixel.
__global__ void resizeSuperSample_32f(NcvSize32u srcSize,
                                      Ncv32u srcStep,
                                      NcvRect32u srcROI,
                                      Ncv32f *dst,
                                      NcvSize32u dstSize,
                                      Ncv32u dstStep,
                                      NcvRect32u dstROI,
                                      Ncv32f scaleX,
                                      Ncv32f scaleY)
{
    // position within dst ROI
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= dstROI.width || iy >= dstROI.height)
    {
        return;
    }
    float rw = (float) srcROI.width;
    float rh = (float) srcROI.height;
    // back-projected source position
    float x = scaleX * (float) ix;
    float y = scaleY * (float) iy;
    // x sampling range, clamped to the source ROI
    float xBegin = fmax (x - scaleX, 0.0f);
    float xEnd = fmin (x + scaleX, rw - 1.0f);
    // y sampling range, clamped to the source ROI
    float yBegin = fmax (y - scaleY, 0.0f);
    float yEnd = fmin (y + scaleY, rh - 1.0f);
    // x range of source samples (absolute columns inside srcROI)
    float floorXBegin = floorf (xBegin);
    float ceilXEnd = ceilf (xEnd);
    int iXBegin = srcROI.x + (int) floorXBegin;
    int iXEnd = srcROI.x + (int) ceilXEnd;
    // y range of source samples (absolute rows inside srcROI)
    float floorYBegin = floorf (yBegin);
    float ceilYEnd = ceilf (yEnd);
    int iYBegin = srcROI.y + (int) floorYBegin;
    int iYEnd = srcROI.y + (int) ceilYEnd;
    // first row: fractional vertical coverage 1 - (yBegin - floorYBegin)
    int pos = iYBegin * srcStep + iXBegin;
    float wsum = 1.0f - yBegin + floorYBegin;
    float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                             ceilXEnd) * (1.0f - yBegin + floorYBegin);
    pos += srcStep;
    // interior rows carry full weight
    // (NOTE: this loop variable intentionally shadows the outer 'iy')
    for (int iy = iYBegin + 1; iy < iYEnd; ++iy)
    {
        sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                            ceilXEnd);
        pos += srcStep;
        wsum += 1.0f;
    }
    // last row: fractional vertical coverage (ceilYEnd - yEnd)
    sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                        ceilXEnd) * (ceilYEnd - yEnd);
    wsum += ceilYEnd - yEnd;
    sum /= wsum;
    dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum;
}
// bicubic interpolation
// Bicubic interpolation weight (a = -0.5 Catmull-Rom-style spline) for
// a sample at signed distance x_ from the interpolation point.
// Non-zero only for |x| < 2:
//   |x| <= 1 : 1.5|x|^3 - 2.5|x|^2 + 1
//   1 < |x| < 2 : -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2
__forceinline__
__device__ float bicubicCoeff(float x_)
{
    const float x = fabsf(x_);
    if (x <= 1.0f)
        return x * x * (1.5f * x - 2.5f) + 1.0f;  // Horner form
    if (x < 2.0f)
        return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
    return 0.0f;
}
// Bicubic resize kernel. Source pixels are read through the normalized
// 2D texture texSrc2D configured with mirror addressing by the host
// wrapper, so out-of-range taps are handled by the texture unit. Each
// output pixel is a weighted sum over the up-to-4x4 neighbourhood of
// its back-projected source position, weighted by bicubicCoeff per
// axis. Launch: 2D grid, one thread per destination-ROI pixel.
__global__ void resizeBicubic(NcvSize32u srcSize,
                              NcvRect32u srcROI,
                              NcvSize32u dstSize,
                              Ncv32u dstStep,
                              Ncv32f *dst,
                              NcvRect32u dstROI,
                              Ncv32f scaleX,
                              Ncv32f scaleY)
{
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= dstROI.width || iy >= dstROI.height)
    {
        return;
    }
    // texture coordinates are normalized by the ROI extent
    const float dx = 1.0f / srcROI.width;
    const float dy = 1.0f / srcROI.height;
    float rx = (float) srcROI.x;
    float ry = (float) srcROI.y;
    float rw = (float) srcROI.width;
    float rh = (float) srcROI.height;
    // back-projected source position
    float x = scaleX * (float) ix;
    float y = scaleY * (float) iy;
    // sampling range: integer tap positions within +/-2 of (x, y)
    // border mode is clamp
    float xmin = fmax (ceilf (x - 2.0f), 0.0f);
    float xmax = fmin (floorf (x + 2.0f), rw - 1.0f);
    float ymin = fmax (ceilf (y - 2.0f), 0.0f);
    float ymax = fmin (floorf (y + 2.0f), rh - 1.0f);
    // shift data window to match ROI (+0.5 targets texel centres)
    rx += 0.5f;
    ry += 0.5f;
    x += rx;
    y += ry;
    xmin += rx;
    xmax += rx;
    ymin += ry;
    ymax += ry;
    float sum = 0.0f;
    float wsum = 0.0f;
    // accumulate weighted taps over the (at most 4x4) window
    for (float cy = ymin; cy <= ymax; cy += 1.0f)
    {
        for (float cx = xmin; cx <= xmax; cx += 1.0f)
        {
            float xDist = x - cx;
            float yDist = y - cy;
            float wx = bicubicCoeff (xDist);
            float wy = bicubicCoeff (yDist);
            wx *= wy;
            sum += wx * tex2D (texSrc2D, cx * dx, cy * dy);
            wsum += wx;
        }
    }
    // exact-zero weight guard: empty window writes 0 instead of NaN
    dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum;
}
/**
 * Resizes a single-channel 32f image by (xFactor, yFactor) using either
 * area-averaging supersampling or bicubic filtering.
 *
 *  pSrc, pDst          - device pointers
 *  nSrcStep, nDstStep  - row pitches in bytes; must cover the row width
 *  srcROI, dstROI      - regions of interest (not yet validated against
 *                        the image bounds -- see TODO below)
 *  interpolation       - nppStSupersample or nppStBicubic
 *
 * Returns NPPST_SUCCESS, NPPST_ERROR for an unknown interpolation mode,
 * or another NPPST_* error code.
 */
NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc,
                               NcvSize32u srcSize,
                               Ncv32u nSrcStep,
                               NcvRect32u srcROI,
                               Ncv32f *pDst,
                               NcvSize32u dstSize,
                               Ncv32u nDstStep,
                               NcvRect32u dstROI,
                               Ncv32f xFactor,
                               Ncv32f yFactor,
                               NppStInterpMode interpolation)
{
    NCVStatus status = NPPST_SUCCESS;
    ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE);
    // Bug fix: dstSize.width was cast to Ncv32f (a float) instead of
    // Ncv32u, turning the destination step check into a float compare.
    ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width &&
                     nDstStep >= sizeof (Ncv32f) * (Ncv32u) dstSize.width,
                     NPPST_INVALID_STEP);
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // TODO: preprocess ROI to prevent out of bounds access
    if (interpolation == nppStSupersample)
    {
        // bind the source as a linear 1D texture for processLine()
        hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep);
        // invoke kernel: one thread per destination-ROI pixel
        dim3 ctaSize (32, 6);
        dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x,
                       (dstROI.height + ctaSize.y - 1) / ctaSize.y);
        hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
            srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor);
    }
    else if (interpolation == nppStBicubic)
    {
        // normalized 2D texture with mirror addressing keeps
        // out-of-range bicubic taps well-defined
        texSrc2D.addressMode[0] = hipAddressModeMirror;
        texSrc2D.addressMode[1] = hipAddressModeMirror;
        texSrc2D.normalized = true;
        hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
        hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height,
                          nSrcStep);
        dim3 ctaSize (32, 6);
        dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x,
                       (dstSize.height + ctaSize.y - 1) / ctaSize.y);
        hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
            srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor);
    }
    else
    {
        status = NPPST_ERROR;
    }
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return status;
}
#endif /* CUDA_DISABLER */
| 1f3c253ca53a405338a0058857e7baadd8010e0c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <vector>
#include <cuda_runtime.h>
#include "NPP_staging.hpp"
#include "opencv2/gpu/device/warp.hpp"
#include "opencv2/gpu/device/warp_shuffle.hpp"
texture<Ncv8u, 1, cudaReadModeElementType> tex8u;
texture<Ncv32u, 1, cudaReadModeElementType> tex32u;
texture<uint2, 1, cudaReadModeElementType> tex64u;
//==============================================================================
//
// CUDA streams handling
//
//==============================================================================
static cudaStream_t nppStream = 0;
// Returns the CUDA stream on which all NPP_staging kernels are
// launched (stream 0 unless overridden via nppStSetActiveCUDAstream).
cudaStream_t nppStGetActiveCUDAstream(void)
{
    return nppStream;
}
// Installs cudaStream as the stream for subsequent NPP_staging kernel
// launches and returns the previously active stream so callers can
// restore it later. Not thread-safe: nppStream is a plain static.
cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream)
{
    cudaStream_t previous = nppStream;
    nppStream = cudaStream;
    return previous;
}
//==============================================================================
//
// BlockScan.cuh
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
// Inclusive prefix sum across one warp (K_WARP_SIZE == 32 lanes).
// On SM30+ it runs entirely in registers via shuffle-up; on older
// devices it falls back to a shared-memory Kogge-Stone scan with no
// __syncthreads(), relying on warp-synchronous execution (hence the
// volatile s_Data). s_Data must provide 2 * K_WARP_SIZE elements per
// warp; the lower half is zero-filled so the fixed-stride reads below
// never leave the buffer.
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
#if __CUDA_ARCH__ >= 300
    const unsigned int laneId = cv::gpu::device::Warp::laneId();
    // scan via shuffle: each step adds the value held i lanes below
#pragma unroll
    for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
    {
        const T n = cv::gpu::device::shfl_up(idata, i);
        if (laneId >= i)
            idata += n;
    }
    return idata;
#else
    // lane base into the zero-padded double-width staging area
    Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
    s_Data[pos] = 0;
    pos += K_WARP_SIZE;
    s_Data[pos] = idata;
    // unrolled scan steps with strides 1, 2, 4, 8, 16
    s_Data[pos] += s_Data[pos - 1];
    s_Data[pos] += s_Data[pos - 2];
    s_Data[pos] += s_Data[pos - 4];
    s_Data[pos] += s_Data[pos - 8];
    s_Data[pos] += s_Data[pos - 16];
    return s_Data[pos];
#endif
}
// 64-bit overload of the warp-inclusive scan. Always takes the
// shared-memory path (no shuffle variant is used for 64-bit here),
// with the same zero-padded double-width buffer layout as the generic
// pre-SM30 branch above.
inline __device__ Ncv64u warpScanInclusive(Ncv64u idata, volatile Ncv64u *s_Data)
{
    // lane base into the zero-padded double-width staging area
    Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
    s_Data[pos] = 0;
    pos += K_WARP_SIZE;
    s_Data[pos] = idata;
    // unrolled scan steps with strides 1, 2, 4, 8, 16
    s_Data[pos] += s_Data[pos - 1];
    s_Data[pos] += s_Data[pos - 2];
    s_Data[pos] += s_Data[pos - 4];
    s_Data[pos] += s_Data[pos - 8];
    s_Data[pos] += s_Data[pos - 16];
    return s_Data[pos];
}
// Exclusive warp scan, derived from the inclusive scan by subtracting
// each lane's own contribution. Same s_Data requirements as
// warpScanInclusive.
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
    const T inclusive = warpScanInclusive(idata, s_Data);
    return inclusive - idata;
}
// Inclusive prefix sum across a thread block of tiNumScanThreads
// threads (compile-time constant). Strategy: per-warp inclusive scans,
// then an exclusive scan of the per-warp totals, then every thread
// adds its warp's base offset. s_Data is shared scratch (reused for
// both phases); all threads of the block must call this together
// because of the __syncthreads() barriers.
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
    if (tiNumScanThreads > K_WARP_SIZE)
    {
        //Bottom-level inclusive warp scan
        T warpResult = warpScanInclusive(idata, s_Data);
        //Save top elements of each warp for exclusive warp scan
        //sync to wait for warp scans to complete (because s_Data is being overwritten)
        __syncthreads();
        if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
        {
            // last lane of each warp publishes the warp total
            s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
        }
        //wait for warp scans to complete
        __syncthreads();
        if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
        {
            //grab top warp elements
            T val = s_Data[threadIdx.x];
            //calculate exclusive scan and write back to shared memory
            s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
        }
        //return updated warp scans with exclusive scan results
        __syncthreads();
        return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
    }
    else
    {
        // a single warp covers the whole block; no barriers needed
        return warpScanInclusive(idata, s_Data);
    }
}
//==============================================================================
//
// IntegralImage.cu
//
//==============================================================================
const Ncv32u NUM_SCAN_THREADS = 256;
const Ncv32u LOG2_NUM_SCAN_THREADS = 8;
// Maps an input pixel to its integral-image contribution: the value
// itself, or its square when tbDoSqr is set (used for squared
// integral images). Callable from host and device code.
template<class T_in, class T_out>
struct _scanElemOp
{
    template<bool tbDoSqr>
    static inline __host__ __device__ T_out scanElemOp(T_in elem)
    {
        // tbDoSqr is a compile-time constant, so the dead branch of the
        // conditional folds away; no tag-dispatch helpers are needed.
        return tbDoSqr ? (T_out)(elem * elem) : (T_out)elem;
    }
};
// Reads one source element for the row-scan kernel. The 8-bit
// specialization goes through the tex8u texture (bound by
// scanRowsWrapperDevice; texOffs compensates for the binding's
// alignment offset), while 32-bit integer and float sources are read
// directly from global memory (the caller pre-advances d_src to the
// current row for those).
template<class T>
inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs);
template<>
inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
    // one thread block scans one row, selected by blockIdx.x
    return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs);
}
template<>
inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
    // d_src already points at the current row
    return d_src[curElemOffs];
}
template<>
inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs)
{
    // d_src already points at the current row
    return d_src[curElemOffs];
}
/**
* \brief Segmented scan kernel
*
* Calculates per-row prefix scans of the input image.
* Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements
*
* \tparam T_in Type of input image elements
* \tparam T_out Type of output image elements
 * \tparam tbDoSqr When true, each input pixel is squared before being accumulated (used for squared integral images)
*
* \param d_src [IN] Source image pointer
* \param srcWidth [IN] Source image width
* \param srcStride [IN] Source image stride
* \param d_II [OUT] Output image pointer
* \param IIstride [IN] Output image stride
*
* \return None
*/
// See the doxygen block above: per-row exclusive prefix scans, one
// thread block per row, writing srcWidth+1 outputs per row (a leading
// zero plus the running sums). The row is processed in buckets of
// NUM_SCAN_THREADS elements; carryElem carries the running total of
// all completed buckets into the next one.
template <class T_in, class T_out, bool tbDoSqr>
__global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride,
                         T_out *d_II, Ncv32u IIstride)
{
    //advance pointers to the current line
    if (sizeof(T_in) != 1)
    {
        d_src += srcStride * blockIdx.x;
    }
    //for initial image 8bit source we use texref tex8u
    d_II += IIstride * blockIdx.x;
    Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS;
    Ncv32u offsetX = 0;
    __shared__ T_out shmem[NUM_SCAN_THREADS * 2];
    __shared__ T_out carryElem;   // running total of previous buckets
    if (threadIdx.x == 0)
        carryElem = 0;
    __syncthreads();
    while (numBuckets--)
    {
        Ncv32u curElemOffs = offsetX + threadIdx.x;
        T_out curScanElem;
        T_in curElem = 0;   // out-of-range lanes contribute 0
        T_out curElemMod;
        if (curElemOffs < srcWidth)
        {
            //load elements
            curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs);
        }
        // optionally square the element (squared integral images)
        curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem);
        //inclusive scan (all threads must participate -- no divergence)
        curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem);
        if (curElemOffs <= srcWidth)
        {
            //make scan exclusive and write the bucket to the output buffer
            // note '<=': index srcWidth is valid, output has width+1 slots
            d_II[curElemOffs] = carryElem + curScanElem - curElemMod;
            offsetX += NUM_SCAN_THREADS;
        }
        //remember last element for subsequent buckets adjustment
        __syncthreads();
        if (threadIdx.x == NUM_SCAN_THREADS-1)
        {
            carryElem += curScanElem;
        }
        __syncthreads();
    }
    // when the row length is an exact multiple of the bucket size, the
    // final (total) element was never written inside the loop; thread 0
    // appends it here
    if (offsetX == srcWidth && !threadIdx.x)
    {
        d_II[offsetX] = carryElem;
    }
}
// Host-side launcher for scanRows: one thread block per image row.
// For 8-bit sources the input is bound to the tex8u texture; if the
// binding reports a non-zero alignment offset, the texture is re-bound
// with the size extended by that offset and the offset is forwarded to
// the kernel so element indexing stays correct.
template <bool tbDoSqr, class T_in, class T_out>
NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride,
                                T_out *d_dst, Ncv32u dstStride, NcvSize32u roi)
{
    cudaChannelFormatDesc cfdTex;
    size_t alignmentOffset = 0;
    if (sizeof(T_in) == 1)
    {
        cfdTex = cudaCreateChannelDesc<Ncv8u>();
        ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
        if (alignmentOffset > 0)
        {
            // rebind with the size grown by the offset so the last
            // elements remain addressable through the texture
            ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR);
            ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR);
        }
    }
    scanRows
        <T_in, T_out, tbDoSqr>
        <<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>>
        (d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Rounds a row of `dim` elements of `elemTypeSize` bytes up to the next
// whole multiple of `allocatorAlignment` bytes and returns the padded
// length in elements. allocatorAlignment must be a power of two.
static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment)
{
    const Ncv32u alignMask = allocatorAlignment - 1;
    const Ncv32u rowBytes = dim * elemTypeSize;
    // classic round-up-to-power-of-two-multiple idiom
    const Ncv32u paddedBytes = (rowBytes + alignMask) & ~alignMask;
    return paddedBytes / elemTypeSize;
}
// Core integral-image pipeline (device). Produces in d_dst the
// (W+1)x(H+1) exclusive 2D prefix sum of the WxH source via:
//   row scans -> transpose -> row scans -> transpose.
// Temporary matrices come from gpuAllocator; when it is a counting
// allocator the kernels are skipped (NCV_SKIP_COND) and only the
// allocation sizes are recorded -- this is how the GetSize entry
// points measure the required scratch space.
// T_out must be a 32-bit type (the transpose operates on Ncv32u).
// srcStep/dstStep are byte pitches on entry.
template <class T_in, class T_out>
NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep,
                                  T_out *d_dst, Ncv32u dstStep, NcvSize32u roi,
                                  INCVMemAllocator &gpuAllocator)
{
    ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
                    gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) &&
                    dstStep >= (roi.width + 1) * sizeof(T_out) &&
                    srcStep % sizeof(T_in) == 0 &&
                    dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP);
    // steps are in elements from here on
    srcStep /= sizeof(T_in);
    dstStep /= sizeof(T_out);
    // the integral image gains one extra row and column of zeros
    Ncv32u WidthII = roi.width + 1;
    Ncv32u HeightII = roi.height + 1;
    Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
    Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
    NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
    ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32);
    ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR);
    NCVStatus ncvStat;
    NCV_SET_SKIP_COND(gpuAllocator.isCounting());
    NCV_SKIP_COND_BEGIN
    // pass 1: horizontal prefix sums of the source rows
    ncvStat = scanRowsWrapperDevice
        <false>
        (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi);
    ncvAssertReturnNcvStat(ncvStat);
    ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u),
                                      (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
    ncvAssertReturnNcvStat(ncvStat);
    // pass 2: scan the transposed rows (i.e. the original columns)
    ncvStat = scanRowsWrapperDevice
        <false>
        (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII));
    ncvAssertReturnNcvStat(ncvStat);
    // transpose back into the caller's destination layout
    ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u),
                                      (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII));
    ncvAssertReturnNcvStat(ncvStat);
    NCV_SKIP_COND_END
    return NPPST_SUCCESS;
}
// Squared integral image (device): like ncvIntegralImage_device, but
// the first row scan squares each 8-bit pixel (tbDoSqr = true) and the
// second pass widens to 64-bit to avoid overflow. Tmp64's storage is
// viewed both as a 32-bit and as a 64-bit matrix (NCVMatrixReuse) to
// keep the scratch footprint down. dstStep is a byte pitch on entry.
NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep,
                                         Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi,
                                         INCVMemAllocator &gpuAllocator)
{
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice ||
                    gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR);
    ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStep >= roi.width &&
                    dstStep >= (roi.width + 1) * sizeof(Ncv64u) &&
                    dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP);
    // destination step in elements from here on
    dstStep /= sizeof(Ncv64u);
    Ncv32u WidthII = roi.width + 1;
    Ncv32u HeightII = roi.height + 1;
    Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment());
    Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment());
    Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment());
    Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment());
    // Tmp64 must hold whichever view (32- or 64-bit) is larger
    Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64;
    Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? PaddedHeightII32 : PaddedHeightII64;
    NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32);
    ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax);
    ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    // two aliased views over Tmp64's storage (never live simultaneously
    // in a way that conflicts: pass 1 writes Tmp32_2, pass 2 writes
    // Tmp64_2 after Tmp32_2's data has been transposed out)
    NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32);
    ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
    NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64);
    ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR);
    NCVStatus ncvStat;
    NCV_SET_SKIP_COND(gpuAllocator.isCounting());
    NCV_SKIP_COND_BEGIN
    // pass 1: row scans of squared pixels (8u -> 32u)
    ncvStat = scanRowsWrapperDevice
        <true, Ncv8u, Ncv32u>
        (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi);
    ncvAssertReturnNcvStat(ncvStat);
    ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u),
                                      Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height));
    ncvAssertReturnNcvStat(ncvStat);
    // pass 2: column scans, widening to 64-bit (32u -> 64u)
    ncvStat = scanRowsWrapperDevice
        <false, Ncv32u, Ncv64u>
        (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII));
    ncvAssertReturnNcvStat(ncvStat);
    ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u),
                                      d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII));
    ncvAssertReturnNcvStat(ncvStat);
    NCV_SKIP_COND_END
    return NPPST_SUCCESS;
}
// Reports the scratch-buffer size (bytes) needed by
// nppiStIntegral_8u32u_C1R for a roiSize image, by dry-running the
// pipeline with a counting allocator and reading its high-water mark.
NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);

    // counting allocator: records requested sizes, allocates nothing
    NCVMemStackAllocator counter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(counter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width,
                                                (Ncv32u*)NULL, (roiSize.width + 1) * sizeof(Ncv32u),
                                                roiSize, counter);
    ncvAssertReturnNcvStat(ncvStat);

    *pBufsize = (Ncv32u)counter.maxSize();
    return NPPST_SUCCESS;
}
// Reports the scratch-buffer size (bytes) needed by
// nppiStIntegral_32f32f_C1R for a roiSize image, by dry-running the
// pipeline with a counting allocator and reading its high-water mark.
NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);

    // counting allocator: records requested sizes, allocates nothing
    NCVMemStackAllocator counter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(counter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f),
                                                (Ncv32f*)NULL, (roiSize.width + 1) * sizeof(Ncv32f),
                                                roiSize, counter);
    ncvAssertReturnNcvStat(ncvStat);

    *pBufsize = (Ncv32u)counter.maxSize();
    return NPPST_SUCCESS;
}
// Reports the scratch-buffer size (bytes) needed by
// nppiStSqrIntegral_8u64u_C1R for a roiSize image, by dry-running the
// squared-integral pipeline with a counting allocator.
NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);

    // counting allocator: records requested sizes, allocates nothing
    NCVMemStackAllocator counter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(counter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width,
                                                       NULL, (roiSize.width + 1) * sizeof(Ncv64u),
                                                       roiSize, counter);
    ncvAssertReturnNcvStat(ncvStat);

    *pBufsize = (Ncv32u)counter.maxSize();
    return NPPST_SUCCESS;
}
// Device integral image, 8u input -> 32u output. pBuffer is scratch
// memory of at least the size reported by nppiStIntegralGetSize_8u32u;
// steps are byte pitches.
NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep,
                                   Ncv32u *d_dst, Ncv32u dstStep,
                                   NcvSize32u roiSize, Ncv8u *pBuffer,
                                   Ncv32u bufSize, cudaDeviceProp &devProp)
{
    // serve temporary matrices from the caller-supplied scratch block
    NCVMemStackAllocator allocator(NCVMemoryTypeDevice, bufSize,
                                   static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(allocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, allocator);
    ncvAssertReturnNcvStat(ncvStat);

    return NPPST_SUCCESS;
}
// Device integral image, 32f input -> 32f output. pBuffer is scratch
// memory of at least the size reported by nppiStIntegralGetSize_32f32f;
// steps are byte pitches.
NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep,
                                    Ncv32f *d_dst, Ncv32u dstStep,
                                    NcvSize32u roiSize, Ncv8u *pBuffer,
                                    Ncv32u bufSize, cudaDeviceProp &devProp)
{
    // serve temporary matrices from the caller-supplied scratch block
    NCVMemStackAllocator allocator(NCVMemoryTypeDevice, bufSize,
                                   static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(allocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, allocator);
    ncvAssertReturnNcvStat(ncvStat);

    return NPPST_SUCCESS;
}
// Device squared integral image, 8u input -> 64u output. pBuffer is
// scratch memory of at least the size reported by
// nppiStSqrIntegralGetSize_8u64u; steps are byte pitches.
NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep,
                                      Ncv64u *d_dst, Ncv32u dstStep,
                                      NcvSize32u roiSize, Ncv8u *pBuffer,
                                      Ncv32u bufSize, cudaDeviceProp &devProp)
{
    // serve temporary matrices from the caller-supplied scratch block
    NCVMemStackAllocator allocator(NCVMemoryTypeDevice, bufSize,
                                   static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(allocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, allocator);
    ncvAssertReturnNcvStat(ncvStat);

    return NPPST_SUCCESS;
}
// CPU reference implementation of the 8u -> 32u integral image.
// The output has one extra row and column of zeros at the top/left:
// h_dst[i][j] is the sum of h_src over the i x j top-left rectangle.
// srcStep is in bytes (== elements for 8u); dstStep is in bytes.
NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
                                        Ncv32u *h_dst, Ncv32u dstStep,
                                        NcvSize32u roiSize)
{
    ncvAssertReturn (h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcStep >= roiSize.width &&
                     dstStep >= (roiSize.width + 1) * sizeof (Ncv32u) &&
                     dstStep % sizeof (Ncv32u) == 0, NPPST_INVALID_STEP);
    dstStep /= sizeof (Ncv32u);

    const Ncv32u WidthII = roiSize.width + 1;
    const Ncv32u HeightII = roiSize.height + 1;

    // zero top border row
    memset (h_dst, 0, WidthII * sizeof (Ncv32u));

    for (Ncv32u i = 1; i < HeightII; i++)
    {
        Ncv32u *curRow  = h_dst + i * dstStep;
        Ncv32u *prevRow = h_dst + (i - 1) * dstStep;
        Ncv8u  *srcRow  = h_src + (i - 1) * srcStep;

        curRow[0] = 0; // zero left border column
        for (Ncv32u j = 1; j < WidthII; j++)
        {
            // inclusion-exclusion recurrence:
            // II(i,j) = src + II(i,j-1) + II(i-1,j) - II(i-1,j-1)
            curRow[j] = srcRow[j - 1] + curRow[j - 1] + prevRow[j] - prevRow[j - 1];
        }
    }
    return NPPST_SUCCESS;
}
// CPU reference implementation of the 32f -> 32f integral image.
// The output has one extra row and column of zeros at the top/left:
// h_dst[i][j] is the sum of h_src over the i x j top-left rectangle.
// Steps are byte pitches and must be multiples of sizeof(Ncv32f).
NCVStatus nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep,
                                         Ncv32f *h_dst, Ncv32u dstStep,
                                         NcvSize32u roiSize)
{
    ncvAssertReturn (h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcStep >= roiSize.width * sizeof (Ncv32f) &&
                     dstStep >= (roiSize.width + 1) * sizeof (Ncv32f) &&
                     srcStep % sizeof (Ncv32f) == 0 &&
                     dstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
    srcStep /= sizeof (Ncv32f);
    dstStep /= sizeof (Ncv32f);

    Ncv32u WidthII = roiSize.width + 1;
    Ncv32u HeightII = roiSize.height + 1;

    // Fix: zero the first row using the element type actually stored
    // (Ncv32f); the previous sizeof(Ncv32u) only worked by coincidence
    // of both types being 4 bytes.
    memset (h_dst, 0, WidthII * sizeof (Ncv32f));

    for (Ncv32u i = 1; i < HeightII; i++)
    {
        h_dst[i * dstStep] = 0.0f;   // zero left border column
        for (Ncv32u j = 1; j < WidthII; j++)
        {
            // inclusion-exclusion recurrence:
            // II(i,j) = src + II(i,j-1) + II(i-1,j) - II(i-1,j-1)
            Ncv32f top = h_dst[(i - 1) * dstStep + j];
            Ncv32f left = h_dst[i * dstStep + (j - 1)];
            Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)];
            Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)];
            h_dst[i * dstStep + j] = elem + left - topleft + top;
        }
    }
    return NPPST_SUCCESS;
}
// CPU reference implementation of the squared integral image
// (8u -> 64u): h_dst[i][j] is the sum of squared pixels over the
// i x j top-left rectangle, with a zero top row and left column.
// srcStep is in bytes (== elements for 8u); dstStep is in bytes.
NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep,
                                           Ncv64u *h_dst, Ncv32u dstStep,
                                           NcvSize32u roiSize)
{
    ncvAssertReturn (h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcStep >= roiSize.width &&
                     dstStep >= (roiSize.width + 1) * sizeof (Ncv64u) &&
                     dstStep % sizeof (Ncv64u) == 0, NPPST_INVALID_STEP);
    dstStep /= sizeof (Ncv64u);

    const Ncv32u WidthII = roiSize.width + 1;
    const Ncv32u HeightII = roiSize.height + 1;

    // zero top border row
    memset (h_dst, 0, WidthII * sizeof (Ncv64u));

    for (Ncv32u i = 1; i < HeightII; i++)
    {
        Ncv64u *curRow  = h_dst + i * dstStep;
        Ncv64u *prevRow = h_dst + (i - 1) * dstStep;
        Ncv8u  *srcRow  = h_src + (i - 1) * srcStep;

        curRow[0] = 0; // zero left border column
        for (Ncv32u j = 1; j < WidthII; j++)
        {
            // inclusion-exclusion recurrence with the pixel squared;
            // widen to 64-bit before multiplying
            Ncv64u elem = srcRow[j - 1];
            curRow[j] = elem * elem + curRow[j - 1] + prevRow[j] - prevRow[j - 1];
        }
    }
    return NPPST_SUCCESS;
}
//==============================================================================
//
// Decimate.cu
//
//==============================================================================
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32;
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8;
// Fetches one source element for the decimation kernel, either straight
// from global memory or through the texture cache depending on
// tbCacheTexture (textures tex32u/tex64u are bound by the host
// wrapper). 64-bit values have no native texture type, so they are
// fetched as a uint2 and reassembled (y = high word, x = low word).
template<class T, NcvBool tbCacheTexture>
__device__ T getElem_Decimate(Ncv32u x, T *d_src);
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src)
{
    return tex1Dfetch(tex32u, x);
}
template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src)
{
    return d_src[x];
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src)
{
    // reconstruct the 64-bit value from the two 32-bit texture words
    uint2 tmp = tex1Dfetch(tex64u, x);
    Ncv64u res = (Ncv64u)tmp.y;
    res <<= 32;
    res |= tmp.x;
    return res;
}
template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src)
{
    return d_src[x];
}
// Nearest-neighbor decimation kernel: one thread per output pixel; output
// pixel (curX, curY) copies source element at flat index
// (curY*srcStep + curX) * scale, read via getElem_Decimate.
template <class T, NcvBool tbCacheTexture>
__global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep,
NcvSize32u dstRoi, Ncv32u scale)
{
int curX = blockIdx.x * blockDim.x + threadIdx.x;
int curY = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against the partial blocks at the right/bottom edges of the grid.
if (curX >= dstRoi.width || curY >= dstRoi.height)
{
return;
}
d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src);
}
// Device-side nearest-neighbor decimation wrapper: validates arguments,
// optionally binds the source to tex32u/tex64u, and launches decimate_C1R
// over a (srcRoi.width/scale) x (srcRoi.height/scale) output.
// srcStep/dstStep are byte strides, converted to element strides internally.
template <class T>
static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep,
                                       T *d_dst, Ncv32u dstStep,
                                       NcvSize32u srcRoi, Ncv32u scale,
                                       NcvBool readThruTexture)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
    // Fix: also require element-aligned steps, matching decimateWrapperHost;
    // the divisions below silently truncate misaligned byte steps otherwise.
    ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
                    dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
                    srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);
    srcStep /= sizeof(T);
    dstStep /= sizeof(T);

    NcvSize32u dstRoi;
    dstRoi.width = srcRoi.width / scale;
    dstRoi.height = srcRoi.height / scale;

    dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X,
              (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
    dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y);

    if (!readThruTexture)
    {
        decimate_C1R
            <T, false>
            <<<grid, block, 0, nppStGetActiveCUDAstream()>>>
            (d_src, srcStep, d_dst, dstStep, dstRoi, scale);
    }
    else
    {
        // Bind the source through the matching texture reference; 64-bit
        // elements go through a uint2 channel (textures cannot return 64-bit
        // scalars -- see getElem_Decimate).
        cudaChannelFormatDesc cfdTexSrc;
        if (sizeof(T) == sizeof(Ncv32u))
        {
            cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();
            size_t alignmentOffset;
            ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
            ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        }
        else
        {
            cfdTexSrc = cudaCreateChannelDesc<uint2>();
            size_t alignmentOffset;
            ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
            ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        }
        decimate_C1R
            <T, true>
            <<<grid, block, 0, nppStGetActiveCUDAstream()>>>
            (d_src, srcStep, d_dst, dstStep, dstRoi, scale);
    }

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Host reference implementation of nearest-neighbor decimation:
// dst(row, col) = src(row*scale, col*scale).
template <class T>
static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep,
                                     T *h_dst, Ncv32u dstStep,
                                     NcvSize32u srcRoi, Ncv32u scale)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI);
    ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
    ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
                    dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
                    srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);

    // Convert byte strides to element strides.
    srcStep /= sizeof(T);
    dstStep /= sizeof(T);

    const Ncv32u dstWidth = srcRoi.width / scale;
    const Ncv32u dstHeight = srcRoi.height / scale;

    for (Ncv32u row = 0; row < dstHeight; row++)
    {
        const T *srcRow = h_src + row * scale * srcStep;
        T *dstRow = h_dst + row * dstStep;
        for (Ncv32u col = 0; col < dstWidth; col++)
        {
            dstRow[col] = srcRow[col * scale];
        }
    }

    return NPPST_SUCCESS;
}
// Generates the public nppiStDecimate_{32,64}{u,s,f}_C1R entry points.
// Decimation only moves elements, so signed and float variants reinterpret
// their data as the unsigned integer type of the same width.
#define implementNppDecimate(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
{ \
return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, \
srcRoi, scale, readThruTexture); \
}
// Host-side counterparts of the macro above.
#define implementNppDecimateHost(bit, typ) \
NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi, Ncv32u scale) \
{ \
return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, \
srcRoi, scale); \
}
implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)
//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================
// Threads per (1-D) block for the rectStdDev kernel.
const Ncv32u NUM_RECTSTDDEV_THREADS = 128;
// Reads one 32-bit integral-image element, via tex32u when tbCacheTexture is
// set (the pointer argument is then ignored).
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
if (tbCacheTexture)
{
return tex1Dfetch(tex32u, x);
}
else
{
return d_sum[x];
}
}
// Reads one 64-bit squared-integral element; the texture path fetches a
// uint2 and reassembles it (tmp.y = high word, tmp.x = low word).
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
if (tbCacheTexture)
{
uint2 tmp = tex1Dfetch(tex64u, x);
Ncv64u res = (Ncv64u)tmp.y;
res <<= 32;
res |= tmp.x;
return res;
}
else
{
return d_sqsum[x];
}
}
// Computes, for every ROI pixel, the standard deviation of source values
// inside the rectangle `rect` offset from that pixel, using the integral
// image d_sum and squared-integral image d_sqsum (four-corner lookups).
// Launch: 1-D blocks of NUM_RECTSTDDEV_THREADS along x; gridDim.y = roi.height.
// When tbCacheTexture is set, the integral images are read through
// tex32u/tex64u and the pointer arguments are ignored.
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
    Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
    if (x_offs >= roi.width)
    {
        return;
    }
    Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
    Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;
    //OPT: try swapping order (could change cache hit/miss ratio)
    Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
    Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
    Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
    // Rectangle sum via the four corners: br + tl - tr - bl.
    Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;
    Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
    sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
    sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
    sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
    sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
    Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;
    Ncv32f mean = sum_val * invRectArea;
    //////////////////////////////////////////////////////////////////////////
    // sqsum_val_res = sqsum_val / rectArea, computed as two float pieces to
    // limit the error of converting a 64-bit integer to a single float:
    // sqsum_val_1 carries the truncated part, sqsum_val_4 the remainder.
    //////////////////////////////////////////////////////////////////////////
    Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
    Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
    Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
    Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
    sqsum_val_1 *= invRectArea;
    sqsum_val_4 *= invRectArea;
    Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;
    //////////////////////////////////////////////////////////////////////////
    // variance = sqsum_val_res - mean * mean  (i.e. E[x^2] - (E[x])^2)
    //////////////////////////////////////////////////////////////////////////
#if defined DISABLE_MAD_SELECTIVELY
    // Fix: this branch previously computed `sqsum_val_2 - __fmul_rn(mean, mean)`,
    // using the raw (unscaled) 64-bit truncation instead of the area-normalized
    // sqsum_val_res, which yields a grossly wrong variance when the macro is set.
    Ncv32f variance = sqsum_val_res - __fmul_rn(mean, mean);
#else
    Ncv32f variance = sqsum_val_res - mean * mean;
#endif
    //////////////////////////////////////////////////////////////////////////
    // stddev = sqrtf(variance)
    //////////////////////////////////////////////////////////////////////////
    //Ncv32f stddev = sqrtf(variance);
    Ncv32f stddev = __fsqrt_rn(variance);
    d_norm[blockIdx.y * normStep + x_offs] = stddev;
}
// Host wrapper: validates arguments and launches rectStdDev_32f_C1R, reading
// the integral images either directly or through texture references.
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
Ncv64u *d_sqsum, Ncv32u sqsumStep,
Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea, NcvBool readThruTexture)
{
ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
// Steps are in bytes and must cover the widest row the kernel can touch
// (ROI plus the rectangle's horizontal extent).
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
// Convert byte steps to element steps.
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
dim3 block(NUM_RECTSTDDEV_THREADS);
if (!readThruTexture)
{
rectStdDev_32f_C1R
<false>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
else
{
cudaChannelFormatDesc cfdTexSrc;
cudaChannelFormatDesc cfdTexSqr;
cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();
cfdTexSqr = cudaCreateChannelDesc<uint2>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR);
ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
// NULL pointers are intentional: the <true> instantiation reads only
// through the textures bound above.
rectStdDev_32f_C1R
<true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
return NPPST_SUCCESS;
}
// Host reference implementation of nppiStRectStdDev_32f_C1R: same four-corner
// integral-image lookups, with the normalization done in double precision.
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
Ncv64u *h_sqsum, Ncv32u sqsumStep,
Ncv32f *h_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect,
Ncv32f scaleArea)
{
ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
sumStep % sizeof(Ncv32u) == 0 &&
sqsumStep % sizeof(Ncv64u) == 0 &&
normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
// Convert byte steps to element steps.
sumStep /= sizeof(Ncv32u);
sqsumStep /= sizeof(Ncv64u);
normStep /= sizeof(Ncv32f);
Ncv32f rectArea = rect.width * rect.height * scaleArea;
Ncv32f invRectArea = 1.0f / rectArea;
for (Ncv32u i=0; i<roi.height; i++)
{
for (Ncv32u j=0; j<roi.width; j++)
{
Ncv32u sum_offset = i * sumStep + j;
Ncv32u sqsum_offset = i * sqsumStep + j;
Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x];
Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x];
Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width];
Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width];
// NOTE(review): the corner combination is evaluated in Ncv32u; unsigned
// wraparound keeps it correct only while the true rectangle sum fits in
// 32 bits -- confirm against expected image sizes.
Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl;
Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x];
Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x];
Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width];
Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width];
Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl);
// variance = E[x^2] - (E[x])^2; stddev = sqrt(variance).
Ncv64f mean = sum_val * invRectArea;
Ncv64f sqsum_val_2 = sqsum_val / rectArea;
Ncv64f variance = sqsum_val_2 - mean * mean;
h_norm[i * normStep + j] = (Ncv32f)sqrt(variance);
}
}
return NPPST_SUCCESS;
}
//==============================================================================
//
// Transpose.cu
//
//==============================================================================
// Tile geometry for the shared-memory transpose kernel below.
const Ncv32u TRANSPOSE_TILE_DIM = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;
/**
 * \brief Matrix transpose kernel
 *
 * Calculates transpose of the input image
 * \see TRANSPOSE_TILE_DIM
 *
 * \tparam T Element type of the input and output images
 *
 * \param d_src [IN] Source image pointer
 * \param srcStride [IN] Source image stride, in elements
 * \param d_dst [OUT] Output image pointer
 * \param dstStride [IN] Output image stride, in elements
 * \param srcRoi [IN] Source region of interest
 *
 * \return None
 */
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
// +1 column of padding avoids shared-memory bank conflicts on the
// transposed (column-wise) reads below.
__shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
Ncv32u blockIdx_x, blockIdx_y;
// do diagonal reordering
if (gridDim.x == gridDim.y)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// Load a tile from the source into shared memory (bounds-checked).
Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
Ncv32u index_gmem = xIndex + yIndex * srcStride;
if (xIndex < srcRoi.width)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.height)
{
tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
}
}
}
// Barrier between filling the tile and reading it transposed.
__syncthreads();
// Write the tile back transposed: block coordinates are swapped.
xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_gmem = xIndex + yIndex * dstStride;
if (xIndex < srcRoi.height)
{
for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if (yIndex + i < srcRoi.width)
{
d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
}
// Validates arguments, converts byte strides to element strides, and launches
// the tiled transpose kernel on the active nppSt stream.
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
                                 T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);

    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    // One block per TILE x TILE patch of the source ROI (rounded up).
    const Ncv32u gridW = (srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM;
    const Ncv32u gridH = (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM;
    dim3 grid(gridW, gridH);
    dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);

    transpose<T><<<grid, block, 0, nppStGetActiveCUDAstream()>>>(d_src, srcStride, d_dst, dstStride, srcRoi);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}
// Host reference transpose: dst(col, row) = src(row, col) over the ROI.
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
                                      T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);

    // Convert byte strides to element strides.
    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    for (Ncv32u row = 0; row < srcRoi.height; row++)
    {
        const T *srcRow = h_src + row * srcStride;
        for (Ncv32u col = 0; col < srcRoi.width; col++)
        {
            h_dst[col * dstStride + row] = srcRow[col];
        }
    }

    return NPPST_SUCCESS;
}
// Generates the public nppiStTranspose_{32,64}{u,s,f}_C1R entry points.
// Transpose only moves data, so every variant reinterprets its elements as
// the unsigned integer type of the same width.
#define implementNppTranspose(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
{ \
return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
(Ncv##bit##u *)d_dst, dstStep, srcRoi); \
}
// Host-side counterparts of the macro above.
#define implementNppTransposeHost(bit, typ) \
NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
Ncv##bit##typ *h_dst, Ncv32u dstStep, \
NcvSize32u srcRoi) \
{ \
return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
(Ncv##bit##u *)h_dst, dstStep, srcRoi); \
}
implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)
implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)
// 128-bit-per-element transpose (moves uint4 values).
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
// NOTE(review): despite the d_src/d_dst parameter names, this _host variant
// forwards to transposeWrapperHost and therefore expects host pointers.
NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}
//==============================================================================
//
// Compact.cu
//
//==============================================================================
// Threads per block for the stream-compaction kernels below.
const Ncv32u NUM_REMOVE_THREADS = 256;
// Pass 1 of stream compaction: per-block inclusive scan.
//  - bRemove == true: scans the keep-predicate (d_src[i] != elemRemove) and
//    writes each element's exclusive offset to d_offsets.
//  - bRemove == false: scans d_src values in place (used for the higher
//    levels of the hierarchical prefix sum).
//  - bWritePartial: the last thread of each block stores the block total to
//    d_blockSums for the next hierarchy level.
template <bool bRemove, bool bWritePartial>
__global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_blockSums,
Ncv32u elemRemove)
{
// Grids wider than 65535 blocks are folded into two dimensions.
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
// NOTE(review): with the grid sized from srcLen this guard appears never to
// split a block, so all threads of a live block reach the barrier-bearing
// scan below -- confirm if launch geometry ever changes.
if (elemAddrIn > srcLen + blockDim.x)
{
return;
}
// Scratch for blockScanInclusive (defined elsewhere in this module);
// out-of-range threads contribute a neutral 0.
__shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2];
Ncv32u scanElem = 0;
if (elemAddrIn < srcLen)
{
if (bRemove)
{
scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0;
}
else
{
scanElem = d_src[elemAddrIn];
}
}
Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem);
__syncthreads();
if (elemAddrIn < srcLen)
{
if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial)
{
d_blockSums[blockId] = localScanInc;
}
if (bRemove)
{
// Inclusive scan minus own element = exclusive scan = output offset.
d_offsets[elemAddrIn] = localScanInc - scanElem;
}
else
{
d_src[elemAddrIn] = localScanInc - scanElem;
}
}
}
// Pass 2: adds the scanned block totals of the next hierarchy level back onto
// each element, turning per-block offsets into global offsets.
__global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
if (elemAddrIn >= srcLen)
{
return;
}
// Every thread of the block writes the same value (benign same-value write);
// the barrier then makes it visible to all before use.
__shared__ Ncv32u valOffs;
valOffs = d_blockSums[blockId];
__syncthreads();
d_offsets[elemAddrIn] += valOffs;
}
// Pass 3 (scatter): every surviving element (!= elemRemove) is written to the
// output slot precomputed in d_offsets; the thread handling the final input
// element also publishes the total output length to dstLenValue.
__global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_offsets, Ncv32u *d_dst,
Ncv32u elemRemove, Ncv32u *dstLenValue)
{
    // Grids wider than 65535 blocks are folded into two dimensions.
    const Ncv32u flatBlock = blockIdx.y * 65535 + blockIdx.x;
    const Ncv32u inPos = flatBlock * NUM_REMOVE_THREADS + threadIdx.x;
    if (inPos >= srcLen)
    {
        return;
    }

    const Ncv32u value = d_src[inPos];
    const Ncv32u outPos = d_offsets[inPos];
    const bool keep = (value != elemRemove);

    if (keep)
    {
        d_dst[outPos] = value;
    }

    // The last input element knows the final length: its own offset, plus one
    // if it survived.
    if (inPos == srcLen - 1)
    {
        *dstLenValue = keep ? (outPos + 1) : outPos;
    }
}
// Core stream-compaction routine: removes all elements equal to elemRemove
// from d_src (length srcLen) into d_dst, optionally reporting the output
// length through the pinned pointer dstLenPinned. Runs three passes over a
// hierarchical prefix sum:
//   1) per-block scans of the keep-predicate (plus block totals per level),
//   2) top-down adjustment adding block base offsets back in,
//   3) scatter of surviving elements to their computed offsets.
// When gpuAllocator is a counting allocator, no kernels run (NCV_SKIP_COND);
// only the scratch requirement is measured.
NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
Ncv32u *d_dst, Ncv32u *dstLenPinned,
Ncv32u elemRemove,
INCVMemAllocator &gpuAllocator)
{
ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);
if (srcLen == 0)
{
if (dstLenPinned != NULL)
{
*dstLenPinned = 0;
}
return NPPST_SUCCESS;
}
// Compute element counts and alignment-padded offsets for every level of
// the scan hierarchy; each level is ceil(prev / NUM_REMOVE_THREADS) long.
std::vector<Ncv32u> partSumNums;
std::vector<Ncv32u> partSumOffsets;
Ncv32u partSumLastNum = srcLen;
Ncv32u partSumLastOffs = 0;
do
{
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
gpuAllocator.alignment()) / sizeof(Ncv32u);
partSumLastOffs += curPartSumAlignedLength;
partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
}
while (partSumLastNum>1);
partSumNums.push_back(partSumLastNum);
partSumOffsets.push_back(partSumLastOffs);
// One buffer holds all hierarchy levels back-to-back; one extra element
// (d_numDstElements) receives the final output length.
NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
NCV_SKIP_COND_BEGIN
dim3 block(NUM_REMOVE_THREADS);
//calculate zero-level partial sums for indices calculation
if (partSumNums.size() > 2)
{
dim3 grid(partSumNums[1]);
// Fold grids wider than the 65535-block limit into two dimensions.
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
removePass1Scan
<true, true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen,
d_hierSums.ptr(),
d_hierSums.ptr() + partSumOffsets[1],
elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//calculate hierarchical partial sums
for (Ncv32u i=1; i<partSumNums.size()-1; i++)
{
dim3 grid_partial(partSumNums[i+1]);
if (grid_partial.x > 65535)
{
grid_partial.y = (grid_partial.x + 65534) / 65535;
grid_partial.x = 65535;
}
if (grid_partial.x != 1)
{
removePass1Scan
<false, true>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
d_hierSums.ptr() + partSumOffsets[i+1],
0);
}
else
{
// Topmost level fits in one block: no partial sums to emit.
removePass1Scan
<false, false>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
NULL,
0);
}
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//adjust hierarchical partial sums
for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--)
{
dim3 grid_local(partSumNums[i+1]);
if (grid_local.x > 65535)
{
grid_local.y = (grid_local.x + 65534) / 65535;
grid_local.x = 65535;
}
removePass2Adjust
<<<grid_local, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i], partSumNums[i],
d_hierSums.ptr() + partSumOffsets[i+1]);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
}
else
{
// Input fits in a single block: one scan pass produces final offsets.
dim3 grid_local(partSumNums[1]);
removePass1Scan
<true, false>
<<<grid_local, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen,
d_hierSums.ptr(),
NULL, elemRemove);
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
}
//compact source vector using indices
dim3 grid(partSumNums[1]);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
removePass3Compact
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
(d_src, srcLen, d_hierSums.ptr(), d_dst,
elemRemove, d_numDstElements.ptr());
ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
//get number of dst elements
if (dstLenPinned != NULL)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u),
cudaMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
}
NCV_SKIP_COND_END
return NPPST_SUCCESS;
}
// Dry-runs the compaction with a counting allocator to report the scratch
// buffer size (in bytes) required by nppsStCompact_32u for a given srcLen.
NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        *pBufsize = 0;
        return NPPST_SUCCESS;
    }

    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    // The marker value is irrelevant for sizing; any constant works here.
    NCVStatus stat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE, gpuCounter);
    ncvAssertReturnNcvStat(stat);

    *pBufsize = (Ncv32u)gpuCounter.maxSize();
    return NPPST_SUCCESS;
}
// The scratch size depends only on srcLen, so the signed and float variants
// simply reuse the unsigned computation.
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}
// Removes all occurrences of elemRemove from d_src, writing survivors to
// d_dst and the output length to p_dstLen; pBuffer/bufSize supply the scratch
// memory whose size is reported by nppsStCompactGetSize_32u.
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                            Ncv32u *d_dst, Ncv32u *p_dstLen,
                            Ncv32u elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus stat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator);
    ncvAssertReturnNcvStat(stat);

    return NPPST_SUCCESS;
}
// Signed wrapper over the unsigned compaction.
// Fix: the previous `*(Ncv32u *)&elemRemove` type-pun violates strict
// aliasing; a signed-to-unsigned conversion is well-defined (value modulo
// 2^32) and preserves the bit pattern on two's-complement targets, which is
// exactly what the bitwise comparison in the kernel needs.
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
                            Ncv32s *d_dst, Ncv32u *p_dstLen,
                            Ncv32s elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             static_cast<Ncv32u>(elemRemove), pBuffer, bufSize, devProp);
}
// Alias-safe 32-bit type used to reinterpret float bit patterns without
// violating strict aliasing; GCC has supported the may_alias attribute since
// the 3.x series.
// Fix: the old check (__GNUC__ > 2 && __GNUC_MINOR__ > 4) wrongly rejected
// any GCC with a small minor version (e.g. 4.0-4.4, 5.0-5.4); compare the
// combined version number instead.
#if defined __GNUC__ && (__GNUC__ * 100 + __GNUC_MINOR__ > 304)
typedef Ncv32u __attribute__((__may_alias__)) Ncv32u_a;
#else
typedef Ncv32u Ncv32u_a;
#endif
// Float wrapper: reinterprets the removal marker's bit pattern as Ncv32u via
// the alias-safe Ncv32u_a typedef so the kernel's comparison is an exact
// bitwise match.
NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
Ncv32f *d_dst, Ncv32u *p_dstLen,
Ncv32f elemRemove, Ncv8u *pBuffer,
Ncv32u bufSize, cudaDeviceProp &devProp)
{
return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
*(Ncv32u_a *)&elemRemove, pBuffer, bufSize, devProp);
}
// Host reference implementation of stream compaction: copies every element
// except elemRemove, optionally reporting how many survived via dstLen.
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
                                 Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);

    // srcLen == 0 falls through naturally: nothing copied, zero reported.
    Ncv32u kept = 0;
    for (Ncv32u i = 0; i < srcLen; i++)
    {
        Ncv32u value = h_src[i];
        if (value != elemRemove)
        {
            h_dst[kept++] = value;
        }
    }

    if (dstLen != NULL)
    {
        *dstLen = kept;
    }
    return NPPST_SUCCESS;
}
// Signed host wrapper: forwards with the marker's bit pattern reinterpreted
// through the alias-safe Ncv32u_a typedef.
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
// Float host wrapper: bitwise-exact marker comparison via Ncv32u_a.
NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u_a *)&elemRemove);
}
//==============================================================================
//
// Filter.cu
//
//==============================================================================
// 1-D texture references for the separable filter: source image and kernel
// taps; bound by the host wrappers below.
texture <float, 1, cudaReadModeElementType> texSrc;
texture <float, 1, cudaReadModeElementType> texKernel;
// Fetches source element i of a row, reflecting out-of-range indices back
// into [0, w): -k maps to k+1 and w-1+k maps to w-k.
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
int i,
int w)
{
if (i < 0) i = 1 - i;
if (i >= w) i = w + w - i - 1;
return tex1Dfetch (texSrc, rowOffset + i);
}
// Column counterpart of getValueMirrorRow: mirrors j into [0, h) and fetches
// the element j rows down from `offset` (rowStep elements per row).
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
const int rowStep,
int j,
int h)
{
if (j < 0) j = 1 - j;
if (j >= h) j = h + h - j - 1;
return tex1Dfetch (texSrc, offset + j * rowStep);
}
// Horizontal 1-D convolution with mirrored borders: one thread per ROI pixel;
// source pixels and filter taps are read through texSrc/texKernel, which the
// host wrapper binds before launching.
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
// position within ROI
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
// Offset of the last kernel tap relative to the anchor position.
const int p = nKernelSize - nAnchor - 1;
const int j = roi.y + iy;
const int rowOffset = j * srcStep + roi.x;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width)
* tex1Dfetch (texKernel, m);
}
// Scale the accumulated response and store to the destination ROI.
pDst[iy * dstStep + ix] = sum * multiplier;
}
// Vertical 1-D convolution with mirrored borders; column counterpart of
// FilterRowBorderMirror_32f_C1R (same texture-based reads).
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
Ncv32f *pDst,
NcvSize32u dstSize,
Ncv32u dstStep,
NcvRect32u roi,
Ncv32s nKernelSize,
Ncv32s nAnchor,
Ncv32f multiplier)
{
// position within ROI
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= roi.width || iy >= roi.height)
{
return;
}
// Offset of the last kernel tap relative to the anchor position.
const int p = nKernelSize - nAnchor - 1;
const int i = roi.x + ix;
const int offset = i + roi.y * srcStep;
float sum = 0.0f;
for (int m = 0; m < nKernelSize; ++m)
{
sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height)
* tex1Dfetch (texKernel, m);
}
pDst[ix + iy * dstStep] = sum * multiplier;
}
// Applies a 1-D horizontal convolution with mirrored borders over oROI.
// Only nppStBorderMirror is implemented; every other border type returns
// NPPST_ERROR. Steps are byte strides.
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
                                        NcvSize32u srcSize,
                                        Ncv32u nSrcStep,
                                        Ncv32f *pDst,
                                        NcvSize32u dstSize,
                                        Ncv32u nDstStep,
                                        NcvRect32u oROI,
                                        NppStBorderType borderType,
                                        const Ncv32f *pKernel,
                                        Ncv32s nKernelSize,
                                        Ncv32s nAnchor,
                                        Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL &&
        pDst != NULL &&
        pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
        dstSize.width * sizeof (Ncv32f) <= nDstStep &&
        oROI.width * sizeof (Ncv32f) <= nSrcStep &&
        oROI.width * sizeof (Ncv32f) <= nDstStep &&
        nSrcStep % sizeof (Ncv32f) == 0 &&
        nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }
    cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;
    // Fix: texture-binding failures were silently ignored here; check them
    // the same way the other texture users in this file do.
    ncvAssertCUDAReturn (cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep), NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)), NPPST_TEXTURE_BIND_ERROR);
    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);
    switch (borderType)
    {
    case nppStBorderNone:
        return NPPST_ERROR;
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }
    return NPPST_SUCCESS;
}
// Applies a 1-D vertical convolution with mirrored borders over oROI; column
// counterpart of nppiStFilterRowBorder_32f_C1R. Only nppStBorderMirror is
// implemented; other border types return NPPST_ERROR.
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
                                           NcvSize32u srcSize,
                                           Ncv32u nSrcStep,
                                           Ncv32f *pDst,
                                           NcvSize32u dstSize,
                                           Ncv32u nDstStep,
                                           NcvRect32u oROI,
                                           NppStBorderType borderType,
                                           const Ncv32f *pKernel,
                                           Ncv32s nKernelSize,
                                           Ncv32s nAnchor,
                                           Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL &&
        pDst != NULL &&
        pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
        dstSize.width * sizeof (Ncv32f) <= nDstStep &&
        oROI.width * sizeof (Ncv32f) <= nSrcStep &&
        oROI.width * sizeof (Ncv32f) <= nDstStep &&
        nSrcStep % sizeof (Ncv32f) == 0 &&
        nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }
    cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;
    // Fix: texture-binding failures were silently ignored here; check them
    // the same way the other texture users in this file do.
    ncvAssertCUDAReturn (cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep), NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)), NPPST_TEXTURE_BIND_ERROR);
    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);
    switch (borderType)
    {
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }
    return NPPST_SUCCESS;
}
//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================
// Integer ceiling division: number of denom-sized chunks covering num.
inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
    // classic round-up trick: bias the numerator by denom-1 before truncating
    const Ncv32u biased = num + denom - 1;
    return biased / denom;
}
// 2D float textures through which BlendFramesKernel samples the two source
// frames (configured and bound in BlendFrames below)
texture<float, 2, cudaReadModeElementType> tex_src1;
texture<float, 2, cudaReadModeElementType> tex_src0;
// Computes one pixel of the interpolated frame at temporal position theta:
// samples the two source frames (bound to tex_src0 / tex_src1) along the
// forward or backward flow, choosing the blend mode from the per-pixel
// visibility masks o0/o1.
__global__ void BlendFramesKernel(const float *u, const float *v, // forward flow
                                  const float *ur, const float *vr, // backward flow
                                  const float *o0, const float *o1, // coverage masks
                                  int w, int h, int s,
                                  float theta, float *out)
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col >= w || row >= h) return;
    const int pos = col + s * row;
    // flow vectors at this pixel
    const float fu = u[pos];
    const float fv = v[pos];
    const float bu = ur[pos];
    const float bv = vr[pos];
    // sample at texel centers
    const float x = (float)col + 0.5f;
    const float y = (float)row + 0.5f;
    const bool visible0 = o0[pos] > 1e-4f;
    const bool visible1 = o1[pos] > 1e-4f;
    if (visible0 && visible1)
    {
        // pixel seen in both frames: cross-fade the motion-compensated samples
        out[pos] = tex2D(tex_src0, x - fu * theta, y - fv * theta) * (1.0f - theta) +
                   tex2D(tex_src1, x + fu * (1.0f - theta), y + fv * (1.0f - theta)) * theta;
    }
    else if (visible0)
    {
        // seen only in the first frame: follow the forward flow
        out[pos] = tex2D(tex_src0, x - fu * theta, y - fv * theta);
    }
    else
    {
        // otherwise sample the second frame along the backward flow
        out[pos] = tex2D(tex_src1, x - bu * (1.0f - theta), y - bv * (1.0f - theta));
    }
}
// Host wrapper for BlendFramesKernel: binds both source frames as 2D textures
// (clamp addressing, bilinear filtering) and launches the per-pixel temporal
// blend at position theta.
//   src0/src1 - source frames (row pitch = stride floats)
//   ufi/vfi   - forward flow field warped to the target time
//   ubi/vbi   - backward flow field warped to the target time
//   o1/o2     - coverage masks for frame 0 / frame 1
NCVStatus BlendFrames(const Ncv32f *src0,
                      const Ncv32f *src1,
                      const Ncv32f *ufi,
                      const Ncv32f *vfi,
                      const Ncv32f *ubi,
                      const Ncv32f *vbi,
                      const Ncv32f *o1,
                      const Ncv32f *o2,
                      Ncv32u width,
                      Ncv32u height,
                      Ncv32u stride,
                      Ncv32f theta,
                      Ncv32f *out)
{
    // texture state must be configured before binding: clamp at the borders,
    // bilinear interpolation, unnormalized (pixel) coordinates
    tex_src1.addressMode[0] = cudaAddressModeClamp;
    tex_src1.addressMode[1] = cudaAddressModeClamp;
    tex_src1.filterMode = cudaFilterModeLinear;
    tex_src1.normalized = false;
    tex_src0.addressMode[0] = cudaAddressModeClamp;
    tex_src0.addressMode[1] = cudaAddressModeClamp;
    tex_src0.filterMode = cudaFilterModeLinear;
    tex_src0.normalized = false;
    cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> ();
    const Ncv32u pitch = stride * sizeof (float);
    ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR);
    dim3 threads (32, 4);
    dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));
    BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>>
        (ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Frame interpolation needs the same per-buffer scratch size as vector
// warping, so simply delegate the query.
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
                                           Ncv32u nStep,
                                           Ncv32u *hpSize)
{
    return nppiStVectorWarpGetBufferSize (srcSize, nStep, hpSize);
}
/**
 * Synthesizes an intermediate frame at temporal position pState->pos between
 * pSrcFrame0 and pSrcFrame1 using the supplied forward (pFU/pFV) and backward
 * (pBU/pBV) optical-flow fields.
 *
 * Pipeline: forward-warp both flow fields to the target time (producing the
 * coverage buffers cov0/cov1 as a by-product), then blend the two
 * motion-compensated source frames into pState->pNewFrame.
 *
 * ppBuffers[0..5] are caller-provided scratch buffers of the size reported
 * by nppiStGetInterpolationBufferSize.
 */
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
    // check state validity
    ncvAssertReturn (pState->pSrcFrame0 != 0 &&
                     pState->pSrcFrame1 != 0 &&
                     pState->pFU != 0 &&
                     pState->pFV != 0 &&
                     pState->pBU != 0 &&
                     pState->pBV != 0 &&
                     pState->pNewFrame != 0 &&
                     pState->ppBuffers[0] != 0 &&
                     pState->ppBuffers[1] != 0 &&
                     pState->ppBuffers[2] != 0 &&
                     pState->ppBuffers[3] != 0 &&
                     pState->ppBuffers[4] != 0 &&
                     pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (pState->size.width > 0 &&
                     pState->size.height > 0, NPPST_ERROR);
    ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
                     pState->nStep > 0 &&
                     pState->nStep % sizeof (Ncv32f) == 0,
                     NPPST_INVALID_STEP);
    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v
    // warp forward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdV) );
    // warp backward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdV) );  // BUG FIX: was bwdU — the warped backward V
                                                  // field was overwriting the U field and bwdV
                                                  // was passed uninitialized to BlendFrames
    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0,
                     pState->pSrcFrame1,
                     fwdU,
                     fwdV,
                     bwdU,
                     bwdV,
                     cov0,
                     cov1,
                     pState->size.width,
                     pState->size.height,
                     pState->nStep / sizeof (Ncv32f),
                     pState->pos,
                     pState->pNewFrame) );
    return NPPST_SUCCESS;
}
//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
// FP32 atomic add
// Pre-Fermi devices (sm_1x) have no native float atomicAdd, so emulate it
// with a compare-and-swap loop on the raw 32-bit pattern. Returns the value
// held at *addr before the add, matching atomicAdd semantics.
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
{
    float old = *addr, assumed;
    do {
        assumed = old;
        // retry until no other thread changed *addr between our read and the CAS
        old = int_as_float(__iAtomicCAS((int*)addr,
                                        float_as_int(assumed),
                                        float_as_int(val+assumed)));
    } while( assumed!=old );
    return old;
}
#else
// sm_20+ provides hardware float atomicAdd
#define _atomicAdd atomicAdd
#endif
// Forward warp with a 2x2 point-spread function: each source pixel pushes its
// value along its scaled flow vector and bilinearly splats it onto the four
// destination pixels surrounding the target position. Both the weighted
// values (dst) and the weights themselves (normalization_factor) are
// accumulated atomically; NormalizeKernel divides them out afterwards.
//   flow_stride / image_stride - row pitches in elements for flow and image
//   time_scale                 - fraction of the flow vector to apply
__global__ void ForwardWarpKernel_PSF2x2(const float *u,
                                         const float *v,
                                         const float *src,
                                         const int w,
                                         const int h,
                                         const int flow_stride,
                                         const int image_stride,
                                         const float time_scale,
                                         float *normalization_factor,
                                         float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    if (i >= h || j >= w) return;
    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;
    //bottom left corner of a target pixel (note the +1 pixel bias)
    float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
    float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;
    // split target position into integer pixel (px,py) and fraction (dx,dy)
    float px;
    float py;
    float dx = modff (cx, &px);
    float dy = modff (cy, &py);
    // target pixel integer coords
    int tx;
    int ty;
    tx = (int) px;
    ty = (int) py;
    float value = src[image_row_offset + j];
    float weight;
    // fill pixel containing bottom right corner
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing bottom left corner
    tx -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * dy;
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper left corner
    ty -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
    // fill pixel containing upper right corner
    tx += 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * (1.0f - dy);
        _atomicAdd (dst + ty * image_stride + tx, value * weight);
        _atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
}
// Forward warp with a 1x1 point-spread function: every source pixel is
// pushed along its scaled flow vector and accumulated atomically into the
// single nearest destination pixel (no weight buffer needed).
__global__ void ForwardWarpKernel_PSF1x1(const float *u,
                                         const float *v,
                                         const float *src,
                                         const int w,
                                         const int h,
                                         const int flow_stride,
                                         const int image_stride,
                                         const float time_scale,
                                         float *dst)
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= h || col >= w) return;
    const int flowIdx  = row * flow_stride + col;
    const int imageIdx = row * image_stride + col;
    // target position = source position + scaled flow (+1 pixel bias,
    // matching the PSF2x2 kernel)
    const float cx = u[flowIdx] * time_scale + (float)col + 1.0f;
    const float cy = v[flowIdx] * time_scale + (float)row + 1.0f;
    // round to the nearest destination pixel
    const int tx = __float2int_rn (cx);
    const int ty = __float2int_rn (cy);
    // splat only when the target lands inside the image
    if (tx >= 0 && tx < w && ty >= 0 && ty < h)
    {
        _atomicAdd (dst + ty * image_stride + tx, src[imageIdx]);
    }
}
// Divides each pixel by its accumulated splat weight; pixels that received
// no contributions (weight == 0) keep their current value (scale of 1).
__global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image)
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= h || col >= w) return;
    const int pos = row * s + col;
    const float weight = normalization_factor[pos];
    // multiply by the reciprocal; zero weight maps to a no-op scale
    const float rescale = (weight == 0.0f) ? 1.0f : (1.0f / weight);
    image[pos] *= rescale;
}
// Fills a w-by-h float field with `value`. Indexes with a row pitch equal to
// w, i.e. the buffer is treated as densely packed.
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= h || col >= w) return;
    image[row * w + col] = value;
}
// Reports the scratch-buffer size (bytes) needed by the PSF2x2 warp: one
// float weight per pixel, laid out with the source byte pitch.
NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize)
{
    ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep,
                     NPPST_INVALID_STEP);
    const Ncv32u bufferBytes = nSrcStep * srcSize.height;
    *hpSize = bufferBytes;
    return NPPST_SUCCESS;
}
// does not require normalization
// Host wrapper for ForwardWarpKernel_PSF1x1: nearest-neighbour forward warp
// with atomic accumulation into pDst.
// NOTE(review): the kernel only adds into pDst and nothing clears it here —
// presumably the caller is expected to pre-initialize pDst; confirm.
NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
                     pU != NULL &&
                     pV != NULL &&
                     pDst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     srcSize.width * sizeof (Ncv32f) <= nVFStep,
                     NPPST_INVALID_STEP);
    // convert byte pitches to element strides for the kernel
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u vfStep = nVFStep / sizeof (Ncv32f);
    dim3 ctaSize (32, 6);
    dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
    ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
        (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
// Host wrapper for the bilinear (PSF 2x2) forward warp: clears the weight
// buffer, splats source values and weights, then normalizes the destination.
// pBuffer must be at least the size reported by nppiStVectorWarpGetBufferSize.
// NOTE(review): MemsetKernel clears pBuffer with a packed pitch equal to
// width, but the warp/normalize kernels index it with srcStep — if
// nSrcStep != width * sizeof(Ncv32f), part of the weight buffer is never
// cleared. Confirm callers always pass a tightly-packed step.
// NOTE(review): pDst itself is never cleared before the accumulating warp —
// presumably callers provide a zeroed destination; verify.
NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f *pBuffer,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
                     pU != NULL &&
                     pV != NULL &&
                     pDst != NULL &&
                     pBuffer != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP);
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u vfStep = nVFStep / sizeof(Ncv32f);
    dim3 ctaSize(32, 6);
    dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
    // 1) zero the per-pixel weight accumulator
    MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
        (0, srcSize.width, srcSize.height, pBuffer);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    // 2) splat values into pDst and weights into pBuffer
    ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
        (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    // 3) divide pDst by the accumulated weights
    NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>>
        (pBuffer, srcSize.width, srcSize.height, srcStep, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return NPPST_SUCCESS;
}
//==============================================================================
//
// Resize.cu
//
//==============================================================================
// 2D source texture for bicubic resize; bound with normalized coordinates
// and mirror addressing in nppiStResize_32f_C1R
texture <float, 2, cudaReadModeElementType> texSrc2D;
// Integrates one source row over the fractional horizontal window
// [xmin, xmax]: the leftmost and rightmost texels contribute in proportion
// to their covered fraction, interior texels contribute fully, and the
// result is the coverage-weighted mean. Reads through the linearly-bound
// texSrc texture.
//   spos         - linear fetch index of the leftmost texel of the window
//   ixmin/ixmax  - integer texel index range of the window
//   fxmin        - floor(xmin); cxmax - ceil(xmax)
__forceinline__
__device__ float processLine(int spos,
                             float xmin,
                             float xmax,
                             int ixmin,
                             int ixmax,
                             float fxmin,
                             float cxmax)
{
    // first element: partial coverage of the leftmost texel
    float wsum = 1.0f - xmin + fxmin;
    float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin);
    spos++;
    // fully covered interior texels
    for (int ix = ixmin + 1; ix < ixmax; ++ix)
    {
        sum += tex1Dfetch(texSrc, spos);
        spos++;
        wsum += 1.0f;
    }
    // last element: partial coverage of the rightmost texel
    sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax);
    wsum += cxmax - xmax;
    return sum / wsum;
}
// Supersampling (area-average) resize kernel: each destination pixel averages
// the source window [x-scaleX, x+scaleX] x [y-scaleY, y+scaleY] inside the
// source ROI, weighting the partially covered edge rows/columns by their
// covered fraction. Rows are integrated via processLine through texSrc.
//   scaleX/scaleY - source pixels per destination pixel (1/xFactor, 1/yFactor)
__global__ void resizeSuperSample_32f(NcvSize32u srcSize,
                                      Ncv32u srcStep,
                                      NcvRect32u srcROI,
                                      Ncv32f *dst,
                                      NcvSize32u dstSize,
                                      Ncv32u dstStep,
                                      NcvRect32u dstROI,
                                      Ncv32f scaleX,
                                      Ncv32f scaleY)
{
    // position within dst ROI
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= dstROI.width || iy >= dstROI.height)
    {
        return;
    }
    float rw = (float) srcROI.width;
    float rh = (float) srcROI.height;
    // source position
    float x = scaleX * (float) ix;
    float y = scaleY * (float) iy;
    // x sampling range (clamped to the ROI)
    float xBegin = fmax (x - scaleX, 0.0f);
    float xEnd = fmin (x + scaleX, rw - 1.0f);
    // y sampling range (clamped to the ROI)
    float yBegin = fmax (y - scaleY, 0.0f);
    float yEnd = fmin (y + scaleY, rh - 1.0f);
    // x range of source samples
    float floorXBegin = floorf (xBegin);
    float ceilXEnd = ceilf (xEnd);
    int iXBegin = srcROI.x + (int) floorXBegin;
    int iXEnd = srcROI.x + (int) ceilXEnd;
    // y range of source samples
    float floorYBegin = floorf (yBegin);
    float ceilYEnd = ceilf (yEnd);
    int iYBegin = srcROI.y + (int) floorYBegin;
    int iYEnd = srcROI.y + (int) ceilYEnd;
    // first row: partial vertical coverage
    int pos = iYBegin * srcStep + iXBegin;
    float wsum = 1.0f - yBegin + floorYBegin;
    float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                             ceilXEnd) * (1.0f - yBegin + floorYBegin);
    pos += srcStep;
    // interior rows: full vertical coverage
    // NOTE: this inner 'iy' shadows the destination row index; the store at
    // the bottom uses the outer iy again once the loop scope ends
    for (int iy = iYBegin + 1; iy < iYEnd; ++iy)
    {
        sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                            ceilXEnd);
        pos += srcStep;
        wsum += 1.0f;
    }
    // last row: partial vertical coverage
    sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin,
                        ceilXEnd) * (ceilYEnd - yEnd);
    wsum += ceilYEnd - yEnd;
    sum /= wsum;
    dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum;
}
// bicubic interpolation
// Piecewise-cubic interpolation weight for a sample at signed distance x_
// from the interpolation point; zero outside the two-pixel support.
__forceinline__
__device__ float bicubicCoeff(float x_)
{
    const float x = fabsf(x_);
    if (x >= 2.0f)
    {
        // outside the kernel support
        return 0.0f;
    }
    if (x > 1.0f)
    {
        // outer lobe: 1 < |x| < 2
        return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
    }
    // inner lobe: |x| <= 1
    return x * x * (1.5f * x - 2.5f) + 1.0f;
}
// Bicubic resize kernel: each destination pixel accumulates a neighbourhood
// of up to 4x4 source texels (clamped to the ROI) weighted by the separable
// cubic kernel bicubicCoeff, then normalizes by the weight sum.
// texSrc2D is sampled with normalized coordinates, hence the dx/dy scaling.
// NOTE(review): coordinates are normalized by the ROI extent (1/srcROI.*)
// while the texture bind in nppiStResize_32f_C1R covers the full srcSize —
// these only agree when the ROI spans the whole image; confirm intended use.
__global__ void resizeBicubic(NcvSize32u srcSize,
                              NcvRect32u srcROI,
                              NcvSize32u dstSize,
                              Ncv32u dstStep,
                              Ncv32f *dst,
                              NcvRect32u dstROI,
                              Ncv32f scaleX,
                              Ncv32f scaleY)
{
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= dstROI.width || iy >= dstROI.height)
    {
        return;
    }
    // texel size in normalized texture coordinates
    const float dx = 1.0f / srcROI.width;
    const float dy = 1.0f / srcROI.height;
    float rx = (float) srcROI.x;
    float ry = (float) srcROI.y;
    float rw = (float) srcROI.width;
    float rh = (float) srcROI.height;
    float x = scaleX * (float) ix;
    float y = scaleY * (float) iy;
    // sampling range
    // border mode is clamp
    float xmin = fmax (ceilf (x - 2.0f), 0.0f);
    float xmax = fmin (floorf (x + 2.0f), rw - 1.0f);
    float ymin = fmax (ceilf (y - 2.0f), 0.0f);
    float ymax = fmin (floorf (y + 2.0f), rh - 1.0f);
    // shift data window to match ROI (+0.5 moves to texel centers)
    rx += 0.5f;
    ry += 0.5f;
    x += rx;
    y += ry;
    xmin += rx;
    xmax += rx;
    ymin += ry;
    ymax += ry;
    float sum = 0.0f;
    float wsum = 0.0f;
    // accumulate cubic-weighted samples over the clamped window
    for (float cy = ymin; cy <= ymax; cy += 1.0f)
    {
        for (float cx = xmin; cx <= xmax; cx += 1.0f)
        {
            float xDist = x - cx;
            float yDist = y - cy;
            float wx = bicubicCoeff (xDist);
            float wy = bicubicCoeff (yDist);
            wx *= wy;
            sum += wx * tex2D (texSrc2D, cx * dx, cy * dy);
            wsum += wx;
        }
    }
    // guard against an all-zero weight sum
    dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum;
}
/**
 * Resizes a single-channel 32f image by xFactor/yFactor using either
 * supersampling (area average, texSrc linear texture) or bicubic
 * interpolation (texSrc2D 2D texture); other modes return NPPST_ERROR.
 * nSrcStep/nDstStep are byte pitches.
 */
NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc,
                               NcvSize32u srcSize,
                               Ncv32u nSrcStep,
                               NcvRect32u srcROI,
                               Ncv32f *pDst,
                               NcvSize32u dstSize,
                               Ncv32u nDstStep,
                               NcvRect32u dstROI,
                               Ncv32f xFactor,
                               Ncv32f yFactor,
                               NppStInterpMode interpolation)
{
    NCVStatus status = NPPST_SUCCESS;
    ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE);
    // FIX: the destination width was cast to Ncv32f (float) inside an
    // integer pitch check — an obvious typo for Ncv32u that loses precision
    // for large widths and forces a float comparison.
    ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width &&
                     nDstStep >= sizeof (Ncv32f) * (Ncv32u) dstSize.width,
                     NPPST_INVALID_STEP);
    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);
    // TODO: preprocess ROI to prevent out of bounds access
    if (interpolation == nppStSupersample)
    {
        // bind the source as a 1D linear texture for row-wise integration
        cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep);
        // invoke kernel
        dim3 ctaSize (32, 6);
        dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x,
                       (dstROI.height + ctaSize.y - 1) / ctaSize.y);
        resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor);
    }
    else if (interpolation == nppStBicubic)
    {
        // bicubic path samples via a normalized 2D texture with mirroring
        texSrc2D.addressMode[0] = cudaAddressModeMirror;
        texSrc2D.addressMode[1] = cudaAddressModeMirror;
        texSrc2D.normalized = true;
        cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> ();
        cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height,
                           nSrcStep);
        dim3 ctaSize (32, 6);
        dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x,
                       (dstSize.height + ctaSize.y - 1) / ctaSize.y);
        resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor);
    }
    else
    {
        status = NPPST_ERROR;
    }
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    return status;
}
#endif /* CUDA_DISABLER */
|
8bf717aadf8356be9db35ef580999614ec0bbc20.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cusp/blas/blas.h>
#include<cusp/csr_matrix.h>
#include<cusp/multiply.h>
#include <cusp/array1d.h>
#include <cusp/copy.h>
#include <thrust/device_ptr.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
/* Input Arguments */
#define ROW prhs[0]
#define NPTR prhs[1]
#define NNZ prhs[2]
/* Output Arguments */
#define ROW_OUT plhs[0]
// MEX gateway: converts a COO row-index array living on the GPU into a
// CSR-style row-pointer array of length NPTR via a vectorized binary search.
// Inputs : ROW  - int32 gpuArray of row indices (length NNZ; lower_bound
//                 requires it to be sorted ascending)
//          NPTR - number of row-pointer entries to produce
//          NNZ  - number of nonzeros (length of ROW)
// Output : ROW_OUT - int32 gpuArray row-pointer vector
// NOTE(review): thrust::lower_bound / counting_iterator have no direct
// includes here — presumably pulled in via the cusp headers; confirm.
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
    mxGPUArray const *Arow;
    mxGPUArray *rowptr;
    mxInitGPU(); /* Initialize the MathWorks GPU API. */
    // scalar sizes arrive as MATLAB doubles; round to integers
    int nptr = lrint(mxGetScalar(NPTR));
    int nnz = lrint(mxGetScalar(NNZ));
    const mwSize ndim= 1;
    // const mwSize dimcol[]={mwSize(nnz)};
    const mwSize dimptr[]={mwSize(nptr)};
    // mexPrintf("nrows=%d,nnz=%d\n", dimptr[0],dimcol[0]);
    // input output array
    Arow = mxGPUCreateFromMxArray(ROW);
    rowptr = mxGPUCreateGPUArray(ndim,dimptr,mxINT32_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
    // mexPrintf("created matrix, nrows=%d,nnz=%d\n", nptr,nnz);
    // raw device pointers from the MATLAB gpuArrays
    int *d_Arow =(int *)(mxGPUGetDataReadOnly(Arow));
    int *d_rowptr =(int *)(mxGPUGetData(rowptr));
    // wrap with thrust::device_ptr so thrust algorithms can consume them
    thrust::device_ptr<int> wrap_d_Arow (d_Arow);
    thrust::device_ptr<int> wrap_d_rowptr (d_rowptr);
    // rowptr[r] = index of the first entry with row >= r, for r in [0, nptr)
    thrust::lower_bound(wrap_d_Arow,
                        wrap_d_Arow+nnz,
                        thrust::counting_iterator<int>(0),
                        thrust::counting_iterator<int>(nptr),
                        wrap_d_rowptr);
    //bring back to matlab
    ROW_OUT = mxGPUCreateMxArrayOnGPU(rowptr);
    //clean up
    mxGPUDestroyGPUArray(Arow);
    mxGPUDestroyGPUArray(rowptr);
    return;
}
| 8bf717aadf8356be9db35ef580999614ec0bbc20.cu | #include <cuda.h>
#include <cusp/blas/blas.h>
#include<cusp/csr_matrix.h>
#include<cusp/multiply.h>
#include <cusp/array1d.h>
#include <cusp/copy.h>
#include <thrust/device_ptr.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
/* Input Arguments */
#define ROW prhs[0]
#define NPTR prhs[1]
#define NNZ prhs[2]
/* Output Arguments */
#define ROW_OUT plhs[0]
// MEX gateway: converts a COO row-index array living on the GPU into a
// CSR-style row-pointer array of length NPTR via a vectorized binary search.
// Inputs : ROW  - int32 gpuArray of row indices (length NNZ; lower_bound
//                 requires it to be sorted ascending)
//          NPTR - number of row-pointer entries to produce
//          NNZ  - number of nonzeros (length of ROW)
// Output : ROW_OUT - int32 gpuArray row-pointer vector
// NOTE(review): thrust::lower_bound / counting_iterator have no direct
// includes here — presumably pulled in via the cusp headers; confirm.
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
    mxGPUArray const *Arow;
    mxGPUArray *rowptr;
    mxInitGPU(); /* Initialize the MathWorks GPU API. */
    // scalar sizes arrive as MATLAB doubles; round to integers
    int nptr = lrint(mxGetScalar(NPTR));
    int nnz = lrint(mxGetScalar(NNZ));
    const mwSize ndim= 1;
    // const mwSize dimcol[]={mwSize(nnz)};
    const mwSize dimptr[]={mwSize(nptr)};
    // mexPrintf("nrows=%d,nnz=%d\n", dimptr[0],dimcol[0]);
    // input output array
    Arow = mxGPUCreateFromMxArray(ROW);
    rowptr = mxGPUCreateGPUArray(ndim,dimptr,mxINT32_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
    // mexPrintf("created matrix, nrows=%d,nnz=%d\n", nptr,nnz);
    // raw device pointers from the MATLAB gpuArrays
    int *d_Arow =(int *)(mxGPUGetDataReadOnly(Arow));
    int *d_rowptr =(int *)(mxGPUGetData(rowptr));
    // wrap with thrust::device_ptr so thrust algorithms can consume them
    thrust::device_ptr<int> wrap_d_Arow (d_Arow);
    thrust::device_ptr<int> wrap_d_rowptr (d_rowptr);
    // rowptr[r] = index of the first entry with row >= r, for r in [0, nptr)
    thrust::lower_bound(wrap_d_Arow,
                        wrap_d_Arow+nnz,
                        thrust::counting_iterator<int>(0),
                        thrust::counting_iterator<int>(nptr),
                        wrap_d_rowptr);
    //bring back to matlab
    ROW_OUT = mxGPUCreateMxArrayOnGPU(rowptr);
    //clean up
    mxGPUDestroyGPUArray(Arow);
    mxGPUDestroyGPUArray(rowptr);
    return;
}
|
097ab813095c8e8832441f0a359676a0953709e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver on 4/9/2018.
//
#include <Environment.h>
#include "../indexreduce.h"
#include <op_boilerplate.h>
#include <helpers/DebugHelper.h>
#include "../legacy_ops.h"
// Device-side entry shared by the type-specific index-reduce kernels below:
// thread 0 placement-constructs a UnifiedSharedMemory manager at the start of
// the dynamic shared-memory window, then all threads run the op-dispatched
// transform using that manager.
template <typename T>
static __device__ void indexReduceGeneric(
        const int op,
        T *dx,
        Nd4jLong *xShapeInfo, int xRank,
        T *extraParams,
        T *result,
        Nd4jLong *resultShapeInfo, int zRank,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    __shared__ UnifiedSharedMemory *manager;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        // placement-new: the manager object lives inside the shared-memory
        // window it also manages
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::indexreduce::IndexReduce<T>), sizeof(shape::TAD), xRank);
    }
    // all threads must observe the constructed manager before using it
    __syncthreads();
    functions::indexreduce::IndexReduce<T>::transform(
            op,
            dx,
            xShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            dimension,
            dimensionLength,
            postProcessOrNot,
            allocationBuffer,
            reductionBuffer,
            manager,
            tadOnlyShapeInfo,
            tadOffsets);
}
// Kernel shim: forwards a double-precision index-reduce launch to the
// generic template implementation.
__global__ void indexReduceDouble(
        int op,
        double *dx,
        Nd4jLong *xShapeInfo, int xRank,
        double *extraParams,
        double *result,
        Nd4jLong *resultShapeInfo, int zRank,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    indexReduceGeneric<double>(op, dx, xShapeInfo, xRank, extraParams, result,
                               resultShapeInfo, zRank, dimension, dimensionLength,
                               postProcessOrNot, allocationBuffer, reductionBuffer,
                               tadOnlyShapeInfo, tadOffsets);
}
// Kernel shim: forwards a single-precision index-reduce launch to the
// generic template implementation.
__global__ void indexReduceFloat(
        int op,
        float *dx,
        Nd4jLong *xShapeInfo, int xRank,
        float *extraParams,
        float *result,
        Nd4jLong *resultShapeInfo, int zRank,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    indexReduceGeneric<float>(op, dx, xShapeInfo, xRank, extraParams, result,
                              resultShapeInfo, zRank, dimension, dimensionLength,
                              postProcessOrNot, allocationBuffer, reductionBuffer,
                              tadOnlyShapeInfo, tadOffsets);
}
// Kernel shim: forwards a half-precision index-reduce launch to the
// generic template implementation.
__global__ void indexReduceHalf(
        int op,
        float16 *dx,
        Nd4jLong *xShapeInfo, int xRank,
        float16 *extraParams,
        float16 *result,
        Nd4jLong *resultShapeInfo, int zRank,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    indexReduceGeneric<float16>(op, dx, xShapeInfo, xRank, extraParams, result,
                                resultShapeInfo, zRank, dimension, dimensionLength,
                                postProcessOrNot, allocationBuffer, reductionBuffer,
                                tadOnlyShapeInfo, tadOffsets);
}
namespace functions {
namespace indexreduce {
// Host launcher: full-array (scalar) index reduction for float. The kernel's
// result-shape/dimension arguments are overridden with nullptr/1 so the whole
// input collapses to a single index.
// NOTE(review): unlike the double/half specializations this one also blocks
// on the stream via checkCudaErrors(hipStreamSynchronize) — possibly a
// leftover debug sync; confirm before relying on asynchronous behavior.
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            nullptr, 0,
            nullptr,
            1,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    checkCudaErrors(hipStreamSynchronize(*stream));
    nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarFloat(...) failed");
}
// Host launcher: full-array (scalar) index reduction for double; result
// shape/dimension arguments are overridden with nullptr/1 (see float variant).
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            nullptr, 0,
            nullptr,
            1,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarDouble(...) failed");
}
// Host launcher: full-array (scalar) index reduction for float16; result
// shape/dimension arguments are overridden with nullptr/1 (see float variant).
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            nullptr, 0,
            nullptr,
            1,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarHalf(...) failed");
}
// Host launcher: dimension-wise index reduction for float (one result per TAD
// along the requested dimensions).
// NOTE(review): the postProcessOrNot parameter is ignored — the launch always
// passes the literal 1; confirm this is intended.
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            resultShapeInfo, zRank,
            dimension,
            dimensionLength,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    DEBUG_KERNEL(stream, opNum);
}
// Host launcher: dimension-wise index reduction for double (one result per
// TAD). postProcessOrNot is ignored — the literal 1 is always passed.
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            resultShapeInfo, zRank,
            dimension,
            dimensionLength,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    DEBUG_KERNEL(stream, opNum);
}
// Host launcher: dimension-wise index reduction for float16 (one result per
// TAD). postProcessOrNot is ignored — the literal 1 is always passed.
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
    hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
            opNum,
            dx,
            xShapeInfo, xRank,
            extraParams,
            result,
            resultShapeInfo, zRank,
            dimension,
            dimensionLength,
            1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
    DEBUG_KERNEL(stream, opNum);
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
    // Ensure that we won't compile any un-specialized types:
    // referencing the undefined device function 'error' forces a link-time
    // failure for any T without an explicit specialization below.
    __device__ T * getPointer() {
        extern __device__ void error(void);
        error();
        return 0;
    }
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
    // Returns the dynamic shared-memory window reinterpreted as
    // IndexValue<float> storage; each specialization uses a uniquely named
    // extern shared array to avoid clashing declarations.
    __device__ IndexValue<float> * getPointer() {
        extern __shared__ IndexValue<float> s_int2[];
        return s_int2;
    }
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
    // Returns the dynamic shared-memory window reinterpreted as
    // IndexValue<double> storage; each specialization uses a uniquely named
    // extern shared array to avoid clashing declarations.
    __device__ IndexValue<double> * getPointer() {
        extern __shared__ IndexValue<double> s_int6[];
        return s_int6;
    }
};
// Block-wide tree reduction of per-thread partial IndexValue results held in
// shared memory, leaving the combined winner in sPartials[0].
// Phase 1: if blockDim.x is not a power of two, fold the tail threads
// [floorPow2, blockDim.x) down onto [0, floorPow2).
// Phase 2: standard halving reduction over the power-of-two prefix.
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::aggregatePartials(IndexValue<T> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements,T *extraParams) {
    // start the shared memory loop on the next power of 2 less
    // than the block size. If block size is not a power of 2,
    // accumulate the intermediate sums in the remainder range.
    IndexValue<T> *sPartials = *sPartialsRef;
    Nd4jLong floorPow2 = blockDim.x;
    if (floorPow2 & (floorPow2 - 1)) {
        // strip set bits until only the highest remains:
        // the largest power of two <= blockDim.x
        while ( floorPow2 & (floorPow2 - 1) ) {
            floorPow2 &= floorPow2 - 1;
        }
        if (tid >= floorPow2) {
            IndexValue<T> prev = sPartials[tid - floorPow2];
            IndexValue<T> curr = sPartials[tid];
            sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
        }
        // barrier is uniform: the enclosing condition depends only on blockDim.x
        __syncthreads();
    }
    for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
        if (tid < activeThreads && tid + activeThreads < numElements) {
            IndexValue<T> curr = sPartials[tid];
            IndexValue<T> next = sPartials[tid + activeThreads];
            sPartials[tid] = OpType::update(curr,next,extraParams);
        }
        __syncthreads();
    }
}
// Runtime op dispatcher: DISPATCH_BY_OPNUM expands to a switch over
// INDEX_REDUCE_OPS and forwards to the matching OpType-templated transform.
template <typename T>
__device__ void IndexReduce<T>::transform(
        const int opNum,
        T *x,
        Nd4jLong *xShapeInfo,
        T *extraParams,
        T *result,
        Nd4jLong *resultShapeInfo,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot,
        int *allocationBuffer,
        T *reductionBuffer,
        UnifiedSharedMemory *manager,
        Nd4jLong *tadShapeInfo,
        Nd4jLong *tadOffset) {
    DISPATCH_BY_OPNUM(transform, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::transform(
T *dx,
Nd4jLong *xShapeInfo,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
Nd4jLong *tadOnlyShapeInfo,
Nd4jLong *tadOffsets){
/**int
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
//shared memory space for storing intermediate results
IndexValue<T> *sPartials;
sPartials = (IndexValue<T> *)manager->getSharedReductionBuffer(); //holder.getPointer();
// T startingVal = OpType::startingValue(dx);
// IndexValue <T> val = {startingVal, threadIdx.x};
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jLong xLength;
__shared__ volatile Nd4jLong resultLength;
//only compute the tad indexes once
IndexValue <T> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
// xElementWiseStride = shape::elementWiseStride(xShapeInfo);
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ Nd4jLong *tadShape;
__shared__ Nd4jLong *tadStride;
__shared__ char tadOrder;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
tadOrder = shape::order(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
Nd4jLong xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank,tadShape, i, tadLength, xCoord);
auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
IndexValue<T> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = (T) sPartials[threadIdx.x].index;
}
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<T> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = (T) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
//reduce to 1 result
else if (resultScalar) {
auto n = shape::length(xShapeInfo);
auto xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1) {
for(Nd4jLong i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue <T> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
auto rank = shape::rank(xShapeInfo);
Nd4jLong ind2sub[MAX_RANK];
for(Nd4jLong i = tid;i < n; i += blockDim.x * gridDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, n, ind2sub);
Nd4jLong offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),ind2sub,rank);
IndexValue <T> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
result[0] = (T) sPartials[0].index;
}
}
} else {
if (tid == 0) {
unsigned int *tc = (unsigned *) reductionBuffer;
tc[16384] = 0;
result[0] = (T) sPartials[0].index;
}
}
}
}
}
}
| 097ab813095c8e8832441f0a359676a0953709e5.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver on 4/9/2018.
//
#include <Environment.h>
#include "../indexreduce.h"
#include <op_boilerplate.h>
#include <helpers/DebugHelper.h>
#include "../legacy_ops.h"
template <typename T>
static __device__ void indexReduceGeneric(
const int op,
T *dx,
Nd4jLong *xShapeInfo, int xRank,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::indexreduce::IndexReduce<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::indexreduce::IndexReduce<T>::transform(
op,
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
postProcessOrNot,
allocationBuffer,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
__global__ void indexReduceDouble(
int op,
double *dx,
Nd4jLong *xShapeInfo, int xRank,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceGeneric<double>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceFloat(
int op,
float *dx,
Nd4jLong *xShapeInfo, int xRank,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceGeneric<float>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceHalf(
int op,
float16 *dx,
Nd4jLong *xShapeInfo, int xRank,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceGeneric<float16>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
namespace functions {
namespace indexreduce {
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarFloat(...) failed");
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarDouble(...) failed");
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarHalf(...) failed");
}
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
DEBUG_KERNEL(stream, opNum);
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
__device__ IndexValue<float> * getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
__device__ IndexValue<double> * getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::aggregatePartials(IndexValue<T> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements,T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
IndexValue<T> *sPartials = *sPartialsRef;
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<T> prev = sPartials[tid - floorPow2];
IndexValue<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<T> curr = sPartials[tid];
IndexValue<T> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr,next,extraParams);
}
__syncthreads();
}
}
template <typename T>
__device__ void IndexReduce<T>::transform(
const int opNum,
T *x,
Nd4jLong *xShapeInfo,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
Nd4jLong *tadShapeInfo,
Nd4jLong *tadOffset) {
DISPATCH_BY_OPNUM(transform, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::transform(
T *dx,
Nd4jLong *xShapeInfo,
T *extraParams,
T *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
Nd4jLong *tadOnlyShapeInfo,
Nd4jLong *tadOffsets){
/**int
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
//shared memory space for storing intermediate results
IndexValue<T> *sPartials;
sPartials = (IndexValue<T> *)manager->getSharedReductionBuffer(); //holder.getPointer();
// T startingVal = OpType::startingValue(dx);
// IndexValue <T> val = {startingVal, threadIdx.x};
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jLong xLength;
__shared__ volatile Nd4jLong resultLength;
//only compute the tad indexes once
IndexValue <T> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
// xElementWiseStride = shape::elementWiseStride(xShapeInfo);
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ Nd4jLong *tadShape;
__shared__ Nd4jLong *tadStride;
__shared__ char tadOrder;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
tadOrder = shape::order(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
Nd4jLong xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank,tadShape, i, tadLength, xCoord);
auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
IndexValue<T> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = (T) sPartials[threadIdx.x].index;
}
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<T> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = (T) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
//reduce to 1 result
else if (resultScalar) {
auto n = shape::length(xShapeInfo);
auto xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1) {
for(Nd4jLong i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue <T> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
auto rank = shape::rank(xShapeInfo);
Nd4jLong ind2sub[MAX_RANK];
for(Nd4jLong i = tid;i < n; i += blockDim.x * gridDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, n, ind2sub);
Nd4jLong offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),ind2sub,rank);
IndexValue <T> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
result[0] = (T) sPartials[0].index;
}
}
} else {
if (tid == 0) {
unsigned int *tc = (unsigned *) reductionBuffer;
tc[16384] = 0;
result[0] = (T) sPartials[0].index;
}
}
}
}
}
}
|
77fc9701c5dc39ee8204e571949625316e40aac2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
int main()
{
int a, b, c;
int *dev_c;
a=3;
b=4;
hipMalloc((void**) & dev_c, sizeof(int));
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, a, b, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d is %d\n", a, b, c);
hipFree(dev_c);
return 0;
} | 77fc9701c5dc39ee8204e571949625316e40aac2.cu | #include <stdio.h>
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
int main()
{
int a, b, c;
int *dev_c;
a=3;
b=4;
cudaMalloc((void**) & dev_c, sizeof(int));
add<<<1,1>>>(a, b, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d + %d is %d\n", a, b, c);
cudaFree(dev_c);
return 0;
} |
a6b0126057cf3c45528338dd39727b3be19eaf97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "packet.h"
__global__ void mult_gpu(int *A, int *B, int *C, int size){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int i, k;
int sum = 0;
if(tid < TD_NUM){
for(i = 0; i < (size*size/TD_NUM); i++){
for(k = 0; k < size; k++){
sum += A[((i*TD_NUM+tid)/size)*size+k] * B[k*size+((i*TD_NUM+tid)%size)];
}
C[((i*TD_NUM+tid)/size)*size+((i*TD_NUM+tid)%size)] = sum;
if(k == size) sum = 0;
}
}
}
void mult(int *A, int *B, int *C, int size){
int i, j, k;
int sum = 0;
for(j = 0; j < TD_NUM; j++)
for(i = 0; i < (size*size/TD_NUM); i++){
for(k = 0; k < size; k++){
sum += A[((i*TD_NUM+j)/size)*size+k] * B[k*size+((i*TD_NUM+j)%size)];
}
C[((i*TD_NUM+j)/size)*size+((i*TD_NUM+j)%size)] = sum;
if(k == size) sum = 0;
}
}
__device__ void explode ( float x, float y, int *value){
int k;
float x1;
float x2;
float y1;
float y2;
//int value;
*value = 0;
x1 = x;
y1 = y;
for ( k = 1; k <= count_max; k++ )
{
x2 = x1 * x1 - y1 * y1 + x;
y2 = 2.0 * x1 * y1 + y;
if ( x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 )
{
*value = k;
//if(k > 1000)
//printf("k:%d\n", k);
break;
}
x1 = x2;
y1 = y2;
}
}
__global__ void get_pixel(int *count, float *index){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int i;
float x, y;
if(tid < TD_NUM){
for(i = 0; i < (n*n/TD_NUM); i++){
//for(j = 0; j < TD_NUM; j++){
//for ( i = 0; i < n; i++ ){
//for ( j = 0; j < n; j++ ){
x = ( ( float ) ( (i*TD_NUM+tid)%n ) * (x_max + *index)
+ ( float ) ( n - ((i*TD_NUM+tid)%n) - 1 ) * (x_min + *index) )
/ ( float ) ( n - 1 );
y = ( ( float ) ( (i*TD_NUM+tid)/n ) * (y_max + *index)
+ ( float ) ( n - ((i*TD_NUM+tid)/n) - 1 ) * (y_min + *index) )
/ ( float ) ( n - 1 );
explode ( x, y, &count[((i*TD_NUM+tid)/n) + ((i*TD_NUM+tid)%n) * n] );
}
}
}
void h_explode ( float x, float y, int *value){
int k;
float x1;
float x2;
float y1;
float y2;
//int value;
*value = 0;
x1 = x;
y1 = y;
for ( k = 1; k <= count_max; k++ )
{
x2 = x1 * x1 - y1 * y1 + x;
y2 = 2.0 * x1 * y1 + y;
if ( x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 )
{
*value = k;
//if(k > 1000)
//printf("k:%d\n", k);
break;
}
x1 = x2;
y1 = y2;
}
}
void h_get_pixel(int *count, float index){
int i, j;
float x, y;
for ( i = 0; i < n; i++ )
{
for ( j = 0; j < n; j++ )
{
x = ( ( float ) ( j ) * (x_max + index)
+ ( float ) ( n - j - 1 ) * (x_min + index) )
/ ( float ) ( n - 1 );
y = ( ( float ) ( i ) * (y_max + index)
+ ( float ) ( n - i - 1 ) * (y_min + index) )
/ ( float ) ( n - 1 );
h_explode ( x, y, &count[i + j * n] );
}
}
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_H[j*TD_NUM+tid] += (r[(j*TD_NUM+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM; j++)
Vect_Dn[(j*TD_NUM+tid)]=Vect_H[(j*TD_NUM+tid)*N_samp];
//Up Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM;j++)
Vect_Up[(j*TD_NUM+tid)*N_samp]=Vect_Dn[(j*TD_NUM+tid)];
__syncthreads();
//convolving F
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_F[j*TD_NUM+tid]+=(F[k]*Vect_Up[(j*TD_NUM+tid)-k]);
}
}
}
}
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F){
int j, k, p;
//convolving H
for (j=0; j< N_sim; j++)
{
//for (k=0; ((k<N_col) & ((j-k)>=0)); k++)
for(k = 0; k < N_col; k++){
if((j-k)>=0){
//Vect_H[j]+=H[k]*r[j-k];
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
//for (k=0; ((k<N_col) & ((j-k)>=0)); k++)
for(k = 0; k < N_col; k++){
if((j-k)>=0){
//Vect_H[j]+=H[k]*r[j-k];
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
__device__ void DES_ROUND_dev(uint32 *SK, uint32 X, uint32 Y)
{
uint32 T;
T = *SK ^ X;
Y ^= SB8[ (T ) & 0x3F ] ^
SB6[ (T >> 8) & 0x3F ] ^
SB4[ (T >> 16) & 0x3F ] ^
SB2[ (T >> 24) & 0x3F ];
T = *SK++ ^ ((X << 28) | (X >> 4));
Y ^= SB7[ (T ) & 0x3F ] ^
SB5[ (T >> 8) & 0x3F ] ^
SB3[ (T >> 16) & 0x3F ] ^
SB1[ (T >> 24) & 0x3F ];
}
/* DES key schedule */
int des_main_ks( uint32 *SK, uint8 *key )
{
int i;
uint32 X, Y, T;
GET_UINT32( X, key, 0 );
GET_UINT32( Y, key, 4 );
/* Permuted Choice 1 */
T = ((Y >> 4) ^ X) & 0x0F0F0F0F; X ^= T; Y ^= (T << 4);
T = ((Y ) ^ X) & 0x10101010; X ^= T; Y ^= (T );
X = (LHs[ (X ) & 0xF] << 3) | (LHs[ (X >> 8) & 0xF ] << 2)
| (LHs[ (X >> 16) & 0xF] << 1) | (LHs[ (X >> 24) & 0xF ] )
| (LHs[ (X >> 5) & 0xF] << 7) | (LHs[ (X >> 13) & 0xF ] << 6)
| (LHs[ (X >> 21) & 0xF] << 5) | (LHs[ (X >> 29) & 0xF ] << 4);
Y = (RHs[ (Y >> 1) & 0xF] << 3) | (RHs[ (Y >> 9) & 0xF ] << 2)
| (RHs[ (Y >> 17) & 0xF] << 1) | (RHs[ (Y >> 25) & 0xF ] )
| (RHs[ (Y >> 4) & 0xF] << 7) | (RHs[ (Y >> 12) & 0xF ] << 6)
| (RHs[ (Y >> 20) & 0xF] << 5) | (RHs[ (Y >> 28) & 0xF ] << 4);
X &= 0x0FFFFFFF;
Y &= 0x0FFFFFFF;
/* calculate subkeys */
for( i = 0; i < 16; i++ )
{
if( i < 2 || i == 8 || i == 15 )
{
X = ((X << 1) | (X >> 27)) & 0x0FFFFFFF;
Y = ((Y << 1) | (Y >> 27)) & 0x0FFFFFFF;
}
else
{
X = ((X << 2) | (X >> 26)) & 0x0FFFFFFF;
Y = ((Y << 2) | (Y >> 26)) & 0x0FFFFFFF;
}
*SK++ = ((X << 4) & 0x24000000) | ((X << 28) & 0x10000000)
| ((X << 14) & 0x08000000) | ((X << 18) & 0x02080000)
| ((X << 6) & 0x01000000) | ((X << 9) & 0x00200000)
| ((X >> 1) & 0x00100000) | ((X << 10) & 0x00040000)
| ((X << 2) & 0x00020000) | ((X >> 10) & 0x00010000)
| ((Y >> 13) & 0x00002000) | ((Y >> 4) & 0x00001000)
| ((Y << 6) & 0x00000800) | ((Y >> 1) & 0x00000400)
| ((Y >> 14) & 0x00000200) | ((Y ) & 0x00000100)
| ((Y >> 5) & 0x00000020) | ((Y >> 10) & 0x00000010)
| ((Y >> 3) & 0x00000008) | ((Y >> 18) & 0x00000004)
| ((Y >> 26) & 0x00000002) | ((Y >> 24) & 0x00000001);
*SK++ = ((X << 15) & 0x20000000) | ((X << 17) & 0x10000000)
| ((X << 10) & 0x08000000) | ((X << 22) & 0x04000000)
| ((X >> 2) & 0x02000000) | ((X << 1) & 0x01000000)
| ((X << 16) & 0x00200000) | ((X << 11) & 0x00100000)
| ((X << 3) & 0x00080000) | ((X >> 6) & 0x00040000)
| ((X << 15) & 0x00020000) | ((X >> 4) & 0x00010000)
| ((Y >> 2) & 0x00002000) | ((Y << 8) & 0x00001000)
| ((Y >> 14) & 0x00000808) | ((Y >> 9) & 0x00000400)
| ((Y ) & 0x00000200) | ((Y << 7) & 0x00000100)
| ((Y >> 7) & 0x00000020) | ((Y >> 3) & 0x00000011)
| ((Y << 2) & 0x00000004) | ((Y >> 21) & 0x00000002);
}
return( 0 );
}
int des_set_key( uint32 *esk, uint32 *dsk, uint8 key1[8],
uint8 key2[8], uint8 key3[8])
{
int i;
/* setup encryption subkeys */
des_main_ks( esk, key1 );
des_main_ks( dsk + 32, key2 );
des_main_ks( esk + 64, key3 );
/* setup decryption subkeys */
for( i = 0; i < 32; i += 2 )
{
dsk[i] = esk[94 - i];
dsk[i + 1] = esk[95 - i];
esk[i + 32] = dsk[62 - i];
esk[i + 33] = dsk[63 - i];
dsk[i + 64] = esk[30 - i];
dsk[i + 65] = esk[31 - i];
}
return( 0 );
}
__device__ void des_crypt_dev( uint32 *SK, uint8 *input, uint8 *output, int len)
{
int i;
uint32 X, Y, T;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < TD_NUM){
for(i = 0; i < len/TD_NUM; i++){
X = ( (uint32) *(input + (i*TD_NUM+tid)*8) << 24 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 1) << 16 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 2) << 8 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 3) );
Y = ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 4) << 24 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 5) << 16 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 6) << 8 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 7) );
DES_IP( X, Y );
DES_ROUND_dev( SK, Y, X ); DES_ROUND_dev( (SK + 2), X, Y );
DES_ROUND_dev( (SK + 4), Y, X ); DES_ROUND_dev( (SK + 6), X, Y );
DES_ROUND_dev( (SK + 8), Y, X ); DES_ROUND_dev( (SK + 10), X, Y );
DES_ROUND_dev( (SK + 12), Y, X ); DES_ROUND_dev( (SK + 14), X, Y );
DES_ROUND_dev( (SK + 16), Y, X ); DES_ROUND_dev( (SK + 18), X, Y );
DES_ROUND_dev( (SK + 20), Y, X ); DES_ROUND_dev( (SK + 22), X, Y );
DES_ROUND_dev( (SK + 24), Y, X ); DES_ROUND_dev( (SK + 26), X, Y );
DES_ROUND_dev( (SK + 28), Y, X ); DES_ROUND_dev( (SK + 30), X, Y );
DES_ROUND_dev( (SK + 32), X, Y ); DES_ROUND_dev( (SK + 34), Y, X );
DES_ROUND_dev( (SK + 36), X, Y ); DES_ROUND_dev( (SK + 38), Y, X );
DES_ROUND_dev( (SK + 40), X, Y ); DES_ROUND_dev( (SK + 42), Y, X );
DES_ROUND_dev( (SK + 44), X, Y ); DES_ROUND_dev( (SK + 46), Y, X );
DES_ROUND_dev( (SK + 48), X, Y ); DES_ROUND_dev( (SK + 50), Y, X );
DES_ROUND_dev( (SK + 52), X, Y ); DES_ROUND_dev( (SK + 54), Y, X );
DES_ROUND_dev( (SK + 56), X, Y ); DES_ROUND_dev( (SK + 58), Y, X );
DES_ROUND_dev( (SK + 60), X, Y ); DES_ROUND_dev( (SK + 62), Y, X );
DES_ROUND_dev( (SK + 64), Y, X ); DES_ROUND_dev( (SK + 66), X, Y );
DES_ROUND_dev( (SK + 68), Y, X ); DES_ROUND_dev( (SK + 70), X, Y );
DES_ROUND_dev( (SK + 72), Y, X ); DES_ROUND_dev( (SK + 74), X, Y );
DES_ROUND_dev( (SK + 76), Y, X ); DES_ROUND_dev( (SK + 78), X, Y );
DES_ROUND_dev( (SK + 80), Y, X ); DES_ROUND_dev( (SK + 82), X, Y );
DES_ROUND_dev( (SK + 84), Y, X ); DES_ROUND_dev( (SK + 86), X, Y );
DES_ROUND_dev( (SK + 88), Y, X ); DES_ROUND_dev( (SK + 90), X, Y );
DES_ROUND_dev( (SK + 92), Y, X ); DES_ROUND_dev( (SK + 94), X, Y );
DES_FP( Y, X );
*(output + (i*TD_NUM+tid)*8) = (uint8) ( (Y) >> 24 );
*(output + ((i*TD_NUM+tid)*8) + 1) = (uint8) ( (Y) >> 16 );
*(output + ((i*TD_NUM+tid)*8) + 2) = (uint8) ( (Y) >> 8 );
*(output + ((i*TD_NUM+tid)*8) + 3) = (uint8) ( (Y) );
*(output + ((i*TD_NUM+tid)*8) + 4) = (uint8) ( (X) >> 24 );
*(output + ((i*TD_NUM+tid)*8) + 5) = (uint8) ( (X) >> 16 );
*(output + ((i*TD_NUM+tid)*8) + 6) = (uint8) ( (X) >> 8 );
*(output + ((i*TD_NUM+tid)*8) + 7) = (uint8) ( (X) );
}
}
}
__global__ void des_encrypt_dev( uint32 *esk, uint32 *dsk, uint8 *input, uint8 *output, int len)
{
des_crypt_dev( esk, input, input, len);
des_crypt_dev( dsk, input, output, len);
}
void DES_ROUND(uint32 *SK, uint32 X, uint32 Y)
{
uint32 T;
T = *SK ^ X;
Y ^= SB8[ (T ) & 0x3F ] ^
SB6[ (T >> 8) & 0x3F ] ^
SB4[ (T >> 16) & 0x3F ] ^
SB2[ (T >> 24) & 0x3F ];
T = *SK++ ^ ((X << 28) | (X >> 4));
Y ^= SB7[ (T ) & 0x3F ] ^
SB5[ (T >> 8) & 0x3F ] ^
SB3[ (T >> 16) & 0x3F ] ^
SB1[ (T >> 24) & 0x3F ];
}
/* DES 64-bit block encryption/decryption */
// Host (CPU) reference implementation of the 48-round 3DES-EDE block
// transform: processes `len` 8-byte blocks from `input` into `output`
// using the 96-entry subkey array SK (two subkeys consumed per round call).
void des_crypt( uint32 *SK, uint8 *input, uint8 *output, int len)
{
int i;
uint32 X, Y, T; // NOTE(review): T is unused here; the round temporary lives in DES_ROUND
for(i = 0; i < len; i++){
// load the 8-byte block big-endian: X = high word, Y = low word
X = ( (uint32) *(input + i*8) << 24 )
| ( (uint32) *(input + (i*8) + 1) << 16 )
| ( (uint32) *(input + (i*8) + 2) << 8 )
| ( (uint32) *(input + (i*8) + 3) );
Y = ( (uint32) *(input + (i*8) + 4) << 24 )
| ( (uint32) *(input + (i*8) + 5) << 16 )
| ( (uint32) *(input + (i*8) + 6) << 8 )
| ( (uint32) *(input + (i*8) + 7) );
// initial permutation
DES_IP( X, Y );
// 48 Feistel rounds (3 x 16, EDE); X/Y argument order alternates each call
DES_ROUND( SK, Y, X ); DES_ROUND( (SK + 2), X, Y );
DES_ROUND( (SK + 4), Y, X ); DES_ROUND( (SK + 6), X, Y );
DES_ROUND( (SK + 8), Y, X ); DES_ROUND( (SK + 10), X, Y );
DES_ROUND( (SK + 12), Y, X ); DES_ROUND( (SK + 14), X, Y );
DES_ROUND( (SK + 16), Y, X ); DES_ROUND( (SK + 18), X, Y );
DES_ROUND( (SK + 20), Y, X ); DES_ROUND( (SK + 22), X, Y );
DES_ROUND( (SK + 24), Y, X ); DES_ROUND( (SK + 26), X, Y );
DES_ROUND( (SK + 28), Y, X ); DES_ROUND( (SK + 30), X, Y );
DES_ROUND( (SK + 32), X, Y ); DES_ROUND( (SK + 34), Y, X );
DES_ROUND( (SK + 36), X, Y ); DES_ROUND( (SK + 38), Y, X );
DES_ROUND( (SK + 40), X, Y ); DES_ROUND( (SK + 42), Y, X );
DES_ROUND( (SK + 44), X, Y ); DES_ROUND( (SK + 46), Y, X );
DES_ROUND( (SK + 48), X, Y ); DES_ROUND( (SK + 50), Y, X );
DES_ROUND( (SK + 52), X, Y ); DES_ROUND( (SK + 54), Y, X );
DES_ROUND( (SK + 56), X, Y ); DES_ROUND( (SK + 58), Y, X );
DES_ROUND( (SK + 60), X, Y ); DES_ROUND( (SK + 62), Y, X );
DES_ROUND( (SK + 64), Y, X ); DES_ROUND( (SK + 66), X, Y );
DES_ROUND( (SK + 68), Y, X ); DES_ROUND( (SK + 70), X, Y );
DES_ROUND( (SK + 72), Y, X ); DES_ROUND( (SK + 74), X, Y );
DES_ROUND( (SK + 76), Y, X ); DES_ROUND( (SK + 78), X, Y );
DES_ROUND( (SK + 80), Y, X ); DES_ROUND( (SK + 82), X, Y );
DES_ROUND( (SK + 84), Y, X ); DES_ROUND( (SK + 86), X, Y );
DES_ROUND( (SK + 88), Y, X ); DES_ROUND( (SK + 90), X, Y );
DES_ROUND( (SK + 92), Y, X ); DES_ROUND( (SK + 94), X, Y );
// final permutation
DES_FP( Y, X );
// store the result big-endian (Y word first, then X)
*(output + i*8) = (uint8) ( (Y) >> 24 );
*(output + (i*8) + 1) = (uint8) ( (Y) >> 16 );
*(output + (i*8) + 2) = (uint8) ( (Y) >> 8 );
*(output + (i*8) + 3) = (uint8) ( (Y) );
*(output + (i*8) + 4) = (uint8) ( (X) >> 24 );
*(output + (i*8) + 5) = (uint8) ( (X) >> 16 );
*(output + (i*8) + 6) = (uint8) ( (X) >> 8 );
*(output + (i*8) + 7) = (uint8) ( (X) );
}
}
// Host reference counterpart of des_encrypt_dev: transforms `input` in place
// with the `esk` subkeys, then produces `output` from it with the `dsk`
// subkeys. `len` is the number of 8-byte blocks.
void des_encrypt( uint32 *esk, uint32 *dsk, uint8 *input, uint8 *output, int len)
{
des_crypt( esk, input, input, len);
des_crypt( dsk, input, output, len);
}
| a6b0126057cf3c45528338dd39727b3be19eaf97.cu | #include "kernel.h"
#include "packet.h"
// Computes C = A * B for size x size int matrices. Each thread tid handles
// output elements i*TD_NUM+tid (row = idx/size, col = idx%size).
// NOTE(review): assumes TD_NUM divides size*size, otherwise trailing
// elements are never computed -- confirm against the sizes used.
__global__ void mult_gpu(int *A, int *B, int *C, int size){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int i, k;
int sum = 0;
if(tid < TD_NUM){
for(i = 0; i < (size*size/TD_NUM); i++){
for(k = 0; k < size; k++){
sum += A[((i*TD_NUM+tid)/size)*size+k] * B[k*size+((i*TD_NUM+tid)%size)];
}
C[((i*TD_NUM+tid)/size)*size+((i*TD_NUM+tid)%size)] = sum;
if(k == size) sum = 0; // k always equals size here: resets the accumulator
}
}
}
// Host reference for mult_gpu: computes C = A * B for size x size int
// matrices, iterating in the same thread-id-emulating order as the kernel
// (element index = i*TD_NUM + j; row = idx/size, col = idx%size).
// NOTE(review): like the kernel, assumes TD_NUM divides size*size.
void mult(int *A, int *B, int *C, int size){
int i, j, k;
int sum = 0;
for(j = 0; j < TD_NUM; j++)
for(i = 0; i < (size*size/TD_NUM); i++){
for(k = 0; k < size; k++){
sum += A[((i*TD_NUM+j)/size)*size+k] * B[k*size+((i*TD_NUM+j)%size)];
}
C[((i*TD_NUM+j)/size)*size+((i*TD_NUM+j)%size)] = sum;
if(k == size) sum = 0; // k always equals size here: resets the accumulator
}
}
// Device-side Mandelbrot escape-time test for the point c = (x, y).
// Iterates z -> z^2 + c up to count_max times; *value receives the first
// iteration at which a component of z leaves [-2, 2], or 0 if it never does.
__device__ void explode ( float x, float y, int *value){
int k;
float x1;
float x2;
float y1;
float y2;
//int value;
*value = 0;
x1 = x;
y1 = y;
for ( k = 1; k <= count_max; k++ )
{
x2 = x1 * x1 - y1 * y1 + x; // Re(z^2 + c)
y2 = 2.0 * x1 * y1 + y; // Im(z^2 + c)
if ( x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 )
{
*value = k;
//if(k > 1000)
//printf("k:%d\n", k);
break;
}
x1 = x2;
y1 = y2;
}
}
// Maps each pixel of an n x n image to a point in the complex window
// [x_min, x_max] x [y_min, y_max] shifted by *index (a device scalar) and
// stores its escape count at count[i + j*n], matching h_get_pixel's layout
// (i = idx/n, j = idx%n). Each thread tid handles pixels i*TD_NUM+tid;
// NOTE(review): assumes TD_NUM divides n*n -- confirm.
__global__ void get_pixel(int *count, float *index){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int i;
float x, y;
if(tid < TD_NUM){
for(i = 0; i < (n*n/TD_NUM); i++){
//for(j = 0; j < TD_NUM; j++){
//for ( i = 0; i < n; i++ ){
//for ( j = 0; j < n; j++ ){
x = ( ( float ) ( (i*TD_NUM+tid)%n ) * (x_max + *index)
+ ( float ) ( n - ((i*TD_NUM+tid)%n) - 1 ) * (x_min + *index) )
/ ( float ) ( n - 1 );
y = ( ( float ) ( (i*TD_NUM+tid)/n ) * (y_max + *index)
+ ( float ) ( n - ((i*TD_NUM+tid)/n) - 1 ) * (y_min + *index) )
/ ( float ) ( n - 1 );
explode ( x, y, &count[((i*TD_NUM+tid)/n) + ((i*TD_NUM+tid)%n) * n] );
}
}
}
// Host-side Mandelbrot escape-time test for the point c = (x, y).
// Iterates z -> z^2 + c up to count_max times; *value receives the first
// iteration at which a component of z leaves [-2, 2], or 0 if the orbit
// never escapes within the iteration budget.
void h_explode ( float x, float y, int *value){
    *value = 0;
    float re = x;
    float im = y;
    for (int iter = 1; iter <= count_max; iter++)
    {
        // one step of z -> z^2 + c
        float re_next = re * re - im * im + x;
        float im_next = 2.0 * re * im + y;
        // escape test on each component, matching the device version
        if (re_next < -2.0 || 2.0 < re_next || im_next < -2.0 || 2.0 < im_next)
        {
            *value = iter;
            break;
        }
        re = re_next;
        im = im_next;
    }
}
// Host reference for get_pixel: for every pixel (i, j) of an n x n image,
// maps it to a point in [x_min, x_max] x [y_min, y_max] shifted by `index`
// and stores its escape count at count[i + j*n].
void h_get_pixel(int *count, float index){
int i, j;
float x, y;
for ( i = 0; i < n; i++ )
{
for ( j = 0; j < n; j++ )
{
x = ( ( float ) ( j ) * (x_max + index)
+ ( float ) ( n - j - 1 ) * (x_min + index) )
/ ( float ) ( n - 1 );
y = ( ( float ) ( i ) * (y_max + index)
+ ( float ) ( n - i - 1 ) * (y_min + index) )
/ ( float ) ( n - 1 );
h_explode ( x, y, &count[i + j * n] );
}
}
}
// Filter-bank stage over an N_sim-sample signal r:
//   1) FIR-convolve r with H into Vect_H,
//   2) decimate by N_samp into Vect_Dn,
//   3) zero-stuff back up into Vect_Up (only multiples of N_samp written),
//   4) FIR-convolve with F into Vect_F.
// Thread tid handles samples j*TD_NUM+tid; assumes TD_NUM divides N_sim.
// NOTE(review): Vect_H and Vect_F are accumulated with +=, so they (and
// Vect_Up, whose gaps are never written) must be zeroed by the caller
// beforehand -- confirm.
// NOTE(review): __syncthreads() only synchronizes within one block, yet the
// down/up-sampling steps read samples produced by other threads; this
// presumably requires a single-block launch covering TD_NUM threads --
// confirm the launch configuration.
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_H[j*TD_NUM+tid] += (r[(j*TD_NUM+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM; j++)
Vect_Dn[(j*TD_NUM+tid)]=Vect_H[(j*TD_NUM+tid)*N_samp];
//Up Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM;j++)
Vect_Up[(j*TD_NUM+tid)*N_samp]=Vect_Dn[(j*TD_NUM+tid)];
__syncthreads();
//convolving F
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_F[j*TD_NUM+tid]+=(F[k]*Vect_Up[(j*TD_NUM+tid)-k]);
}
}
}
}
}
// Host reference for the FBCore kernel: convolve r with H, decimate by
// N_samp, zero-stuff back up, then convolve with F.
// NOTE(review): Vect_H and Vect_F are accumulated with += and Vect_Up's
// gaps are never written, so all three must be zeroed by the caller.
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F){
int j, k, p; // NOTE(review): p is unused
//convolving H
for (j=0; j< N_sim; j++)
{
//for (k=0; ((k<N_col) & ((j-k)>=0)); k++)
for(k = 0; k < N_col; k++){
if((j-k)>=0){
//Vect_H[j]+=H[k]*r[j-k];
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
//for (k=0; ((k<N_col) & ((j-k)>=0)); k++)
for(k = 0; k < N_col; k++){
if((j-k)>=0){
//Vect_H[j]+=H[k]*r[j-k];
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
/*
 * One DES double round (two Feistel rounds) on the device, using the
 * SB1..SB8 S-box tables. Each call consumes two 32-bit subkeys, matching
 * the SK, SK+2, SK+4, ... stepping at the call sites in des_crypt_dev.
 *
 * Fixes relative to the previous version:
 *  - Y is now taken by reference: it was passed by value, so the round had
 *    no effect on the caller's state.
 *  - the second half-round reads SK[1] instead of re-reading SK[0].
 * X is only read here, so it stays by value.
 */
__device__ void DES_ROUND_dev(uint32 *SK, uint32 X, uint32 &Y)
{
uint32 T;
/* first half-round: subkey SK[0] */
T = *SK++ ^ X;
Y ^= SB8[ (T ) & 0x3F ] ^
SB6[ (T >> 8) & 0x3F ] ^
SB4[ (T >> 16) & 0x3F ] ^
SB2[ (T >> 24) & 0x3F ];
/* second half-round: subkey SK[1], X rotated right by 4 */
T = *SK ^ ((X << 28) | (X >> 4));
Y ^= SB7[ (T ) & 0x3F ] ^
SB5[ (T >> 8) & 0x3F ] ^
SB3[ (T >> 16) & 0x3F ] ^
SB1[ (T >> 24) & 0x3F ];
}
/* DES key schedule */
// Expands an 8-byte DES key into 32 round subkeys at SK (two per round).
// Always returns 0. GET_UINT32 is defined elsewhere and presumably loads
// the key big-endian -- confirm against its definition.
int des_main_ks( uint32 *SK, uint8 *key )
{
int i;
uint32 X, Y, T;
GET_UINT32( X, key, 0 );
GET_UINT32( Y, key, 4 );
/* Permuted Choice 1 */
T = ((Y >> 4) ^ X) & 0x0F0F0F0F; X ^= T; Y ^= (T << 4);
T = ((Y ) ^ X) & 0x10101010; X ^= T; Y ^= (T );
X = (LHs[ (X ) & 0xF] << 3) | (LHs[ (X >> 8) & 0xF ] << 2)
| (LHs[ (X >> 16) & 0xF] << 1) | (LHs[ (X >> 24) & 0xF ] )
| (LHs[ (X >> 5) & 0xF] << 7) | (LHs[ (X >> 13) & 0xF ] << 6)
| (LHs[ (X >> 21) & 0xF] << 5) | (LHs[ (X >> 29) & 0xF ] << 4);
Y = (RHs[ (Y >> 1) & 0xF] << 3) | (RHs[ (Y >> 9) & 0xF ] << 2)
| (RHs[ (Y >> 17) & 0xF] << 1) | (RHs[ (Y >> 25) & 0xF ] )
| (RHs[ (Y >> 4) & 0xF] << 7) | (RHs[ (Y >> 12) & 0xF ] << 6)
| (RHs[ (Y >> 20) & 0xF] << 5) | (RHs[ (Y >> 28) & 0xF ] << 4);
X &= 0x0FFFFFFF;
Y &= 0x0FFFFFFF;
/* calculate subkeys */
for( i = 0; i < 16; i++ )
{
// left-rotate the 28-bit halves by 1 or 2 bits (DES shift schedule)
if( i < 2 || i == 8 || i == 15 )
{
X = ((X << 1) | (X >> 27)) & 0x0FFFFFFF;
Y = ((Y << 1) | (Y >> 27)) & 0x0FFFFFFF;
}
else
{
X = ((X << 2) | (X >> 26)) & 0x0FFFFFFF;
Y = ((Y << 2) | (Y >> 26)) & 0x0FFFFFFF;
}
/* Permuted Choice 2: pack the shuffled half bits into two subkey words */
*SK++ = ((X << 4) & 0x24000000) | ((X << 28) & 0x10000000)
| ((X << 14) & 0x08000000) | ((X << 18) & 0x02080000)
| ((X << 6) & 0x01000000) | ((X << 9) & 0x00200000)
| ((X >> 1) & 0x00100000) | ((X << 10) & 0x00040000)
| ((X << 2) & 0x00020000) | ((X >> 10) & 0x00010000)
| ((Y >> 13) & 0x00002000) | ((Y >> 4) & 0x00001000)
| ((Y << 6) & 0x00000800) | ((Y >> 1) & 0x00000400)
| ((Y >> 14) & 0x00000200) | ((Y ) & 0x00000100)
| ((Y >> 5) & 0x00000020) | ((Y >> 10) & 0x00000010)
| ((Y >> 3) & 0x00000008) | ((Y >> 18) & 0x00000004)
| ((Y >> 26) & 0x00000002) | ((Y >> 24) & 0x00000001);
*SK++ = ((X << 15) & 0x20000000) | ((X << 17) & 0x10000000)
| ((X << 10) & 0x08000000) | ((X << 22) & 0x04000000)
| ((X >> 2) & 0x02000000) | ((X << 1) & 0x01000000)
| ((X << 16) & 0x00200000) | ((X << 11) & 0x00100000)
| ((X << 3) & 0x00080000) | ((X >> 6) & 0x00040000)
| ((X << 15) & 0x00020000) | ((X >> 4) & 0x00010000)
| ((Y >> 2) & 0x00002000) | ((Y << 8) & 0x00001000)
| ((Y >> 14) & 0x00000808) | ((Y >> 9) & 0x00000400)
| ((Y ) & 0x00000200) | ((Y << 7) & 0x00000100)
| ((Y >> 7) & 0x00000020) | ((Y >> 3) & 0x00000011)
| ((Y << 2) & 0x00000004) | ((Y >> 21) & 0x00000002);
}
return( 0 );
}
// Builds the triple-DES (3-key EDE) schedules: esk receives the 96
// encryption subkeys and dsk the 96 decryption subkeys. Always returns 0.
int des_set_key( uint32 *esk, uint32 *dsk, uint8 key1[8],
uint8 key2[8], uint8 key3[8])
{
int i;
/* setup encryption subkeys */
des_main_ks( esk, key1 ); // K1 forward schedule -> esk[0..31]
des_main_ks( dsk + 32, key2 ); // K2 forward schedule staged in dsk's middle third
des_main_ks( esk + 64, key3 ); // K3 forward schedule -> esk[64..95]
/* setup decryption subkeys */
// Reversing a forward schedule pairwise yields the opposite-direction
// schedule; this fills esk's middle third (decrypt-with-K2 for EDE) and
// all of dsk from the three forward schedules above.
for( i = 0; i < 32; i += 2 )
{
dsk[i] = esk[94 - i];
dsk[i + 1] = esk[95 - i];
esk[i + 32] = dsk[62 - i];
esk[i + 33] = dsk[63 - i];
dsk[i + 64] = esk[30 - i];
dsk[i + 65] = esk[31 - i];
}
return( 0 );
}
// Device-side 48-round 3DES-EDE block transform. Thread tid processes the
// 8-byte blocks at indices i*TD_NUM+tid; two subkeys are consumed per round
// call (96 total).
// NOTE(review): the loop bound len/TD_NUM truncates, so trailing blocks are
// skipped when TD_NUM does not divide len -- confirm callers always pass a
// multiple of TD_NUM.
__device__ void des_crypt_dev( uint32 *SK, uint8 *input, uint8 *output, int len)
{
int i;
uint32 X, Y, T; // NOTE(review): T is unused here; the round temporary lives in DES_ROUND_dev
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < TD_NUM){
for(i = 0; i < len/TD_NUM; i++){
// load the 8-byte block big-endian: X = high word, Y = low word
X = ( (uint32) *(input + (i*TD_NUM+tid)*8) << 24 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 1) << 16 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 2) << 8 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 3) );
Y = ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 4) << 24 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 5) << 16 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 6) << 8 )
| ( (uint32) *(input + ((i*TD_NUM+tid)*8) + 7) );
// initial permutation
DES_IP( X, Y );
// 48 Feistel rounds (3 x 16, EDE); X/Y argument order alternates each call
DES_ROUND_dev( SK, Y, X ); DES_ROUND_dev( (SK + 2), X, Y );
DES_ROUND_dev( (SK + 4), Y, X ); DES_ROUND_dev( (SK + 6), X, Y );
DES_ROUND_dev( (SK + 8), Y, X ); DES_ROUND_dev( (SK + 10), X, Y );
DES_ROUND_dev( (SK + 12), Y, X ); DES_ROUND_dev( (SK + 14), X, Y );
DES_ROUND_dev( (SK + 16), Y, X ); DES_ROUND_dev( (SK + 18), X, Y );
DES_ROUND_dev( (SK + 20), Y, X ); DES_ROUND_dev( (SK + 22), X, Y );
DES_ROUND_dev( (SK + 24), Y, X ); DES_ROUND_dev( (SK + 26), X, Y );
DES_ROUND_dev( (SK + 28), Y, X ); DES_ROUND_dev( (SK + 30), X, Y );
DES_ROUND_dev( (SK + 32), X, Y ); DES_ROUND_dev( (SK + 34), Y, X );
DES_ROUND_dev( (SK + 36), X, Y ); DES_ROUND_dev( (SK + 38), Y, X );
DES_ROUND_dev( (SK + 40), X, Y ); DES_ROUND_dev( (SK + 42), Y, X );
DES_ROUND_dev( (SK + 44), X, Y ); DES_ROUND_dev( (SK + 46), Y, X );
DES_ROUND_dev( (SK + 48), X, Y ); DES_ROUND_dev( (SK + 50), Y, X );
DES_ROUND_dev( (SK + 52), X, Y ); DES_ROUND_dev( (SK + 54), Y, X );
DES_ROUND_dev( (SK + 56), X, Y ); DES_ROUND_dev( (SK + 58), Y, X );
DES_ROUND_dev( (SK + 60), X, Y ); DES_ROUND_dev( (SK + 62), Y, X );
DES_ROUND_dev( (SK + 64), Y, X ); DES_ROUND_dev( (SK + 66), X, Y );
DES_ROUND_dev( (SK + 68), Y, X ); DES_ROUND_dev( (SK + 70), X, Y );
DES_ROUND_dev( (SK + 72), Y, X ); DES_ROUND_dev( (SK + 74), X, Y );
DES_ROUND_dev( (SK + 76), Y, X ); DES_ROUND_dev( (SK + 78), X, Y );
DES_ROUND_dev( (SK + 80), Y, X ); DES_ROUND_dev( (SK + 82), X, Y );
DES_ROUND_dev( (SK + 84), Y, X ); DES_ROUND_dev( (SK + 86), X, Y );
DES_ROUND_dev( (SK + 88), Y, X ); DES_ROUND_dev( (SK + 90), X, Y );
DES_ROUND_dev( (SK + 92), Y, X ); DES_ROUND_dev( (SK + 94), X, Y );
// final permutation
DES_FP( Y, X );
// store the result big-endian (Y word first, then X)
*(output + (i*TD_NUM+tid)*8) = (uint8) ( (Y) >> 24 );
*(output + ((i*TD_NUM+tid)*8) + 1) = (uint8) ( (Y) >> 16 );
*(output + ((i*TD_NUM+tid)*8) + 2) = (uint8) ( (Y) >> 8 );
*(output + ((i*TD_NUM+tid)*8) + 3) = (uint8) ( (Y) );
*(output + ((i*TD_NUM+tid)*8) + 4) = (uint8) ( (X) >> 24 );
*(output + ((i*TD_NUM+tid)*8) + 5) = (uint8) ( (X) >> 16 );
*(output + ((i*TD_NUM+tid)*8) + 6) = (uint8) ( (X) >> 8 );
*(output + ((i*TD_NUM+tid)*8) + 7) = (uint8) ( (X) );
}
}
}
// Kernel entry point: runs two 48-round passes back to back on `len` 8-byte
// blocks. The first pass transforms `input` in place using the `esk` subkeys;
// the second reads that result and writes `output` using the `dsk` subkeys
// (presumably an encrypt-then-decrypt round trip for benchmarking -- confirm).
__global__ void des_encrypt_dev( uint32 *esk, uint32 *dsk, uint8 *input, uint8 *output, int len)
{
des_crypt_dev( esk, input, input, len);
des_crypt_dev( dsk, input, output, len);
}
/*
 * One DES double round (two Feistel rounds), using the SB1..SB8 S-box
 * tables. Each call consumes two 32-bit subkeys, matching the
 * SK, SK+2, SK+4, ... stepping at the call sites in des_crypt.
 *
 * Fixes relative to the previous version:
 *  - Y is now taken by reference: it was passed by value, so the round had
 *    no effect on the caller's state.
 *  - the second half-round reads SK[1] instead of re-reading SK[0].
 * X is only read here, so it stays by value.
 */
void DES_ROUND(uint32 *SK, uint32 X, uint32 &Y)
{
uint32 T;
/* first half-round: subkey SK[0] */
T = *SK++ ^ X;
Y ^= SB8[ (T ) & 0x3F ] ^
SB6[ (T >> 8) & 0x3F ] ^
SB4[ (T >> 16) & 0x3F ] ^
SB2[ (T >> 24) & 0x3F ];
/* second half-round: subkey SK[1], X rotated right by 4 */
T = *SK ^ ((X << 28) | (X >> 4));
Y ^= SB7[ (T ) & 0x3F ] ^
SB5[ (T >> 8) & 0x3F ] ^
SB3[ (T >> 16) & 0x3F ] ^
SB1[ (T >> 24) & 0x3F ];
}
/* DES 64-bit block encryption/decryption */
// Host (CPU) reference implementation of the 48-round 3DES-EDE block
// transform: processes `len` 8-byte blocks from `input` into `output`
// using the 96-entry subkey array SK (two subkeys consumed per round call).
void des_crypt( uint32 *SK, uint8 *input, uint8 *output, int len)
{
int i;
uint32 X, Y, T; // NOTE(review): T is unused here; the round temporary lives in DES_ROUND
for(i = 0; i < len; i++){
// load the 8-byte block big-endian: X = high word, Y = low word
X = ( (uint32) *(input + i*8) << 24 )
| ( (uint32) *(input + (i*8) + 1) << 16 )
| ( (uint32) *(input + (i*8) + 2) << 8 )
| ( (uint32) *(input + (i*8) + 3) );
Y = ( (uint32) *(input + (i*8) + 4) << 24 )
| ( (uint32) *(input + (i*8) + 5) << 16 )
| ( (uint32) *(input + (i*8) + 6) << 8 )
| ( (uint32) *(input + (i*8) + 7) );
// initial permutation
DES_IP( X, Y );
// 48 Feistel rounds (3 x 16, EDE); X/Y argument order alternates each call
DES_ROUND( SK, Y, X ); DES_ROUND( (SK + 2), X, Y );
DES_ROUND( (SK + 4), Y, X ); DES_ROUND( (SK + 6), X, Y );
DES_ROUND( (SK + 8), Y, X ); DES_ROUND( (SK + 10), X, Y );
DES_ROUND( (SK + 12), Y, X ); DES_ROUND( (SK + 14), X, Y );
DES_ROUND( (SK + 16), Y, X ); DES_ROUND( (SK + 18), X, Y );
DES_ROUND( (SK + 20), Y, X ); DES_ROUND( (SK + 22), X, Y );
DES_ROUND( (SK + 24), Y, X ); DES_ROUND( (SK + 26), X, Y );
DES_ROUND( (SK + 28), Y, X ); DES_ROUND( (SK + 30), X, Y );
DES_ROUND( (SK + 32), X, Y ); DES_ROUND( (SK + 34), Y, X );
DES_ROUND( (SK + 36), X, Y ); DES_ROUND( (SK + 38), Y, X );
DES_ROUND( (SK + 40), X, Y ); DES_ROUND( (SK + 42), Y, X );
DES_ROUND( (SK + 44), X, Y ); DES_ROUND( (SK + 46), Y, X );
DES_ROUND( (SK + 48), X, Y ); DES_ROUND( (SK + 50), Y, X );
DES_ROUND( (SK + 52), X, Y ); DES_ROUND( (SK + 54), Y, X );
DES_ROUND( (SK + 56), X, Y ); DES_ROUND( (SK + 58), Y, X );
DES_ROUND( (SK + 60), X, Y ); DES_ROUND( (SK + 62), Y, X );
DES_ROUND( (SK + 64), Y, X ); DES_ROUND( (SK + 66), X, Y );
DES_ROUND( (SK + 68), Y, X ); DES_ROUND( (SK + 70), X, Y );
DES_ROUND( (SK + 72), Y, X ); DES_ROUND( (SK + 74), X, Y );
DES_ROUND( (SK + 76), Y, X ); DES_ROUND( (SK + 78), X, Y );
DES_ROUND( (SK + 80), Y, X ); DES_ROUND( (SK + 82), X, Y );
DES_ROUND( (SK + 84), Y, X ); DES_ROUND( (SK + 86), X, Y );
DES_ROUND( (SK + 88), Y, X ); DES_ROUND( (SK + 90), X, Y );
DES_ROUND( (SK + 92), Y, X ); DES_ROUND( (SK + 94), X, Y );
// final permutation
DES_FP( Y, X );
// store the result big-endian (Y word first, then X)
*(output + i*8) = (uint8) ( (Y) >> 24 );
*(output + (i*8) + 1) = (uint8) ( (Y) >> 16 );
*(output + (i*8) + 2) = (uint8) ( (Y) >> 8 );
*(output + (i*8) + 3) = (uint8) ( (Y) );
*(output + (i*8) + 4) = (uint8) ( (X) >> 24 );
*(output + (i*8) + 5) = (uint8) ( (X) >> 16 );
*(output + (i*8) + 6) = (uint8) ( (X) >> 8 );
*(output + (i*8) + 7) = (uint8) ( (X) );
}
}
// Host reference counterpart of des_encrypt_dev: transforms `input` in place
// with the `esk` subkeys, then produces `output` from it with the `dsk`
// subkeys. `len` is the number of 8-byte blocks.
void des_encrypt( uint32 *esk, uint32 *dsk, uint8 *input, uint8 *output, int len)
{
des_crypt( esk, input, input, len);
des_crypt( dsk, input, output, len);
}
|
843d35ab684c9a9da1971f2df62bac4b74d4b5cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "brent_kung_scan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark driver for brent_kung_scan_kernel: for argv[1]
 * matrix sizes and 20 block shapes, warms up with 10 launches and then
 * times 1000 launches, printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 *
 * Fixes over the previous version:
 *  - X and Y were allocated with XSIZE*YSIZE *bytes* instead of
 *    XSIZE*YSIZE*sizeof(float), so the kernel read/wrote out of bounds;
 *  - the buffers are now freed after each configuration instead of leaking
 *    across the whole sweep;
 *  - the timed loop now synchronizes before taking the end timestamp, so it
 *    measures kernel execution rather than just launch enqueue time.
 */
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *X = NULL;
            hipMalloc(&X, XSIZE * YSIZE * sizeof(float));
            float *Y = NULL;
            hipMalloc(&Y, XSIZE * YSIZE * sizeof(float));
            int InputSize = XSIZE * YSIZE;
            // round the grid up so it covers the whole problem
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy runtime/context initialisation before timing
            hipLaunchKernelGGL((brent_kung_scan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, X, Y, InputSize);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up
                hipLaunchKernelGGL((brent_kung_scan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, X, Y, InputSize);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((brent_kung_scan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, X, Y, InputSize);
            }
            hipDeviceSynchronize();  // wait for the queued kernels before timing stops
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            hipFree(X);
            hipFree(Y);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "brent_kung_scan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark driver for brent_kung_scan_kernel: for argv[1]
 * matrix sizes and 20 block shapes, warms up with 10 launches and then
 * times 1000 launches, printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 *
 * Fixes over the previous version:
 *  - X and Y were allocated with XSIZE*YSIZE *bytes* instead of
 *    XSIZE*YSIZE*sizeof(float), so the kernel read/wrote out of bounds;
 *  - the buffers are now freed after each configuration instead of leaking
 *    across the whole sweep;
 *  - the timed loop now synchronizes before taking the end timestamp, so it
 *    measures kernel execution rather than just launch enqueue time.
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *X = NULL;
            cudaMalloc(&X, XSIZE * YSIZE * sizeof(float));
            float *Y = NULL;
            cudaMalloc(&Y, XSIZE * YSIZE * sizeof(float));
            int InputSize = XSIZE * YSIZE;
            // round the grid up so it covers the whole problem
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy runtime/context initialisation before timing
            brent_kung_scan_kernel<<<gridBlock, threadBlock>>>(X, Y, InputSize);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up
                brent_kung_scan_kernel<<<gridBlock, threadBlock>>>(X, Y, InputSize);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                brent_kung_scan_kernel<<<gridBlock, threadBlock>>>(X, Y, InputSize);
            }
            cudaDeviceSynchronize();  // wait for the queued kernels before timing stops
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            cudaFree(X);
            cudaFree(Y);
        }
    }
}
bce19e6dea63433e0fed5bd7553566b565f612fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Grid-stride kernel (auto-generated names): for each i < x11 where the key
// x6[i] lies in [x9, x10], accumulate x7[i] += x8[i]. x11 is the length.
__global__ void x5(int* x6, int* x7, int* x8, int x9, int x10, int x11) {
int x12 = gridDim.x * blockDim.x; // total thread count = loop stride
int x13 = threadIdx.x + blockIdx.x * blockDim.x;
while (x13 < x11) {
int x14 = x13;
if (x6[x14] >= x9 && x6[x14] <= x10) x7[x14] = x7[x14] + x8[x14];
x13 = x13 + x12;
}
}
// Grid-stride kernel (auto-generated names): zero x17[i] wherever the key
// x16[i] falls outside [x19, x20]; x21 is the length. x18 is unused.
__global__ void x15(int* x16, int* x17, int* x18, int x19, int x20, int x21) {
int x22 = gridDim.x * blockDim.x; // total thread count = loop stride
int x23 = threadIdx.x + blockIdx.x * blockDim.x;
while (x23 < x21) {
int x24 = x23;
if (x16[x24] < x19 || x16[x24] > x20) x17[x24] = 0;
x23 = x23 + x22;
}
}
/**************** Snippet ****************/
/*
 * Host driver for the generated kernels: uploads the keys {1..5} (x2), fills
 * x3 with the constant 8 via arrayFill, accumulates x3 into x4 where the key
 * lies in [-2, 2] (kernel x5), then zeroes x3 where the key is out of range
 * (kernel x15), printing both results. x0 is unused.
 *
 * Fixes over the previous version:
 *  - x4 is zero-initialised before kernel x5 accumulates into it (it was
 *    read-modify-written while uninitialised);
 *  - results are copied back to the host before printf (the old code
 *    dereferenced device pointers on the host -- undefined behaviour);
 *  - the leaked malloc(0) placeholders are gone.
 */
void Snippet(int x0) {
  int x1[5] = { 1, 2, 3, 4, 5 };
  int* x2 = NULL;
  CUDA_CALL(hipMalloc(&x2, (size_t)(5 * sizeof(int))));
  CUDA_CALL(hipMemcpy(x2, x1, (size_t)(5 * sizeof(int)), hipMemcpyHostToDevice));
  int* x3 = NULL;
  CUDA_CALL(hipMalloc(&x3, (size_t)(5 * sizeof(int))));
  hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x3, 8, 5);
  int* x4 = NULL;
  CUDA_CALL(hipMalloc(&x4, (size_t)(5 * sizeof(int))));
  CUDA_CALL(hipMemset(x4, 0, (size_t)(5 * sizeof(int))));
  hipLaunchKernelGGL(( x5), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x2, x4, x3, -2, 2, 5);
  int h4[5];
  CUDA_CALL(hipMemcpy(h4, x4, (size_t)(5 * sizeof(int)), hipMemcpyDeviceToHost));
  printf("%d %d %d %d %d", h4[0], h4[1], h4[2], h4[3], h4[4]);
  hipLaunchKernelGGL(( x15), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x2, x3, x3, -2, 2, 5);
  int h3[5];
  CUDA_CALL(hipMemcpy(h3, x3, (size_t)(5 * sizeof(int)), hipMemcpyDeviceToHost));
  printf("%d %d %d %d %d", h3[0], h3[1], h3[2], h3[3], h3[4]);
  CUDA_CALL(hipFree(x2));
  CUDA_CALL(hipFree(x3));
  CUDA_CALL(hipFree(x4));
}
/*****************************************
End of C Generated Code
*******************************************/
// CLI entry point: requires exactly one integer argument, which is parsed
// with atoi and forwarded to Snippet.
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
| bce19e6dea63433e0fed5bd7553566b565f612fc.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Grid-stride kernel (auto-generated names): for each i < x11 where the key
// x6[i] lies in [x9, x10], accumulate x7[i] += x8[i]. x11 is the length.
__global__ void x5(int* x6, int* x7, int* x8, int x9, int x10, int x11) {
int x12 = gridDim.x * blockDim.x; // total thread count = loop stride
int x13 = threadIdx.x + blockIdx.x * blockDim.x;
while (x13 < x11) {
int x14 = x13;
if (x6[x14] >= x9 && x6[x14] <= x10) x7[x14] = x7[x14] + x8[x14];
x13 = x13 + x12;
}
}
// Grid-stride kernel (auto-generated names): zero x17[i] wherever the key
// x16[i] falls outside [x19, x20]; x21 is the length. x18 is unused.
__global__ void x15(int* x16, int* x17, int* x18, int x19, int x20, int x21) {
int x22 = gridDim.x * blockDim.x; // total thread count = loop stride
int x23 = threadIdx.x + blockIdx.x * blockDim.x;
while (x23 < x21) {
int x24 = x23;
if (x16[x24] < x19 || x16[x24] > x20) x17[x24] = 0;
x23 = x23 + x22;
}
}
/**************** Snippet ****************/
/*
 * Host driver for the generated kernels: uploads the keys {1..5} (x2), fills
 * x3 with the constant 8 via arrayFill, accumulates x3 into x4 where the key
 * lies in [-2, 2] (kernel x5), then zeroes x3 where the key is out of range
 * (kernel x15), printing both results. x0 is unused.
 *
 * Fixes over the previous version:
 *  - x4 is zero-initialised before kernel x5 accumulates into it (it was
 *    read-modify-written while uninitialised);
 *  - results are copied back to the host before printf (the old code
 *    dereferenced device pointers on the host -- undefined behaviour);
 *  - the leaked malloc(0) placeholders are gone.
 */
void Snippet(int x0) {
  int x1[5] = { 1, 2, 3, 4, 5 };
  int* x2 = NULL;
  CUDA_CALL(cudaMalloc(&x2, (size_t)(5 * sizeof(int))));
  CUDA_CALL(cudaMemcpy(x2, x1, (size_t)(5 * sizeof(int)), cudaMemcpyHostToDevice));
  int* x3 = NULL;
  CUDA_CALL(cudaMalloc(&x3, (size_t)(5 * sizeof(int))));
  arrayFill<<<28, 512>>>(x3, 8, 5);
  int* x4 = NULL;
  CUDA_CALL(cudaMalloc(&x4, (size_t)(5 * sizeof(int))));
  CUDA_CALL(cudaMemset(x4, 0, (size_t)(5 * sizeof(int))));
  x5<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x2, x4, x3, -2, 2, 5);
  int h4[5];
  CUDA_CALL(cudaMemcpy(h4, x4, (size_t)(5 * sizeof(int)), cudaMemcpyDeviceToHost));
  printf("%d %d %d %d %d", h4[0], h4[1], h4[2], h4[3], h4[4]);
  x15<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x2, x3, x3, -2, 2, 5);
  int h3[5];
  CUDA_CALL(cudaMemcpy(h3, x3, (size_t)(5 * sizeof(int)), cudaMemcpyDeviceToHost));
  printf("%d %d %d %d %d", h3[0], h3[1], h3[2], h3[3], h3[4]);
  CUDA_CALL(cudaFree(x2));
  CUDA_CALL(cudaFree(x3));
  CUDA_CALL(cudaFree(x4));
}
/*****************************************
End of C Generated Code
*******************************************/
// CLI entry point: requires exactly one integer argument, which is parsed
// with atoi and forwarded to Snippet.
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
|
14792382c0b6e14900fbececb41902e5dae624a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatMultKernel.h"
// Define a gpu kernel to perform matrix multiplication
// of B x C = A.
// Tiled matrix-multiply kernel. Each thread block processes one
// FOOTPRINT_SIZE x FOOTPRINT_SIZE tile and each thread computes a 2x2 patch
// of outputs (Avalue1..4), which implies FOOTPRINT_SIZE == 2*BLOCK_SIZE and
// blockDim == (BLOCK_SIZE, BLOCK_SIZE) -- confirm in MatMultKernel.h.
// No bounds checks: all dimensions must be exact multiples of FOOTPRINT_SIZE.
// NOTE(review): despite the "B x C = A" comment, the accumulation loop
// multiplies tiles loaded from A (into shared_B) with tiles loaded from B
// (into shared_C) and overwrites A's own tile in place, and the Csub pointer
// is computed every iteration but never dereferenced. Verify the intended
// operand roles before reusing this kernel.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
// matrix blocks
float *Asub, *Bsub, *Csub;
// Putting these into registers speeds access.
int thread_row = threadIdx.y;
int thread_col = threadIdx.x;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// Each THREAD BLOCK computes one sub matrix Csub of C
// EACH THREAD creates its own matrix descriptor Csub
Asub = &A.elements[A.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * block_col];
// Each thread computes one element of Csub in its copy of CValue
float Avalue1 = 0;
float Avalue2 = 0;
float Avalue3 = 0;
float Avalue4 = 0;
// Loop over all sub matrices in block_row of A and block_col of B
// required to compute Csub. Block multiply each pair of sub matrices
// and accumulate results
for (int m = 0; m < (B.width / FOOTPRINT_SIZE); ++m){
// Get Bsub and Csub descriptors
//for(int )
Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * m];
Csub = &C.elements[C.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * block_col];
// Copy ELEMENTS OF ASub and Bsub into shared memory
// EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub
// Notice: it does not need to be the element it requires to
// compute its Avalue, as long as all elements are
// collaboratively read.
// Notice: every thread declares shared_A and shared_B in shared memory
// even though a thread block has only one shared_A and one shared_B
__shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE];
__shared__ float shared_C[FOOTPRINT_SIZE][FOOTPRINT_SIZE];
// Each thread copies just one element of shared_B and one element of shared_C
// (in fact four elements each: one per quadrant of the footprint tile)
shared_B[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col];
shared_C[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col];
shared_B[thread_row+BLOCK_SIZE][thread_col] = Asub[(thread_row+BLOCK_SIZE )* A.stride + thread_col];
shared_B[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Asub[(thread_row+BLOCK_SIZE )* A.stride + (thread_col+BLOCK_SIZE)];
shared_B[thread_row][thread_col+BLOCK_SIZE] = Asub[thread_row* A.stride + (thread_col+BLOCK_SIZE)];
shared_C[thread_row+BLOCK_SIZE][thread_col] = Bsub[(thread_row+BLOCK_SIZE) * B.stride + thread_col];
shared_C[thread_row][thread_col+BLOCK_SIZE] = Bsub[thread_row * B.stride + (thread_col+BLOCK_SIZE)];
shared_C[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Bsub[(thread_row+BLOCK_SIZE) * B.stride + (thread_col+BLOCK_SIZE)];
// Synchronize to ensure all elements are read
__syncthreads();
// Do an inproduct of one row of shared_A and one col of shared_B
// computing one Cvalue by accumulation
#pragma unroll
for(int e=0; e<FOOTPRINT_SIZE; ++e){
Avalue1 += shared_B[thread_row+BLOCK_SIZE][e] * shared_C[e][thread_col];
Avalue2 += shared_B[thread_row][e] * shared_C[e][thread_col];
Avalue3 += shared_B[thread_row][e] * shared_C[e][thread_col+BLOCK_SIZE];
Avalue4 += shared_B[thread_row+BLOCK_SIZE][e] * shared_C[e][thread_col+BLOCK_SIZE];
}
// Synchronize to ensure all Avalues have been incremented
// before reading in the next shared_B AND shared_C BLOCKS
__syncthreads();
}
// Write Asub to GLOBAL memory.
// Each thread writes its own cell value.
Asub[thread_row * A.stride + thread_col] = Avalue2;
Asub[(thread_row+BLOCK_SIZE) * A.stride +thread_col] = Avalue1;
Asub[thread_row * A.stride + (thread_col+BLOCK_SIZE)] = Avalue3;
Asub[(thread_row+BLOCK_SIZE) *A.stride + (thread_col+BLOCK_SIZE)] = Avalue4;
}
| 14792382c0b6e14900fbececb41902e5dae624a8.cu | #include "MatMultKernel.h"
// Define a gpu kernel to perform matrix multiplication
// of B x C = A.
// Tiled matrix-multiply kernel. Each thread block processes one
// FOOTPRINT_SIZE x FOOTPRINT_SIZE tile and each thread computes a 2x2 patch
// of outputs (Avalue1..4), which implies FOOTPRINT_SIZE == 2*BLOCK_SIZE and
// blockDim == (BLOCK_SIZE, BLOCK_SIZE) -- confirm in MatMultKernel.h.
// No bounds checks: all dimensions must be exact multiples of FOOTPRINT_SIZE.
// NOTE(review): despite the "B x C = A" comment, the accumulation loop
// multiplies tiles loaded from A (into shared_B) with tiles loaded from B
// (into shared_C) and overwrites A's own tile in place, and the Csub pointer
// is computed every iteration but never dereferenced. Verify the intended
// operand roles before reusing this kernel.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
// matrix blocks
float *Asub, *Bsub, *Csub;
// Putting these into registers speeds access.
int thread_row = threadIdx.y;
int thread_col = threadIdx.x;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// Each THREAD BLOCK computes one sub matrix Csub of C
// EACH THREAD creates its own matrix descriptor Csub
Asub = &A.elements[A.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * block_col];
// Each thread computes one element of Csub in its copy of CValue
float Avalue1 = 0;
float Avalue2 = 0;
float Avalue3 = 0;
float Avalue4 = 0;
// Loop over all sub matrices in block_row of A and block_col of B
// required to compute Csub. Block multiply each pair of sub matrices
// and accumulate results
for (int m = 0; m < (B.width / FOOTPRINT_SIZE); ++m){
// Get Bsub and Csub descriptors
//for(int )
Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * m];
Csub = &C.elements[C.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * block_col];
// Copy ELEMENTS OF ASub and Bsub into shared memory
// EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub
// Notice: it does not need to be the element it requires to
// compute its Avalue, as long as all elements are
// collaboratively read.
// Notice: every thread declares shared_A and shared_B in shared memory
// even though a thread block has only one shared_A and one shared_B
__shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE];
__shared__ float shared_C[FOOTPRINT_SIZE][FOOTPRINT_SIZE];
// Each thread copies just one element of shared_B and one element of shared_C
// (in fact four elements each: one per quadrant of the footprint tile)
shared_B[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col];
shared_C[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col];
shared_B[thread_row+BLOCK_SIZE][thread_col] = Asub[(thread_row+BLOCK_SIZE )* A.stride + thread_col];
shared_B[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Asub[(thread_row+BLOCK_SIZE )* A.stride + (thread_col+BLOCK_SIZE)];
shared_B[thread_row][thread_col+BLOCK_SIZE] = Asub[thread_row* A.stride + (thread_col+BLOCK_SIZE)];
shared_C[thread_row+BLOCK_SIZE][thread_col] = Bsub[(thread_row+BLOCK_SIZE) * B.stride + thread_col];
shared_C[thread_row][thread_col+BLOCK_SIZE] = Bsub[thread_row * B.stride + (thread_col+BLOCK_SIZE)];
shared_C[thread_row+BLOCK_SIZE][thread_col+BLOCK_SIZE] = Bsub[(thread_row+BLOCK_SIZE) * B.stride + (thread_col+BLOCK_SIZE)];
// Synchronize to ensure all elements are read
__syncthreads();
// Do an inproduct of one row of shared_A and one col of shared_B
// computing one Cvalue by accumulation
#pragma unroll
for(int e=0; e<FOOTPRINT_SIZE; ++e){
Avalue1 += shared_B[thread_row+BLOCK_SIZE][e] * shared_C[e][thread_col];
Avalue2 += shared_B[thread_row][e] * shared_C[e][thread_col];
Avalue3 += shared_B[thread_row][e] * shared_C[e][thread_col+BLOCK_SIZE];
Avalue4 += shared_B[thread_row+BLOCK_SIZE][e] * shared_C[e][thread_col+BLOCK_SIZE];
}
// Synchronize to ensure all Avalues have been incremented
// before reading in the next shared_B AND shared_C BLOCKS
__syncthreads();
}
// Write Asub to GLOBAL memory.
// Each thread writes its own cell value.
Asub[thread_row * A.stride + thread_col] = Avalue2;
Asub[(thread_row+BLOCK_SIZE) * A.stride +thread_col] = Avalue1;
Asub[thread_row * A.stride + (thread_col+BLOCK_SIZE)] = Avalue3;
Asub[(thread_row+BLOCK_SIZE) *A.stride + (thread_col+BLOCK_SIZE)] = Avalue4;
}
|
17c6447989e705d68a524bbc80c84fb18ad4343e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2008 Daniel Cabrini Hauagge
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#include <GaussianPyramidKernel.hpp>
static texture<float, 2> _levelTexture;
#define KERNEL_SIZE 5
#define HALF_KERNEL 2
#define NORM_FACTOR 0.00390625 // 1.0/(16.0^2)
//
// Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16
//
// One thread per output pixel of the downsampled level: applies the
// separable 5x5 binomial kernel [1 4 6 4 1]/16 (rows, then columns) around
// the corresponding source position (2x, 2y), sampling the parent level
// through _levelTexture (point-sampled, clamp-addressed by the caller).
// downLevelPitch is in float elements (the caller divides the byte pitch
// by sizeof(float)).
// NOTE(review): the x/y bounds check is commented out, so if downWidth /
// downHeight are not exact multiples of the launch tile, edge threads write
// out of bounds -- confirm the launch always uses exact multiples.
__global__
void
gaussianPyramidDownsampleKernel(float *downLevel,
size_t downLevelPitch,
unsigned int downWidth, unsigned int downHeight)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
//if(x < downWidth and y < downHeight) {
float buf[KERNEL_SIZE];
float u0 = (2 * x) - HALF_KERNEL;
float v0 = (2 * y) - HALF_KERNEL;
// horizontal pass: filter each of the 5 source rows with [1 4 6 4 1]
for(int i = 0; i < KERNEL_SIZE; i++) {
buf[i] =
(tex2D(_levelTexture, u0 , v0 + i) + tex2D(_levelTexture, u0 + 4, v0 + i)) +
4 * (tex2D(_levelTexture, u0 + 1, v0 + i) + tex2D(_levelTexture, u0 + 3, v0 + i)) +
6 * tex2D(_levelTexture, u0 + 2, v0 + i);
}
// vertical pass over the 5 row results, then normalise by 1/16^2
downLevel[y * downLevelPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR;
//}
}
void
gaussianPyramidDownsample(hipArray *level,
float *downLevel,
size_t downLevelPitch,
unsigned int downWidth, unsigned int downHeight)
{
dim3 grid(iDivUp(downWidth, CB_TILE_W), iDivUp(downHeight, CB_TILE_H));
dim3 threads(CB_TILE_W, CB_TILE_H);
// Bind the array to the texture
_levelTexture.normalized = false;
_levelTexture.filterMode = hipFilterModePoint;
_levelTexture.addressMode[0] = hipAddressModeClamp;
_levelTexture.addressMode[1] = hipAddressModeClamp;
CUDA_SAFE_CALL( hipBindTextureToArray(_levelTexture, level) );
hipLaunchKernelGGL(( gaussianPyramidDownsampleKernel), dim3(grid) , dim3(threads) , 0, 0, downLevel, downLevelPitch/sizeof(float),
downWidth, downHeight);
CUDA_SAFE_CALL( hipUnbindTexture(_levelTexture));
CUT_CHECK_ERROR( __PRETTY_FUNCTION__ );
CUDA_SAFE_CALL( hipDeviceSynchronize());
}
| 17c6447989e705d68a524bbc80c84fb18ad4343e.cu | // Copyright (c) 2008 Daniel Cabrini Hauagge
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#include <GaussianPyramidKernel.hpp>
static texture<float, 2> _levelTexture;
#define KERNEL_SIZE 5
#define HALF_KERNEL 2
#define NORM_FACTOR 0.00390625 // 1.0/(16.0^2)
//
// Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16
//
__global__
void
gaussianPyramidDownsampleKernel(float *downLevel,
size_t downLevelPitch,
unsigned int downWidth, unsigned int downHeight)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
//if(x < downWidth and y < downHeight) {
float buf[KERNEL_SIZE];
float u0 = (2 * x) - HALF_KERNEL;
float v0 = (2 * y) - HALF_KERNEL;
for(int i = 0; i < KERNEL_SIZE; i++) {
buf[i] =
(tex2D(_levelTexture, u0 , v0 + i) + tex2D(_levelTexture, u0 + 4, v0 + i)) +
4 * (tex2D(_levelTexture, u0 + 1, v0 + i) + tex2D(_levelTexture, u0 + 3, v0 + i)) +
6 * tex2D(_levelTexture, u0 + 2, v0 + i);
}
downLevel[y * downLevelPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR;
//}
}
void
gaussianPyramidDownsample(cudaArray *level,
float *downLevel,
size_t downLevelPitch,
unsigned int downWidth, unsigned int downHeight)
{
dim3 grid(iDivUp(downWidth, CB_TILE_W), iDivUp(downHeight, CB_TILE_H));
dim3 threads(CB_TILE_W, CB_TILE_H);
// Bind the array to the texture
_levelTexture.normalized = false;
_levelTexture.filterMode = cudaFilterModePoint;
_levelTexture.addressMode[0] = cudaAddressModeClamp;
_levelTexture.addressMode[1] = cudaAddressModeClamp;
CUDA_SAFE_CALL( cudaBindTextureToArray(_levelTexture, level) );
gaussianPyramidDownsampleKernel<<< grid , threads >>>(downLevel, downLevelPitch/sizeof(float),
downWidth, downHeight);
CUDA_SAFE_CALL( cudaUnbindTexture(_levelTexture));
CUT_CHECK_ERROR( __PRETTY_FUNCTION__ );
CUDA_SAFE_CALL( cudaThreadSynchronize());
}
|
6f698b1874374fd844f701230c6be4116041115d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* inference-101
*/
#include "cudaNormalize.h"
#include "cudaMath.h"
// gpuNormalize
template <typename T>
__global__ void gpuNormalize( T* input, T* output, int width, int height, float scaling_factor )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px = input[ y * width + x ];
output[y*width+x] = make_float4(px.x * scaling_factor,
px.y * scaling_factor,
px.z * scaling_factor,
px.w * scaling_factor);
}
// cudaNormalizeRGBA
hipError_t cudaNormalizeRGBA( float4* input, const float2& input_range,
float4* output, const float2& output_range,
size_t width, size_t height )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
const float multiplier = output_range.y / input_range.y;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
hipLaunchKernelGGL(( gpuNormalize<float4>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, multiplier);
return CUDA(hipGetLastError());
}
| 6f698b1874374fd844f701230c6be4116041115d.cu | /*
* inference-101
*/
#include "cudaNormalize.h"
#include "cudaMath.h"
// gpuNormalize
template <typename T>
__global__ void gpuNormalize( T* input, T* output, int width, int height, float scaling_factor )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px = input[ y * width + x ];
output[y*width+x] = make_float4(px.x * scaling_factor,
px.y * scaling_factor,
px.z * scaling_factor,
px.w * scaling_factor);
}
// cudaNormalizeRGBA
cudaError_t cudaNormalizeRGBA( float4* input, const float2& input_range,
float4* output, const float2& output_range,
size_t width, size_t height )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
const float multiplier = output_range.y / input_range.y;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));
gpuNormalize<float4><<<gridDim, blockDim>>>(input, output, width, height, multiplier);
return CUDA(cudaGetLastError());
}
|
4f6e5cb491cb6bdc29093aa1b20016f21b75df92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "Utilities.cuh"
#define BLOCKSIZE 256
/******************************/
/* SEED INITIALIZATION KERNEL */
/******************************/
__global__ void cuRAND_setup_kernel(unsigned long seed, hiprandState_t __restrict__ state, const int N) {
int tid = threadIdx.x + blockIdx.x blockDim.x;
// --- Each thread gets the same seed (seed), a different sequence number (tid), no offset (0)
if (tid < N) hiprand_init(seed, tid, 0, &state[tid]);
}
/*********************/
/* GENERATION KERNEL */
/*********************/
__global__ void cuRAND_generation_kernel(hiprandState_t __restrict__ state, int __restrict__ d_random_numbers, const int N) {
int tid = threadIdx.x + blockIdx.x blockDim.x;
if (tid < N) {
hiprandState_t localState = state[tid];
d_random_numbers[tid] = hiprand(&localState) % 64;
state[tid] = localState;
}
}
/********/
/* MAIN */
/********/
int main() {
const int N = 10;
int *h_random_numbers = (int*)malloc(N * sizeof(int));
int *d_random_numbers; gpuErrchk(hipMalloc((void**)&d_random_numbers, N * sizeof(int)));
hiprandState_t devStates; gpuErrchk(hipMalloc((void**)&devStates, N * sizeof(devStates)));
hipLaunchKernelGGL(( cuRAND_setup_kernel), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, 1234, devStates, N);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( cuRAND_generation_kernel), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, devStates, d_random_numbers, N);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(h_random_numbers, d_random_numbers, N * sizeof(int), hipMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("%i %i\n", i, h_random_numbers[i]);
}
| 4f6e5cb491cb6bdc29093aa1b20016f21b75df92.cu | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include "Utilities.cuh"
#define BLOCKSIZE 256
/******************************/
/* SEED INITIALIZATION KERNEL */
/******************************/
__global__ void cuRAND_setup_kernel(unsigned long seed, curandState ∗ __restrict__ state, const int N) {
int tid = threadIdx.x + blockIdx.x ∗ blockDim.x;
// --- Each thread gets the same seed (seed), a different sequence number (tid), no offset (0)
if (tid < N) curand_init(seed, tid, 0, &state[tid]);
}
/*********************/
/* GENERATION KERNEL */
/*********************/
__global__ void cuRAND_generation_kernel(curandState ∗ __restrict__ state, int ∗ __restrict__ d_random_numbers, const int N) {
int tid = threadIdx.x + blockIdx.x ∗ blockDim.x;
if (tid < N) {
curandState localState = state[tid];
d_random_numbers[tid] = curand(&localState) % 64;
state[tid] = localState;
}
}
/********/
/* MAIN */
/********/
int main() {
const int N = 10;
int *h_random_numbers = (int*)malloc(N * sizeof(int));
int *d_random_numbers; gpuErrchk(cudaMalloc((void**)&d_random_numbers, N * sizeof(int)));
curandState ∗devStates; gpuErrchk(cudaMalloc((void**)&devStates, N * sizeof(devStates)));
cuRAND_setup_kernel<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(1234, devStates, N);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
cuRAND_generation_kernel<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(devStates, d_random_numbers, N);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_random_numbers, d_random_numbers, N * sizeof(int), cudaMemcpyDeviceToHost));
for (int i=0; i<N; i++) printf("%i %i\n", i, h_random_numbers[i]);
}
|
dfb37ff26208d306578a6fd8a32825ce820f35dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHApply.cuh"
#include "THHNumerics.cuh"
#include <cfloat>
void THCudaTensor_cat(THCState *state, THCudaTensor *result, THCudaTensor *ta, THCudaTensor *tb, int dimension)
{
THCudaTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCudaTensor_catArray(state, result, inputs, 2, dimension);
}
void THCudaTensor_catArray(THCState *state, THCudaTensor *result, THCudaTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j;
long offset;
int ndim = dimension + 1;
for (i = 0; i < numInputs; i++)
{
ndim = THMax(ndim, THCudaTensor_nDimension(state, inputs[i]));
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension+1);
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
long dimSize = i < THCudaTensor_nDimension(state, inputs[0])
? THCudaTensor_size(state, inputs[0], i)
: 1;
if (i == dimension)
{
for (j = 1; j < numInputs; j++)
{
dimSize += i < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], i)
: 1;
}
}
else
{
for (j = 1; j < numInputs; j++)
{
if (dimSize != (i < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], i)
: 1)) {
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
}
}
size->data[i] = dimSize;
}
THCudaTensor_resize(state, result, size, NULL);
THLongStorage_free(size);
offset = 0;
for (j = 0; j < numInputs; j++)
{
long dimSize = dimension < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], dimension)
: 1;
THCudaTensor *nt = THCudaTensor_newWithTensor(state, result);
THCudaTensor_narrow(state, nt, NULL, dimension, offset, dimSize);
THCudaTensor_copy(state, nt, inputs[j]);
THCudaTensor_free(state, nt);
offset += dimSize;
}
}
struct TensorAddCMulOp {
TensorAddCMulOp(float v) : val(v) {}
__device__ __forceinline__ void
operator()(float* out, float* in1, float* in2) {
*out += val * *in1 * *in2;
}
float val;
};
void THCudaTensor_addcmul(THCState *state, THCudaTensor *self_, THCudaTensor *t, float value, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCudaTensor_resizeAs(state, self_, t);
THCudaTensor_copy(state, self_, t);
}
else
{
THArgCheck(THCudaTensor_nElement(state, self_) == THCudaTensor_nElement(state, src1),
1, "sizes do not match");
}
THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
struct TensorAddCDivOp {
TensorAddCDivOp(float v) : val(v) {}
__device__ __forceinline__ void
operator()(float* out, float* in1, float* in2) {
*out += val * *in1 / *in2;
}
float val;
};
void THCudaTensor_addcdiv(THCState *state, THCudaTensor *self_, THCudaTensor *t, float value, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCudaTensor_resizeAs(state, self_, t);
THCudaTensor_copy(state, self_, t);
}
else
{
THArgCheck(THCudaTensor_nElement(state, self_) == THCudaTensor_nElement(state, src1),
1, "sizes do not match");
}
THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
template <typename T>
struct TensorFillOp {
TensorFillOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* v) { *v = val; }
const T val;
};
#include "generic/THCTensorMath.cu"
#include "THHGenerateAllTypes.h"
| dfb37ff26208d306578a6fd8a32825ce820f35dd.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCApply.cuh"
#include "THCNumerics.cuh"
#include <cfloat>
void THCudaTensor_cat(THCState *state, THCudaTensor *result, THCudaTensor *ta, THCudaTensor *tb, int dimension)
{
THCudaTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCudaTensor_catArray(state, result, inputs, 2, dimension);
}
void THCudaTensor_catArray(THCState *state, THCudaTensor *result, THCudaTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j;
long offset;
int ndim = dimension + 1;
for (i = 0; i < numInputs; i++)
{
ndim = THMax(ndim, THCudaTensor_nDimension(state, inputs[i]));
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension+1);
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
long dimSize = i < THCudaTensor_nDimension(state, inputs[0])
? THCudaTensor_size(state, inputs[0], i)
: 1;
if (i == dimension)
{
for (j = 1; j < numInputs; j++)
{
dimSize += i < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], i)
: 1;
}
}
else
{
for (j = 1; j < numInputs; j++)
{
if (dimSize != (i < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], i)
: 1)) {
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
}
}
size->data[i] = dimSize;
}
THCudaTensor_resize(state, result, size, NULL);
THLongStorage_free(size);
offset = 0;
for (j = 0; j < numInputs; j++)
{
long dimSize = dimension < THCudaTensor_nDimension(state, inputs[j])
? THCudaTensor_size(state, inputs[j], dimension)
: 1;
THCudaTensor *nt = THCudaTensor_newWithTensor(state, result);
THCudaTensor_narrow(state, nt, NULL, dimension, offset, dimSize);
THCudaTensor_copy(state, nt, inputs[j]);
THCudaTensor_free(state, nt);
offset += dimSize;
}
}
struct TensorAddCMulOp {
TensorAddCMulOp(float v) : val(v) {}
__device__ __forceinline__ void
operator()(float* out, float* in1, float* in2) {
*out += val * *in1 * *in2;
}
float val;
};
void THCudaTensor_addcmul(THCState *state, THCudaTensor *self_, THCudaTensor *t, float value, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCudaTensor_resizeAs(state, self_, t);
THCudaTensor_copy(state, self_, t);
}
else
{
THArgCheck(THCudaTensor_nElement(state, self_) == THCudaTensor_nElement(state, src1),
1, "sizes do not match");
}
THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
struct TensorAddCDivOp {
TensorAddCDivOp(float v) : val(v) {}
__device__ __forceinline__ void
operator()(float* out, float* in1, float* in2) {
*out += val * *in1 / *in2;
}
float val;
};
void THCudaTensor_addcdiv(THCState *state, THCudaTensor *self_, THCudaTensor *t, float value, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCudaTensor_resizeAs(state, self_, t);
THCudaTensor_copy(state, self_, t);
}
else
{
THArgCheck(THCudaTensor_nElement(state, self_) == THCudaTensor_nElement(state, src1),
1, "sizes do not match");
}
THArgCheck(THCudaTensor_nElement(state, src1) == THCudaTensor_nElement(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
template <typename T>
struct TensorFillOp {
TensorFillOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* v) { *v = val; }
const T val;
};
#include "generic/THCTensorMath.cu"
#include "THCGenerateAllTypes.h"
|
057342cf7c346e3a2f867c7b182fbdf5efa74979.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void hello(char *a, int *b)
{
for (int i=0; i<7; ++i)
{
a[i] += b[i];
}
} | 057342cf7c346e3a2f867c7b182fbdf5efa74979.cu | #include "includes.h"
__global__ void hello(char *a, int *b)
{
for (int i=0; i<7; ++i)
{
a[i] += b[i];
}
} |
26ede1c05fc3772c5ce9dac0bbd9d1356e00a994.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"body.h"
#include<iostream>
#include <time.h> //Used by fillRandom to get a variable seed
#include<random>
using namespace std;
#define N 1000
//adapted from: Scott Griffith's LM5 example
//date Accessed: 12/10/2018
__global__ void move(Body *a, float *t){
int tid = blockIdx.x;
if(tid < N){
for(int i =0; i < N; i++){
if(i != tid){
a[tid].update(a[i], *t); //void update_pos(Body b, float nf, float t)
}
}
}
}
//////////////////////////////////////////
// Random int Generation
// Developed and introduced by Kent Jones 2016
//adapted from: Scott Griffith's LM1_part 4
//accessed: 12/10/2018
//////////////////////////////////////////
random_device rd; // Used to produce a random seed
default_random_engine engine(rd()); // Use the defualt random number generator engine
std::uniform_real_distribution<> distribution(0, 1000); // Generate a uniform real distribution between 0, 1
//
//Thread-safe C++11 pseudo-random number generator
//@return returns a random value between 0 and 1
//
double cs273_rand() {
return distribution(engine);
}
////////////////////////////////////////////
int main(){
Body* objects[N];
for(int i = 0; i < N; i++){
objects[i] = new Body(float(cs273_rand()), float(cs273_rand()));
}
// for(int i = 0; i < N; i++){
// objects[i] -> where_am_i();
// }
// Body* b1 = new Body(20.65, 50.55);
// Body* b2 = new Body(150.5, 210.35);
// Body* objects[2];
// objects[0] = b1;
// objects[1] = b2;
//Body* result[N]; //Host data, a/b input data, c output data
Body* dev_a; //Device data pointers for the main data
float* dev_time; //device data pointer for time
//Allocate memory on the device
hipMalloc( (void**)&dev_a, N*sizeof(Body*));
//hipMalloc( (void**)&dev_b, N*sizeof(Body*));
hipMalloc( (void**)&dev_time, 1*sizeof(int));
const float t = 0.01; //the time step in seconds
float time_passed = 0.0;
hipMemcpy(dev_time, time_passed, 1*sizeof(int), hipMemcpyHostToDevice);
//add<<<1,100>>>(dev_a, dev_time);
for( int i = 0; i < 200000; i++){
hipMemcpy(dev_a,objects,N*sizeof(Body*),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( move), dim3(1),dim3(1024), 0, 0, dev_a, dev_time);
hipMemcpy(objects,dev_a,N*sizeof(Body*),hipMemcpyDeviceToHost);
time_passed += t;
hipMemcpy(dev_time, time_passed, 1*sizeof(int), hipMemcpyHostToDevice);
}
for(int i = 0; i < N; i++){
objects[i] -> where_am_i();
cout << endl;
}
return 0;
} | 26ede1c05fc3772c5ce9dac0bbd9d1356e00a994.cu | #include"body.h"
#include<iostream>
#include <time.h> //Used by fillRandom to get a variable seed
#include<random>
using namespace std;
#define N 1000
//adapted from: Scott Griffith's LM5 example
//date Accessed: 12/10/2018
__global__ void move(Body *a, float *t){
int tid = blockIdx.x;
if(tid < N){
for(int i =0; i < N; i++){
if(i != tid){
a[tid].update(a[i], *t); //void update_pos(Body b, float nf, float t)
}
}
}
}
//////////////////////////////////////////
// Random int Generation
// Developed and introduced by Kent Jones 2016
//adapted from: Scott Griffith's LM1_part 4
//accessed: 12/10/2018
//////////////////////////////////////////
random_device rd; // Used to produce a random seed
default_random_engine engine(rd()); // Use the defualt random number generator engine
std::uniform_real_distribution<> distribution(0, 1000); // Generate a uniform real distribution between 0, 1
//
//Thread-safe C++11 pseudo-random number generator
//@return returns a random value between 0 and 1
//
double cs273_rand() {
return distribution(engine);
}
////////////////////////////////////////////
int main(){
Body* objects[N];
for(int i = 0; i < N; i++){
objects[i] = new Body(float(cs273_rand()), float(cs273_rand()));
}
// for(int i = 0; i < N; i++){
// objects[i] -> where_am_i();
// }
// Body* b1 = new Body(20.65, 50.55);
// Body* b2 = new Body(150.5, 210.35);
// Body* objects[2];
// objects[0] = b1;
// objects[1] = b2;
//Body* result[N]; //Host data, a/b input data, c output data
Body* dev_a; //Device data pointers for the main data
float* dev_time; //device data pointer for time
//Allocate memory on the device
cudaMalloc( (void**)&dev_a, N*sizeof(Body*));
//cudaMalloc( (void**)&dev_b, N*sizeof(Body*));
cudaMalloc( (void**)&dev_time, 1*sizeof(int));
const float t = 0.01; //the time step in seconds
float time_passed = 0.0;
cudaMemcpy(dev_time, time_passed, 1*sizeof(int), cudaMemcpyHostToDevice);
//add<<<1,100>>>(dev_a, dev_time);
for( int i = 0; i < 200000; i++){
cudaMemcpy(dev_a,objects,N*sizeof(Body*),cudaMemcpyHostToDevice);
move<<<1,1024>>>(dev_a, dev_time);
cudaMemcpy(objects,dev_a,N*sizeof(Body*),cudaMemcpyDeviceToHost);
time_passed += t;
cudaMemcpy(dev_time, time_passed, 1*sizeof(int), cudaMemcpyHostToDevice);
}
for(int i = 0; i < N; i++){
objects[i] -> where_am_i();
cout << endl;
}
return 0;
} |
3d7452aa521acb1d788b56dfb841abda241efddc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
void profileCopies(float *h_a,
float *h_b,
float *d,
unsigned int n,
char *desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
float time;
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; ++i) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed ***\n", desc);
break;
}
}
// clean up events
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
}
int main()
{
unsigned int nElements = 4*1024*1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda( hipHostMalloc((void**)&h_aPinned, bytes) ); // host pinned
checkCuda( hipHostMalloc((void**)&h_bPinned, bytes) ); // host pinned
checkCuda( hipMalloc((void**)&d_a, bytes) ); // device
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, 0) );
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
hipFree(d_a);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
} | 3d7452aa521acb1d788b56dfb841abda241efddc.cu | #include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
void profileCopies(float *h_a,
float *h_b,
float *d,
unsigned int n,
char *desc)
{
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
float time;
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; ++i) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed ***\n", desc);
break;
}
}
// clean up events
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
}
int main()
{
unsigned int nElements = 4*1024*1024;
const unsigned int bytes = nElements * sizeof(float);
// host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda( cudaMallocHost((void**)&h_aPinned, bytes) ); // host pinned
checkCuda( cudaMallocHost((void**)&h_bPinned, bytes) ); // host pinned
checkCuda( cudaMalloc((void**)&d_a, bytes) ); // device
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, 0) );
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
} |
73f556b1e28938df3232a249c59ebcaca5de3475.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x7(float* x8, int* x9, float* x10, int x11) {
// this is cuda embedding kernel.
// arg0: 2D embedding table: <n_embedding x embed_size>
// arg1: 1D indices: <indices_size>
// arg2: 2D output: <indices_size x embed_size>
// arg3: embed_size
// invocation assumption: <<<dim3(a,1,1), dim3(indices_size,1,1)>>> where a <= embed_size
// each thread block handles one embedding vector
int x12 = blockDim.x;
int x13 = threadIdx.x;
int x14 = x9[blockIdx.x] * x11;
while (x13 < x11) {
int x15 = x13;
x10[blockIdx.x * x11 + x15] = x8[x14 + x15];
x13 = x13 + x12;
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
float* x1 = (float*)malloc(1200 * sizeof(float));
scan_floats("golden/embedding/embedding.data", x1, 1200);
float* x2 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x2, (size_t)(1200 * sizeof(float))));
CUDA_CALL(hipMemcpy(x2, x1, (size_t)(1200 * sizeof(float)), hipMemcpyHostToDevice));
int* x3 = (int*)malloc(10 * sizeof(int));
scan_ints("golden/embedding/indices.data", x3, 10);
int* x4 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x4, (size_t)(10 * sizeof(int))));
CUDA_CALL(hipMemcpy(x4, x3, (size_t)(10 * sizeof(int)), hipMemcpyHostToDevice));
float* x5 = (float*)malloc(600 * sizeof(float));
float* x6 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x6, (size_t)(600 * sizeof(float))));
hipLaunchKernelGGL(( x7), dim3(dim3(60, 1, 1)), dim3(dim3(10, 1, 1)), 0, 0, x2, x4, x6, 60);
CUDA_CALL(hipMemcpy(x5, x6, (size_t)(600 * sizeof(float)), hipMemcpyDeviceToHost));
check_float_array("golden/embedding/output.data", x5, 600);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
| 73f556b1e28938df3232a249c59ebcaca5de3475.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x7(float* x8, int* x9, float* x10, int x11) {
  // CUDA embedding gather kernel.
  // x8:  2D embedding table <n_embedding x embed_size>, row-major
  // x9:  1D index array <indices_size>
  // x10: 2D output <indices_size x embed_size>, row-major
  // x11: embed_size
  // Invocation assumption: <<<dim3(a,1,1), dim3(indices_size,1,1)>>> with
  // a <= embed_size; each thread block copies the one embedding row selected
  // by x9[blockIdx.x], with the block's threads striding across the columns.
  // NOTE(review): the launch in this file uses grid=60, block=10 while x9 has
  // only 10 entries, so blockIdx.x would overrun x9/x10 -- confirm the
  // intended launch shape against the stated assumption.
  const int stride = blockDim.x;             // threads cooperating on one row
  const int srcBase = x9[blockIdx.x] * x11;  // first element of the selected table row
  const int dstBase = blockIdx.x * x11;      // first element of the output row
  for (int col = threadIdx.x; col < x11; col += stride) {
    x10[dstBase + col] = x8[srcBase + col];
  }
}
/**************** Snippet ****************/
/* Snippet: embedding-lookup driver.
 * Loads a 1200-float embedding table and 10 indices from golden data files,
 * copies them to the device, runs the x7 gather kernel, and checks the
 * 600-float result against the golden output.  x0 is unused by this test.
 *
 * Fixes over the generated original: the `malloc(0 * sizeof(T))` calls whose
 * results were immediately overwritten by cudaMalloc (leaking the host
 * allocations) are replaced with NULL initialization, and the host buffers
 * are freed before returning.
 */
void Snippet(int x0) {
  // Host-side embedding table (sizes fixed by the golden data files).
  float* x1 = (float*)malloc(1200 * sizeof(float));
  scan_floats("golden/embedding/embedding.data", x1, 1200);
  // Device copy of the embedding table.
  float* x2 = NULL;
  CUDA_CALL(cudaMalloc(&x2, (size_t)(1200 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x2, x1, (size_t)(1200 * sizeof(float)), cudaMemcpyHostToDevice));
  // Host-side lookup indices.
  int* x3 = (int*)malloc(10 * sizeof(int));
  scan_ints("golden/embedding/indices.data", x3, 10);
  int* x4 = NULL;
  CUDA_CALL(cudaMalloc(&x4, (size_t)(10 * sizeof(int))));
  CUDA_CALL(cudaMemcpy(x4, x3, (size_t)(10 * sizeof(int)), cudaMemcpyHostToDevice));
  // Host and device output buffers: 10 indices x 60-wide embeddings.
  float* x5 = (float*)malloc(600 * sizeof(float));
  float* x6 = NULL;
  CUDA_CALL(cudaMalloc(&x6, (size_t)(600 * sizeof(float))));
  // NOTE(review): grid.x = 60 here, but x9[blockIdx.x] in the kernel indexes
  // the 10-entry index array -- confirm the intended launch shape.
  x7<<<dim3(60, 1, 1), dim3(10, 1, 1)>>>(x2, x4, x6, 60);
  CUDA_CALL(cudaMemcpy(x5, x6, (size_t)(600 * sizeof(float)), cudaMemcpyDeviceToHost));
  check_float_array("golden/embedding/output.data", x5, 600);
  // Release host buffers (the generated original leaked all of them).
  // TODO(review): also cudaFree(x2/x4/x6) if this harness is ever run in a loop;
  // as a one-shot test the device memory is reclaimed at process exit.
  free(x1);
  free(x3);
  free(x5);
}
/*****************************************
End of C Generated Code
*******************************************/
// Program entry point: expects exactly one command-line argument, parsed as an
// int and forwarded to Snippet; otherwise prints a usage line and exits.
int main(int argc, char *argv[]) {
  if (argc == 2) {
    Snippet(atoi(argv[1]));
    return 0;
  }
  printf("usage: %s <arg>\n", argv[0]);
  return 0;
}
|
f54db0b768a4240d2437f0a3b5b32644b4787c45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "CGCMMAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// small number. cutoff for igoring the angle as being ill defined.
#define SMALL Scalar(0.001)
/*! \file CGCMMAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the CGCMM angle forces. Used by CGCMMAngleForceComputeGPU.
*/
//! Texture for reading angle parameters
scalar2_tex_t angle_params_tex;
//! Texture for reading angle CGCMM S-R parameters
scalar2_tex_t angle_CGCMMsr_tex; // MISSING EPSILON!!! sigma=.x, rcut=.y
//! Texture for reading angle CGCMM Epsilon-pow/pref parameters
scalar4_tex_t angle_CGCMMepow_tex; // now with EPSILON=.x, pow1=.y, pow2=.z, pref=.w
//! Kernel for caculating CGCMM angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_CGCMM_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
Scalar2 *d_CGCMMsr,
Scalar4 *d_CGCMMepow)
{
// One thread per particle: each thread walks every angle its particle
// belongs to and accumulates that particle's share of the harmonic-angle
// force, the 1-3 LJ force, the energy, and the six virial components.
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles =n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
Scalar fac, eac, vac[6];
// initialize the virial to 0
Scalar virial_idx[6];
for (int i = 0; i < 6; i++)
virial_idx[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
// angle table is laid out column-major: stride is `pitch`, column is idx
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
// store the a and c positions to accumlate their forces
int cur_angle_type = cur_angle.idx[2];
// apos_list encodes which slot (0=a, 1=b, 2=c) this particle occupies
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
// assign a/b/c positions depending on which slot this particle fills
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);;
Scalar rcb = sqrtf(rsqcb);
Scalar rsqac = dot(dac, dac);
Scalar rac = sqrtf(rsqac);
// cosine of the a-b-c angle
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
// clamp against rounding so acos/sqrt below stay well defined
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
// floor sin(theta) at SMALL to keep the force finite near collinear angles,
// then store its reciprocal
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
//////////////////////////////////////////
// THIS CODE DOES THE 1-3 LJ repulsions //
//////////////////////////////////////////////////////////////////////////////
fac = Scalar(0.0);
eac = Scalar(0.0);
for (int i=0; i < 6; i++)
vac[i] = Scalar(0.0);
// get the angle E-S-R parameters (MEM TRANSFER: 12 bytes)
const Scalar2 cgSR = texFetchScalar2(d_CGCMMsr, angle_CGCMMsr_tex, cur_angle_type);
Scalar cgsigma = cgSR.x;
Scalar cgrcut = cgSR.y;
if (rac < cgrcut)
{
const Scalar4 cgEPOW = texFetchScalar4(d_CGCMMepow, angle_CGCMMepow_tex, cur_angle_type);
// get the angle pow/pref parameters (MEM TRANSFER: 12 bytes)
Scalar cgeps = cgEPOW.x;
Scalar cgpow1 = cgEPOW.y;
Scalar cgpow2 = cgEPOW.z;
Scalar cgpref = cgEPOW.w;
Scalar cgratio = cgsigma/rac;
// INTERESTING NOTE: POW has weird behavior depending
// on the inputted parameters. Try sigma=2.05, versus sigma=0.05
// in cgcmm_angle_force_test.cc 4 particle test
fac = cgpref*cgeps / rsqac * (cgpow1*fast::pow(cgratio,cgpow1) - cgpow2*fast::pow(cgratio,cgpow2));
eac = cgeps + cgpref*cgeps * (fast::pow(cgratio,cgpow1) - fast::pow(cgratio,cgpow2));
// upper-triangle virial contribution of the 1-3 pair (xx,xy,xz,yy,yz,zz)
vac[0] = fac * dac.x*dac.x;
vac[1] = fac * dac.x*dac.y;
vac[2] = fac * dac.x*dac.z;
vac[3] = fac * dac.y*dac.y;
vac[4] = fac * dac.y*dac.z;
vac[5] = fac * dac.z*dac.z;
}
//////////////////////////////////////////////////////////////////////////////
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = (Scalar(0.5)*tk*dth + eac)*Scalar(Scalar(1.0)/Scalar(3.0));
Scalar angle_virial[6];
angle_virial[0] = (1.f/3.f) * ( dab.x*fab[0] + dcb.x*fcb[0] );
angle_virial[1] = (1.f/3.f) * ( dab.y*fab[0] + dcb.y*fcb[0] );
angle_virial[2] = (1.f/3.f) * ( dab.z*fab[0] + dcb.z*fcb[0] );
angle_virial[3] = (1.f/3.f) * ( dab.y*fab[1] + dcb.y*fcb[1] );
angle_virial[4] = (1.f/3.f) * ( dab.z*fab[1] + dcb.z*fcb[1] );
angle_virial[5] = (1.f/3.f) * ( dab.z*fab[2] + dcb.z*fcb[2] );
for (int i = 0; i < 6; i++)
angle_virial[i] += (1.f/3.f)*vac[i];
// accumulate only this particle's share of the force, by its a/b/c role
if (cur_angle_abc == 0)
{
force_idx.x += fab[0] + fac*dac.x;
force_idx.y += fab[1] + fac*dac.y;
force_idx.z += fab[2] + fac*dac.z;
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0] - fac*dac.x;
force_idx.y += fcb[1] - fac*dac.y;
force_idx.z += fcb[2] - fac*dac.z;
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial_idx[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial_idx[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param d_CGCMMsr sigma, and rcut packed as a Scalar2
\param d_CGCMMepow epsilon, pow1, pow2, and prefactor packed as a Scalar4
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\param compute_capability Compute capability of the device (200, 300, 350, ...)
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
// Host-side launcher: validates inputs, caches the kernel's max block size,
// binds parameter textures on pre-SM35 devices, and launches the kernel with
// one thread per particle.  Returns the first HIP error encountered.
hipError_t gpu_compute_CGCMM_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
Scalar2 *d_CGCMMsr,
Scalar4 *d_CGCMMepow,
unsigned int n_angle_types,
int block_size,
const unsigned int compute_capability)
{
assert(d_params);
assert(d_CGCMMsr);
assert(d_CGCMMepow);
// nothing to do for an empty system
if (N == 0)
return hipSuccess;
// query the kernel's maximum block size once and cache it for later calls
// NOTE(review): this static cache is not guarded -- presumably only ever
// called from a single host thread; confirm before using concurrently.
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_compute_CGCMM_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)N / (double)run_block_size), 1, 1);
dim3 threads(run_block_size, 1, 1);
// bind the textures on pre sm 35 arches
if (compute_capability < 350)
{
hipError_t error = hipBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
if (error != hipSuccess)
return error;
error = hipBindTexture(0, angle_CGCMMsr_tex, d_CGCMMsr, sizeof(Scalar2) * n_angle_types);
if (error != hipSuccess)
return error;
error = hipBindTexture(0, angle_CGCMMepow_tex, d_CGCMMepow, sizeof(Scalar4) * n_angle_types);
if (error != hipSuccess)
return error;
}
// run the kernel
hipLaunchKernelGGL(( gpu_compute_CGCMM_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force,
d_virial,
virial_pitch,
N,
d_pos,
box,
atable,
apos_list,
pitch,
n_angles_list,
d_params,
d_CGCMMsr,
d_CGCMMepow);
return hipSuccess;
}
| f54db0b768a4240d2437f0a3b5b32644b4787c45.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: dnlebard
#include "CGCMMAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// small number. cutoff for igoring the angle as being ill defined.
#define SMALL Scalar(0.001)
/*! \file CGCMMAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the CGCMM angle forces. Used by CGCMMAngleForceComputeGPU.
*/
//! Texture for reading angle parameters
scalar2_tex_t angle_params_tex;
//! Texture for reading angle CGCMM S-R parameters
scalar2_tex_t angle_CGCMMsr_tex; // MISSING EPSILON!!! sigma=.x, rcut=.y
//! Texture for reading angle CGCMM Epsilon-pow/pref parameters
scalar4_tex_t angle_CGCMMepow_tex; // now with EPSILON=.x, pow1=.y, pow2=.z, pref=.w
//! Kernel for caculating CGCMM angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_CGCMM_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
Scalar2 *d_CGCMMsr,
Scalar4 *d_CGCMMepow)
{
// One thread per particle: each thread walks every angle its particle
// belongs to and accumulates that particle's share of the harmonic-angle
// force, the 1-3 LJ force, the energy, and the six virial components.
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles =n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
Scalar fac, eac, vac[6];
// initialize the virial to 0
Scalar virial_idx[6];
for (int i = 0; i < 6; i++)
virial_idx[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
// angle table is laid out column-major: stride is `pitch`, column is idx
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
// store the a and c positions to accumlate their forces
int cur_angle_type = cur_angle.idx[2];
// apos_list encodes which slot (0=a, 1=b, 2=c) this particle occupies
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
// assign a/b/c positions depending on which slot this particle fills
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = sqrtf(rsqab);
Scalar rsqcb = dot(dcb, dcb);;
Scalar rcb = sqrtf(rsqcb);
Scalar rsqac = dot(dac, dac);
Scalar rac = sqrtf(rsqac);
// cosine of the a-b-c angle
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb;
// clamp against rounding so acos/sqrt below stay well defined
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
// floor sin(theta) at SMALL to keep the force finite near collinear angles,
// then store its reciprocal
if (s_abbc < SMALL) s_abbc = SMALL;
s_abbc = Scalar(1.0)/s_abbc;
//////////////////////////////////////////
// THIS CODE DOES THE 1-3 LJ repulsions //
//////////////////////////////////////////////////////////////////////////////
fac = Scalar(0.0);
eac = Scalar(0.0);
for (int i=0; i < 6; i++)
vac[i] = Scalar(0.0);
// get the angle E-S-R parameters (MEM TRANSFER: 12 bytes)
const Scalar2 cgSR = texFetchScalar2(d_CGCMMsr, angle_CGCMMsr_tex, cur_angle_type);
Scalar cgsigma = cgSR.x;
Scalar cgrcut = cgSR.y;
if (rac < cgrcut)
{
const Scalar4 cgEPOW = texFetchScalar4(d_CGCMMepow, angle_CGCMMepow_tex, cur_angle_type);
// get the angle pow/pref parameters (MEM TRANSFER: 12 bytes)
Scalar cgeps = cgEPOW.x;
Scalar cgpow1 = cgEPOW.y;
Scalar cgpow2 = cgEPOW.z;
Scalar cgpref = cgEPOW.w;
Scalar cgratio = cgsigma/rac;
// INTERESTING NOTE: POW has weird behavior depending
// on the inputted parameters. Try sigma=2.05, versus sigma=0.05
// in cgcmm_angle_force_test.cc 4 particle test
fac = cgpref*cgeps / rsqac * (cgpow1*fast::pow(cgratio,cgpow1) - cgpow2*fast::pow(cgratio,cgpow2));
eac = cgeps + cgpref*cgeps * (fast::pow(cgratio,cgpow1) - fast::pow(cgratio,cgpow2));
// upper-triangle virial contribution of the 1-3 pair (xx,xy,xz,yy,yz,zz)
vac[0] = fac * dac.x*dac.x;
vac[1] = fac * dac.x*dac.y;
vac[2] = fac * dac.x*dac.z;
vac[3] = fac * dac.y*dac.y;
vac[4] = fac * dac.y*dac.z;
vac[5] = fac * dac.z*dac.z;
}
//////////////////////////////////////////////////////////////////////////////
// actually calculate the force
Scalar dth = fast::acos(c_abbc) - t_0;
Scalar tk = K*dth;
Scalar a = -Scalar(1.0) * tk * s_abbc;
Scalar a11 = a*c_abbc/rsqab;
Scalar a12 = -a / (rab*rcb);
Scalar a22 = a*c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = (Scalar(0.5)*tk*dth + eac)*Scalar(Scalar(1.0)/Scalar(3.0));
Scalar angle_virial[6];
angle_virial[0] = (1.f/3.f) * ( dab.x*fab[0] + dcb.x*fcb[0] );
angle_virial[1] = (1.f/3.f) * ( dab.y*fab[0] + dcb.y*fcb[0] );
angle_virial[2] = (1.f/3.f) * ( dab.z*fab[0] + dcb.z*fcb[0] );
angle_virial[3] = (1.f/3.f) * ( dab.y*fab[1] + dcb.y*fcb[1] );
angle_virial[4] = (1.f/3.f) * ( dab.z*fab[1] + dcb.z*fcb[1] );
angle_virial[5] = (1.f/3.f) * ( dab.z*fab[2] + dcb.z*fcb[2] );
for (int i = 0; i < 6; i++)
angle_virial[i] += (1.f/3.f)*vac[i];
// accumulate only this particle's share of the force, by its a/b/c role
if (cur_angle_abc == 0)
{
force_idx.x += fab[0] + fac*dac.x;
force_idx.y += fab[1] + fac*dac.y;
force_idx.z += fab[2] + fac*dac.z;
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0] - fac*dac.x;
force_idx.y += fcb[1] - fac*dac.y;
force_idx.z += fcb[2] - fac*dac.z;
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial_idx[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial_idx[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param d_CGCMMsr sigma, and rcut packed as a Scalar2
\param d_CGCMMepow epsilon, pow1, pow2, and prefactor packed as a Scalar4
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\param compute_capability Compute capability of the device (200, 300, 350, ...)
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
// Host-side launcher: validates inputs, caches the kernel's max block size,
// binds parameter textures on pre-SM35 devices, and launches the kernel with
// one thread per particle.  Returns the first CUDA error encountered.
cudaError_t gpu_compute_CGCMM_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
Scalar2 *d_CGCMMsr,
Scalar4 *d_CGCMMepow,
unsigned int n_angle_types,
int block_size,
const unsigned int compute_capability)
{
assert(d_params);
assert(d_CGCMMsr);
assert(d_CGCMMepow);
// nothing to do for an empty system
if (N == 0)
return cudaSuccess;
// query the kernel's maximum block size once and cache it for later calls
// NOTE(review): this static cache is not guarded -- presumably only ever
// called from a single host thread; confirm before using concurrently.
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_compute_CGCMM_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)N / (double)run_block_size), 1, 1);
dim3 threads(run_block_size, 1, 1);
// bind the textures on pre sm 35 arches
if (compute_capability < 350)
{
cudaError_t error = cudaBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
if (error != cudaSuccess)
return error;
error = cudaBindTexture(0, angle_CGCMMsr_tex, d_CGCMMsr, sizeof(Scalar2) * n_angle_types);
if (error != cudaSuccess)
return error;
error = cudaBindTexture(0, angle_CGCMMepow_tex, d_CGCMMepow, sizeof(Scalar4) * n_angle_types);
if (error != cudaSuccess)
return error;
}
// run the kernel
gpu_compute_CGCMM_angle_forces_kernel<<< grid, threads>>>(d_force,
d_virial,
virial_pitch,
N,
d_pos,
box,
atable,
apos_list,
pitch,
n_angles_list,
d_params,
d_CGCMMsr,
d_CGCMMepow);
return cudaSuccess;
}
|
7b7823e28c88de08a65a58d943649985c01d6147.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <valhalla.hpp>
namespace vll {
// Benchmark worker: when `gen` is set, (re)populate a function-static device
// vector with `num` random values; then run thrust::reduce over it `iter`
// times.  The static vector lets data generated by one call be reused by
// later timing-only calls.
template<typename T>
void reduce(const int num, bool gen = true, int iter = 0)
{
static thrust::device_vector<T> gpu_data;
if (gen) {
thrust::host_vector<T> cpu_data(num);
std::generate(cpu_data.begin(), cpu_data.end(), rand);
gpu_data = cpu_data;
hipDeviceSynchronize();
}
for (int pass = 0; pass < iter; ++pass) {
T total = thrust::reduce(gpu_data.begin(), gpu_data.end());
(void)total; // result intentionally discarded; this is a timing harness
}
hipDeviceSynchronize();
}
// Generation-only entry point: allocate and fill `num` elements, no reduction.
template<typename T>
void generate(const int num)
{
try {
reduce<T>(num, true, 0);
} catch(const thrust::system_error &ex) {
std::cout << ex.what() << std::endl;
throw;
}
}
// Timing-only entry point: reduce the previously generated data `iter` times.
template<typename T>
void run(const int iter)
{
try {
reduce<T>(-1, false, iter);
} catch(const thrust::system_error &ex) {
std::cout << ex.what() << std::endl;
throw;
}
}
}
// Entry point: all argument parsing and benchmark dispatch is delegated to
// the framework's launch routine.
int main(int argc, const char **args)
{
return vll::launch(argc, args), 0;
}
| 7b7823e28c88de08a65a58d943649985c01d6147.cu | #include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <valhalla.hpp>
namespace vll {
// Benchmark worker: when `gen` is set, (re)populate a function-static device
// vector with `num` random values; then run thrust::reduce over it `iter`
// times.  The static vector lets data generated by one call be reused by
// later timing-only calls.
template<typename T>
void reduce(const int num, bool gen = true, int iter = 0)
{
static thrust::device_vector<T> gpu_data;
if (gen) {
thrust::host_vector<T> cpu_data(num);
std::generate(cpu_data.begin(), cpu_data.end(), rand);
gpu_data = cpu_data;
cudaDeviceSynchronize();
}
for (int pass = 0; pass < iter; ++pass) {
T total = thrust::reduce(gpu_data.begin(), gpu_data.end());
(void)total; // result intentionally discarded; this is a timing harness
}
cudaDeviceSynchronize();
}
// Generation-only entry point: allocate and fill `num` elements, no reduction.
template<typename T>
void generate(const int num)
{
try {
reduce<T>(num, true, 0);
} catch(const thrust::system_error &ex) {
std::cout << ex.what() << std::endl;
throw;
}
}
// Timing-only entry point: reduce the previously generated data `iter` times.
template<typename T>
void run(const int iter)
{
try {
reduce<T>(-1, false, iter);
} catch(const thrust::system_error &ex) {
std::cout << ex.what() << std::endl;
throw;
}
}
}
// Entry point: all argument parsing and benchmark dispatch is delegated to
// the framework's launch routine.
int main(int argc, const char **args)
{
return vll::launch(argc, args), 0;
}
|
bb235c700a7dc2e886cad061f6ef2f680c5b3a7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <cmath>
#include <string>
#include <cstdio>
#include <iomanip>
#include "dcdread.h"
#include<assert.h>
#include <nvtx3/roctracer/roctx.h>
using namespace std;
//additional error handling code
// Print a readable diagnostic for a failed HIP runtime call (message, source
// file, line number) and terminate the process.
static void HandleError(hipError_t err,
const char *file, int line) {
if (err == hipSuccess)
return;
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//declaration of GPU function
__global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z,
unsigned long long int *d_g2, int numatm, int nconf,
double xbox, double ybox, double zbox, int d_bin);
// Driver: reads a DCD trajectory, histograms all pair distances on the GPU,
// then integrates the radial distribution function into the pair entropy.
// Outputs: RDF.dat (g(r)) and Pair_entropy.dat (s2, s2bond).
int main(int argc , char* argv[])
{
double xbox,ybox,zbox;
double* d_x,*d_y,*d_z;
unsigned long long int *d_g2;
int nbin;
int device;
int numatm,nconf,inconf;
string file;
///////////////////////////////////////////////////////////////
// fixed run parameters: frame cap, histogram bins, input path, GPU id
inconf = 10;
nbin = 2000;
file = "../input/alk.traj.dcd";
device = 0;
HANDLE_ERROR (hipSetDevice(device));//pick the device to use
///////////////////////////////////////
std::ifstream infile;
infile.open(file.c_str());
if(!infile){
cout<<"file "<<file.c_str()<<" not found\n";
return 1;
}
assert(infile);
ofstream pairfile,stwo;
pairfile.open("RDF.dat");
stwo.open("Pair_entropy.dat");
/////////////////////////////////////////////////////////
dcdreadhead(&numatm,&nconf,infile);
cout<<"Dcd file has "<< numatm << " atoms and " << nconf << " frames"<<endl;
// cap the number of frames at inconf
// NOTE(review): when inconf > nconf the "reset" message is printed but nconf
// already holds the smaller value -- message wording is misleading; confirm.
if (inconf>nconf) cout << "nconf is reset to "<< nconf <<endl;
else {nconf = inconf;}
cout<<"Calculating RDF for " << nconf << " frames"<<endl;
////////////////////////////////////////////////////////
unsigned long long int sizef= nconf*numatm*sizeof(double);
unsigned long long int sizebin= nbin*sizeof(unsigned long long int);
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&d_x, sizef);
hipMallocManaged(&d_y, sizef);
hipMallocManaged(&d_z, sizef);
hipMallocManaged(&d_g2, sizebin);
HANDLE_ERROR (hipPeekAtLastError());
// plain memset is valid here because the histogram lives in managed memory
memset(d_g2,0,sizebin);
/////////reading cordinates//////////////////////////////////////////////
roctxRangePush("Read_File");
// per-frame staging buffers (VLAs sized by the atom count from the header)
double ax[numatm],ay[numatm],az[numatm];
for (int i=0;i<nconf;i++) {
dcdreadframe(ax,ay,az,infile,numatm,xbox,ybox,zbox);
for (int j=0;j<numatm;j++){
d_x[i*numatm+j]=ax[j];
d_y[i*numatm+j]=ay[j];
d_z[i*numatm+j]=az[j];
}
}
roctxRangePop(); //pop for Reading file
roctxRangePush("Pair_Calculation");
cout<<"Reading of input file and transfer to gpu is completed"<<endl;
//////////////////////////////////////////////////////////////////////////
// 2D launch: thread (id2, id1) handles one atom pair across all frames
dim3 nthreads(128, 1, 1);
dim3 nblock;
nblock.x = (numatm + nthreads.x - 1)/nthreads.x;
nblock.y = (numatm + nthreads.y - 1)/nthreads.y;
nblock.z = 1;
hipLaunchKernelGGL(( pair_gpu), dim3(nblock), dim3(nthreads), 0, 0,
d_x, d_y, d_z, d_g2, numatm, nconf, xbox, ybox, zbox, nbin);
HANDLE_ERROR (hipPeekAtLastError());
HANDLE_ERROR(hipDeviceSynchronize());
roctxRangePop(); //Pop for Pair Calculation
// normalization constants for converting counts into g(r)
double pi=acos(-1.0l);
double rho=(numatm)/(xbox*ybox*zbox);
double norm=(4.0l*pi*rho)/3.0l;
double rl,ru,nideal;
double g2[nbin];
double r,gr,lngr,lngrbond,s2=0.0l,s2bond=0.0l;
double box=min(xbox,ybox);
box=min(box,zbox);
double del=box/(2.0l*nbin);
roctxRangePush("Entropy_Calculation");
for (int i=0;i<nbin;i++) {
// normalize bin i by the ideal-gas count in the shell [rl, ru)
rl=(i)*del;
ru=rl+del;
nideal=norm*(ru*ru*ru-rl*rl*rl);
g2[i]=(double)d_g2[i]/((double)nconf*(double)numatm*nideal);
r=(i)*del;
pairfile<<(i+0.5l)*del<<" "<<g2[i]<<endl;
// s2 integrand suppresses the short-range (r < 2) region; log terms are
// zeroed for tiny g(r) to avoid log(0)
if (r<2.0l) {
gr=0.0l;
}
else {
gr=g2[i];
}
if (gr<1e-5) {
lngr=0.0l;
}
else {
lngr=log(gr);
}
if (g2[i]<1e-6) {
lngrbond=0.0l;
}
else {
lngrbond=log(g2[i]);
}
s2=s2-2.0l*pi*rho*((gr*lngr)-gr+1.0l)*del*r*r;
s2bond=s2bond-2.0l*pi*rho*((g2[i]*lngrbond)-g2[i]+1.0l)*del*r*r;
}
roctxRangePop(); //Pop for Entropy Calculation
stwo<<"s2 value is "<<s2<<endl;
stwo<<"s2bond value is "<<s2bond<<endl;
cout<<"#Freeing memory"<<endl;
// Free memory
HANDLE_ERROR(hipFree(d_x));
HANDLE_ERROR(hipFree(d_y));
HANDLE_ERROR(hipFree(d_z));
HANDLE_ERROR(hipFree(d_g2));
cout<<"#Number of atoms processed: "<<numatm<<endl<<endl;
cout<<"#Number of confs processed: "<<nconf<<endl<<endl;
return 0;
}
// Pair-distance histogram kernel: thread (id2, id1) from a 2D launch handles
// one unordered atom pair (id1 <= id2) and, for every frame, adds 2 to the
// distance bin if the minimum-image separation is below half the box.
// NOTE(review): id1 == id2 is not excluded, so each atom also deposits a
// self-pair (r = 0, weight 2) into bin 0 every frame -- confirm this matches
// the reference histogram.
// NOTE(review): sqrtf on double operands truncates the distance to single
// precision -- presumably a deliberate speed tradeoff; verify.
__global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z,
unsigned long long int *d_g2, int numatm, int nconf,
double xbox, double ybox, double zbox, int d_bin)
{
double r, cut, dx, dy, dz;
int ig2;
double box;
// bin width and cutoff derive from the shortest box edge (matches the host)
box = min(xbox, ybox);
box = min(box, zbox);
double del = box / (2.0 * d_bin);
cut = box * 0.5;
int id1 = blockIdx.y * blockDim.y + threadIdx.y;
int id2 = blockIdx.x * blockDim.x + threadIdx.x;
if (id1 >= numatm || id2 >= numatm) return;
// keep only id1 <= id2; the weight of 2 below accounts for both orderings
if (id1 > id2) return;
for (int frame = 0; frame < nconf; ++frame) {
dx = d_x[frame * numatm + id1] - d_x[frame * numatm + id2];
dy = d_y[frame * numatm + id1] - d_y[frame * numatm + id2];
dz = d_z[frame * numatm + id1] - d_z[frame * numatm + id2];
// minimum-image convention
dx = dx - xbox * (round(dx / xbox));
dy = dy - ybox * (round(dy / ybox));
dz = dz - zbox * (round(dz / zbox));
r = sqrtf(dx * dx + dy * dy + dz * dz);
if (r < cut) {
ig2 = (int)(r / del);
// many pairs may land in one bin concurrently, hence the atomic
atomicAdd(&d_g2[ig2], 2);
}
}
}
| bb235c700a7dc2e886cad061f6ef2f680c5b3a7e.cu | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include <cmath>
#include <string>
#include <cstdio>
#include <iomanip>
#include "dcdread.h"
#include<assert.h>
#include <nvtx3/nvToolsExt.h>
using namespace std;
//additional error handling code
// Print a readable diagnostic for a failed CUDA runtime call (message, source
// file, line number) and terminate the process.
static void HandleError(cudaError_t err,
const char *file, int line) {
if (err == cudaSuccess)
return;
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//declaration of GPU function
__global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z,
unsigned long long int *d_g2, int numatm, int nconf,
double xbox, double ybox, double zbox, int d_bin);
// Compute the radial distribution function g(r) and the two-body excess
// entropy from a DCD trajectory, using a GPU pair-distance histogram.
// Writes g(r) to RDF.dat and the entropy values to Pair_entropy.dat.
int main(int argc , char* argv[])
{
// Simulation box lengths, filled in by dcdreadframe() for each frame.
double xbox,ybox,zbox;
// Coordinates and histogram live in unified (managed) memory so both the
// host loops and the pair_gpu kernel can access them directly.
double* d_x,*d_y,*d_z;
unsigned long long int *d_g2;
int nbin;
int device;
int numatm,nconf,inconf;
string file;
///////////////////////////////////////////////////////////////
// Hard-coded run parameters: frame cap, histogram bins, input path, GPU id.
inconf = 10;
nbin = 2000;
file = "../input/alk.traj.dcd";
device = 0;
HANDLE_ERROR (cudaSetDevice(device));//pick the device to use
///////////////////////////////////////
std::ifstream infile;
infile.open(file.c_str());
if(!infile){
cout<<"file "<<file.c_str()<<" not found\n";
return 1;
}
assert(infile);
ofstream pairfile,stwo;
pairfile.open("RDF.dat");
stwo.open("Pair_entropy.dat");
/////////////////////////////////////////////////////////
// Read atom/frame counts from the DCD header, then cap the number of
// frames actually processed at inconf.
dcdreadhead(&numatm,&nconf,infile);
cout<<"Dcd file has "<< numatm << " atoms and " << nconf << " frames"<<endl;
if (inconf>nconf) cout << "nconf is reset to "<< nconf <<endl;
else {nconf = inconf;}
cout<<"Calculating RDF for " << nconf << " frames"<<endl;
////////////////////////////////////////////////////////
// NOTE(review): nconf*numatm is evaluated in (signed) int before the
// widening conversion; could overflow for very large systems -- confirm.
unsigned long long int sizef= nconf*numatm*sizeof(double);
unsigned long long int sizebin= nbin*sizeof(unsigned long long int);
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&d_x, sizef);
cudaMallocManaged(&d_y, sizef);
cudaMallocManaged(&d_z, sizef);
cudaMallocManaged(&d_g2, sizebin);
HANDLE_ERROR (cudaPeekAtLastError());
// Host-side memset is valid because d_g2 is managed memory.
memset(d_g2,0,sizebin);
/////////reading cordinates//////////////////////////////////////////////
nvtxRangePush("Read_File");
// NOTE(review): variable-length arrays are a compiler extension in C++.
double ax[numatm],ay[numatm],az[numatm];
// Stage coordinates frame-major: element (frame i, atom j) at i*numatm+j.
for (int i=0;i<nconf;i++) {
dcdreadframe(ax,ay,az,infile,numatm,xbox,ybox,zbox);
for (int j=0;j<numatm;j++){
d_x[i*numatm+j]=ax[j];
d_y[i*numatm+j]=ay[j];
d_z[i*numatm+j]=az[j];
}
}
nvtxRangePop(); //pop for Reading file
nvtxRangePush("Pair_Calculation");
cout<<"Reading of input file and transfer to gpu is completed"<<endl;
//////////////////////////////////////////////////////////////////////////
// 2D launch over atom pairs: x indexes one atom, y the other (see pair_gpu).
dim3 nthreads(128, 1, 1);
dim3 nblock;
nblock.x = (numatm + nthreads.x - 1)/nthreads.x;
nblock.y = (numatm + nthreads.y - 1)/nthreads.y;
nblock.z = 1;
pair_gpu<<<nblock, nthreads>>>
(d_x, d_y, d_z, d_g2, numatm, nconf, xbox, ybox, zbox, nbin);
HANDLE_ERROR (cudaPeekAtLastError());
HANDLE_ERROR(cudaDeviceSynchronize());
nvtxRangePop(); //Pop for Pair Calculation
// Normalize the raw pair counts into g(r) and integrate the entropy terms.
double pi=acos(-1.0l);
double rho=(numatm)/(xbox*ybox*zbox);
double norm=(4.0l*pi*rho)/3.0l;
double rl,ru,nideal;
double g2[nbin];
double r,gr,lngr,lngrbond,s2=0.0l,s2bond=0.0l;
double box=min(xbox,ybox);
box=min(box,zbox);
// Bin width: must match 'del' computed inside pair_gpu.
double del=box/(2.0l*nbin);
nvtxRangePush("Entropy_Calculation");
for (int i=0;i<nbin;i++) {
rl=(i)*del;
ru=rl+del;
// Expected pair count in the ideal-gas shell [rl, ru), used to normalize.
nideal=norm*(ru*ru*ru-rl*rl*rl);
g2[i]=(double)d_g2[i]/((double)nconf*(double)numatm*nideal);
r=(i)*del;
pairfile<<(i+0.5l)*del<<" "<<g2[i]<<endl;
// Values below r = 2 are treated as zero in the s2 integrand.
if (r<2.0l) {
gr=0.0l;
}
else {
gr=g2[i];
}
// Guard the logarithms against (near-)zero g(r).
if (gr<1e-5) {
lngr=0.0l;
}
else {
lngr=log(gr);
}
if (g2[i]<1e-6) {
lngrbond=0.0l;
}
else {
lngrbond=log(g2[i]);
}
s2=s2-2.0l*pi*rho*((gr*lngr)-gr+1.0l)*del*r*r;
s2bond=s2bond-2.0l*pi*rho*((g2[i]*lngrbond)-g2[i]+1.0l)*del*r*r;
}
nvtxRangePop(); //Pop for Entropy Calculation
stwo<<"s2 value is "<<s2<<endl;
stwo<<"s2bond value is "<<s2bond<<endl;
cout<<"#Freeing memory"<<endl;
// Free memory
HANDLE_ERROR(cudaFree(d_x));
HANDLE_ERROR(cudaFree(d_y));
HANDLE_ERROR(cudaFree(d_z));
HANDLE_ERROR(cudaFree(d_g2));
cout<<"#Number of atoms processed: "<<numatm<<endl<<endl;
cout<<"#Number of confs processed: "<<nconf<<endl<<endl;
return 0;
}
// Accumulate the pair-distance histogram used to build g(r).
//
// Launch: 2D grid where x indexes atom id2 and y indexes atom id1 (one thread
// per atom pair); each thread loops over all nconf frames.
//
// d_x/d_y/d_z : coordinates laid out frame-major (frame*numatm + atom)
// d_g2        : histogram of d_bin bins; each counted pair adds 2
//               (accounts for both orderings of the pair)
// xbox/ybox/zbox : periodic box lengths (minimum-image convention applied)
// d_bin       : number of bins; bin width must match the host-side 'del'
__global__ void pair_gpu(const double* d_x, const double* d_y, const double* d_z,
                         unsigned long long int *d_g2, int numatm, int nconf,
                         double xbox, double ybox, double zbox, int d_bin)
{
    double r, cut, dx, dy, dz;
    int ig2;
    double box;
    box = min(xbox, ybox);
    box = min(box, zbox);

    double del = box / (2.0 * d_bin);   // bin width (must match host 'del')
    cut = box * 0.5;                    // half-box cutoff for minimum image

    int id1 = blockIdx.y * blockDim.y + threadIdx.y;
    int id2 = blockIdx.x * blockDim.x + threadIdx.x;
    if (id1 >= numatm || id2 >= numatm) return;
    // Count each unordered pair once (the +2 below restores the symmetry).
    // NOTE(review): id1 == id2 falls through, so every self-pair (r == 0)
    // lands in bin 0 once per frame -- TODO confirm this is intended.
    if (id1 > id2) return;

    for (int frame = 0; frame < nconf; ++frame) {
        dx = d_x[frame * numatm + id1] - d_x[frame * numatm + id2];
        dy = d_y[frame * numatm + id1] - d_y[frame * numatm + id2];
        dz = d_z[frame * numatm + id1] - d_z[frame * numatm + id2];

        // Minimum-image convention: wrap each component into [-box/2, box/2].
        dx = dx - xbox * (round(dx / xbox));
        dy = dy - ybox * (round(dy / ybox));
        dz = dz - zbox * (round(dz / zbox));

        // Fix: was sqrtf(), which silently truncated the double operands to
        // float before the square root; use double-precision sqrt() instead.
        r = sqrt(dx * dx + dy * dy + dz * dz);
        if (r < cut) {
            ig2 = (int)(r / del);
            atomicAdd(&d_g2[ig2], 2);
        }
    }
}
|
detectNet.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detectNet.h"
#include "cudaUtility.h"
// Per-pixel overlay kernel: each thread shades one image pixel, alpha-blending
// the class color of every detection whose bounding box contains that pixel.
// Launch: 2D grid covering width x height.  T must expose .x/.y/.z members
// (uchar3/uchar4/float3/float4).
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const int px_idx = y * width + x;
T px = input[px_idx];
const float fx = x;
const float fy = y;
// Blend sequentially over all detections, so overlapping boxes compound.
for( int n=0; n < numDetections; n++ )
{
const detectNet::Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
// color.w carries the overlay opacity in [0, 255].
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[px_idx] = px;
}
// Box-local overlay kernel: the grid covers only the boxWidth x boxHeight
// region anchored at (x0, y0); each thread alpha-blends one pixel with 'color'.
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
// Translate box-local coordinates into image coordinates and clip.
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
// color.w carries the overlay opacity in [0, 255].
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
// Host-side launcher: draws each detection with one gpuDetectionOverlayBox
// launch sized to that detection's box.
// Note: numDetections == 0 is reported as hipErrorInvalidValue, not a no-op.
template<typename T>
hipError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return hipErrorInvalidValue;
// this assumes that the output already has the input image copied to it,
// which if input != output, is done first by detectNet::Detect()
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
hipLaunchKernelGGL(( gpuDetectionOverlayBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
// Only the most recent launch's error is observable here.
return hipGetLastError();
}
// Format dispatcher: selects the pixel vector type matching 'format' and
// forwards to the templated launcher.  Unsupported formats yield
// hipErrorInvalidValue.
hipError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors )
{
	switch( format )
	{
		case IMAGE_RGB8:
			return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
		case IMAGE_RGBA8:
			return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
		case IMAGE_RGB32F:
			return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
		case IMAGE_RGBA32F:
			return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
		default:
			return hipErrorInvalidValue;
	}
}
| detectNet.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detectNet.h"
#include "cudaUtility.h"
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const int px_idx = y * width + x;
T px = input[px_idx];
const float fx = x;
const float fy = y;
for( int n=0; n < numDetections; n++ )
{
const detectNet::Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
}
}
output[px_idx] = px;
}
// Box-local overlay kernel: the grid covers only the boxWidth x boxHeight
// region anchored at (x0, y0); each thread alpha-blends one pixel with 'color'.
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
	const int bx = blockIdx.x * blockDim.x + threadIdx.x;
	const int by = blockIdx.y * blockDim.y + threadIdx.y;

	// outside the box itself
	if( bx >= boxWidth || by >= boxHeight )
		return;

	// translate into image coordinates and clip against the image bounds
	const int ix = bx + x0;
	const int iy = by + y0;

	if( ix >= imgWidth || iy >= imgHeight )
		return;

	const int idx = iy * imgWidth + ix;

	// color.w carries the overlay opacity in [0, 255]
	const float a    = color.w / 255.0f;
	const float inv  = 1.0f - a;

	T pixel = input[idx];

	pixel.x = a * color.x + inv * pixel.x;
	pixel.y = a * color.y + inv * pixel.y;
	pixel.z = a * color.z + inv * pixel.z;

	output[idx] = pixel;
}
template<typename T>
cudaError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return cudaErrorInvalidValue;
// this assumes that the output already has the input image copied to it,
// which if input != output, is done first by detectNet::Detect()
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
gpuDetectionOverlayBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
return cudaGetLastError();
}
// Format dispatcher: selects the pixel vector type matching 'format' and
// forwards to the templated launcher.  Unsupported formats yield
// cudaErrorInvalidValue.
cudaError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors )
{
if( format == IMAGE_RGB8 )
return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA8 )
return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGB32F )
return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA32F )
return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
else
return cudaErrorInvalidValue;
}
|
09818e31c7d3fd16474c5c8d200860cecac9a5be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Adrien REMY
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/******************************************************************************/
// One step of the random butterfly transform: view dA as a 2x2 grid of
// (n/2 x n/2) sub-blocks and replace the four corresponding entries with
// their butterfly combinations, scaled by du (row factors) and dv (column
// factors).  Each thread owns one (idx, idy) position of the top-left quadrant.
// NOTE(review): the shared tiles are indexed directly by threadIdx.x/.y, so
// this assumes blockDim == (block_height, block_width) -- confirm at launches.
// NOTE(review): __syncthreads() sits inside the divergent guard below; this is
// only safe if entire blocks fall in/out of range together -- confirm.
static __device__ void
magmablas_zelementary_multiplication_devfunc(
magma_int_t n,
magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *du,
magmaDoubleComplex *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2) && (idy < n/2)) {
dA += idx + idy * ldda;
magmaDoubleComplex a00, a10, a01, a11, b1, b2, b3, b4;
// Stage the needed slices of du and dv in shared memory (one element per
// thread row/column), then barrier before any thread reads them.
__shared__ magmaDoubleComplex u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x] = du[0];
u2[threadIdx.x] = du[n/2];
v1[threadIdx.y] = dv[0];
v2[threadIdx.y] = dv[n/2];
__syncthreads();
// The four entries of dA handled by this thread, one per quadrant.
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
// Butterfly combinations: pairwise sums and differences across quadrants.
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
/******************************************************************************/
__global__ void
magmablas_zelementary_multiplication_kernel(
magma_int_t n,
magmaDoubleComplex *dA, magma_int_t offsetA, magma_int_t ldda,
magmaDoubleComplex *du, magma_int_t offsetu,
magmaDoubleComplex *dv, magma_int_t offsetv)
{
magmablas_zelementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv );
}
/******************************************************************************/
__global__ void
magmablas_zelementary_multiplication_kernel_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t offsetA, magma_int_t ldda,
magmaDoubleComplex *du, magma_int_t offsetu,
magmaDoubleComplex *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_zelementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv );
}
/******************************************************************************/
// Butterfly transform applied to a vector: the two halves db[i] and db[i+n/2]
// are first scaled by the matching du entries, then replaced in place by
// their sum and difference.  One thread per element of the top half.
static __device__ void
magmablas_zapply_vector_devfunc(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
magmaDoubleComplex a1,a2;
// Scale first, then combine in place.
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/******************************************************************************/
__global__ void
magmablas_zapply_vector_kernel(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex *db, magma_int_t offsetb )
{
magmablas_zapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/******************************************************************************/
__global__ void
magmablas_zapply_vector_kernel_batched(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_zapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/******************************************************************************/
// Transposed butterfly applied to a vector: sum and difference of the two
// halves are formed first, then each half is scaled by the matching du entry
// (the reverse operation order of magmablas_zapply_vector_devfunc).
// One thread per element of the top half.
static __device__ void
magmablas_zapply_transpose_vector_devfunc(
magma_int_t n,
magmaDoubleComplex *du,magmaDoubleComplex *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
magmaDoubleComplex a1,a2;
// Combine first, then scale in place.
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/******************************************************************************/
__global__ void
magmablas_zapply_transpose_vector_kernel(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex *db, magma_int_t offsetb )
{
magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/******************************************************************************/
__global__ void
magmablas_zapply_transpose_vector_kernel_batched(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
| 09818e31c7d3fd16474c5c8d200860cecac9a5be.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Adrien REMY
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/******************************************************************************/
static __device__ void
magmablas_zelementary_multiplication_devfunc(
magma_int_t n,
magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *du,
magmaDoubleComplex *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2) && (idy < n/2)) {
dA += idx + idy * ldda;
magmaDoubleComplex a00, a10, a01, a11, b1, b2, b3, b4;
__shared__ magmaDoubleComplex u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x] = du[0];
u2[threadIdx.x] = du[n/2];
v1[threadIdx.y] = dv[0];
v2[threadIdx.y] = dv[n/2];
__syncthreads();
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
/******************************************************************************/
__global__ void
magmablas_zelementary_multiplication_kernel(
magma_int_t n,
magmaDoubleComplex *dA, magma_int_t offsetA, magma_int_t ldda,
magmaDoubleComplex *du, magma_int_t offsetu,
magmaDoubleComplex *dv, magma_int_t offsetv)
{
magmablas_zelementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv );
}
/******************************************************************************/
__global__ void
magmablas_zelementary_multiplication_kernel_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t offsetA, magma_int_t ldda,
magmaDoubleComplex *du, magma_int_t offsetu,
magmaDoubleComplex *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_zelementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv );
}
/******************************************************************************/
static __device__ void
magmablas_zapply_vector_devfunc(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
magmaDoubleComplex a1,a2;
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/******************************************************************************/
__global__ void
magmablas_zapply_vector_kernel(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex *db, magma_int_t offsetb )
{
magmablas_zapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/******************************************************************************/
__global__ void
magmablas_zapply_vector_kernel_batched(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_zapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/******************************************************************************/
static __device__ void
magmablas_zapply_transpose_vector_devfunc(
magma_int_t n,
magmaDoubleComplex *du,magmaDoubleComplex *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
magmaDoubleComplex a1,a2;
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/******************************************************************************/
__global__ void
magmablas_zapply_transpose_vector_kernel(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex *db, magma_int_t offsetb )
{
magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/******************************************************************************/
__global__ void
magmablas_zapply_transpose_vector_kernel_batched(
magma_int_t n,
magmaDoubleComplex *du, magma_int_t offsetu, magmaDoubleComplex **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
|
74b75bf27612864b0dc8b3ad8661097b4bc9d61e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise kernel: output[i] = log(|input[i]|) for i < numElements.
// Supports fully 3D grid and block shapes; the arithmetic below linearizes
// (blockIdx, threadIdx) into a single flat global thread id.
__global__ void process_kernel2(const float* input, float* output, int numElements){
int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
// Tail guard: the grid may supply more threads than elements.
if(globalThreadId < numElements)
output[globalThreadId] = (float)log(fabs(input[globalThreadId]));
} | 74b75bf27612864b0dc8b3ad8661097b4bc9d61e.cu | #include "includes.h"
__global__ void process_kernel2(const float* input, float* output, int numElements){
int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
if(globalThreadId < numElements)
output[globalThreadId] = (float)log(fabs(input[globalThreadId]));
} |
f658dec914b21c730bfd470f917746c15ac992ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "operate.cuh"
#include <cmath>
// Nearest-neighbour resize kernel: one thread per destination pixel, sampling
// the source at (ix / fx, iy / fy).
//
// src/dest  : packed uchar3 pixel buffers (row-major, no padding)
// fx/fy     : dest/src scale factors per axis
// Grid must cover dest_width x dest_height.
__global__ void ResizeKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height, float fx, float fy)
{
	int ix = threadIdx.x + blockIdx.x * blockDim.x;
	int iy = threadIdx.y + blockIdx.y * blockDim.y;
	if (ix >= dest_width || iy >= dest_height)
	{
		return;
	}

	int src_px = ::floor(ix / fx);
	int src_py = ::floor(iy / fy);

	// Clamp to the last valid column/row.  Fix: the previous clamp used
	// src_width/src_height themselves as the upper bound, which permitted an
	// out-of-bounds read one element past each dimension.
	src_px = src_px > src_width - 1 ? src_width - 1 : src_px;
	src_px = src_px < 0 ? 0 : src_px;
	src_py = src_py > src_height - 1 ? src_height - 1 : src_py;
	src_py = src_py < 0 ? 0 : src_py;

	dest[ix + dest_width * iy] = src[src_px + src_width * src_py];
}
// Nearest-neighbour resize of an 8-bit 3-channel image on the GPU: allocates
// device buffers, uploads src, launches ResizeKernal, downloads into dest.
// NOTE(review): src.data is copied as a flat uchar3 array, which assumes a
// continuous CV_8UC3 Mat (no ROI views) -- TODO confirm callers guarantee this.
// NOTE(review): hip* return codes are not checked here.
Result Operate::CvResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
// Per-axis scale factors from source to destination.
float fx = float(dest_width) / width;
float fy = float(dest_height) / height;
dest.create(cv::Size(dest_width, dest_height), src.type());
// One thread per destination pixel; block_ is the member block shape.
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
hipMalloc((void**)&src_dev, src_bytes_size);
hipMalloc((void**)&dest_dev, dest_bytes_size);
hipMemcpy(src_dev, src.data, src_bytes_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ResizeKernal), dim3(grid), dim3(block_), 0, 0, src_dev, width, height, dest_dev, dest_width, dest_height, fx, fy);
hipDeviceSynchronize();
hipMemcpy(dest.data, dest_dev, dest_bytes_size, hipMemcpyDeviceToHost);
hipFree(src_dev);
hipFree(dest_dev);
return SUCCESS;
}
// = x + x + x + x
// Bilinear-interpolation resize kernel: one thread per destination pixel.
// Sampling uses pixel-center alignment ((i + 0.5) * scale - 0.5), and the
// top-left stencil corner is clamped to [0, src-2] so the 2x2 neighbourhood
// read below never leaves the source image.
__global__ void BiLinearKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= dest_width || iy >= dest_height)
{
return;
}
float scale_x = (float)(src_width) / dest_width;
float scale_y = (float)(src_height) / dest_height;
// Continuous source coordinates of this destination pixel's center.
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
// Clamp the top-left corner (p00) of the 2x2 stencil in x.
int src_p00_x = ::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
// Fractional distances to the two stencil columns.
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
// Clamp the top-left corner of the stencil in y.
int src_p00_y = ::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
// Bilinear weights.  Note the names are mirrored relative to the sample each
// one multiplies below (e.g. alph11 = (1-dx)*(1-dy) weights the p00 sample);
// numerically this is still the standard bilinear formula.
float alph00 = dist_p_p00_x * dist_p_p00_y; // dx * dy
float alph01 = dist_p_p01_x * dist_p_p00_y; // (1-dx) * dy
float alph10 = dist_p_p00_x * dist_p_p10_y; // dx * (1-dy)
float alph11 = dist_p_p01_x * dist_p_p10_y; // (1-dx) * (1-dy)
dest[ix + dest_width * iy].x = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
dest[ix + dest_width * iy].y = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy].z = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
}
Result Operate::CvBiLinearResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
dest.create(cv::Size(dest_width, dest_height), src.type());
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
hipMalloc((void**)&src_dev, src_bytes_size);
hipMalloc((void**)&dest_dev, dest_bytes_size);
hipMemcpy(src_dev, src.data, src_bytes_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( BiLinearKernal), dim3(grid), dim3(block_), 0, 0, src_dev, width, height, dest_dev, dest_width, dest_height);
hipDeviceSynchronize();
hipMemcpy(dest.data, dest_dev, dest_bytes_size, hipMemcpyDeviceToHost);
hipFree(src_dev);
hipFree(dest_dev);
return SUCCESS;
}
__global__ void PadBiLinearKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height, int roi_width, int roi_height)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= roi_width || iy >= roi_height)
{
return;
}
float scale_x = (float)(src_width) / roi_width;
float scale_y = (float)(src_height) / roi_height;
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
int src_p00_x = ::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
int src_p00_y = ::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
float alph00 = dist_p_p00_x * dist_p_p00_y; //
float alph01 = dist_p_p01_x * dist_p_p00_y; //
float alph10 = dist_p_p00_x * dist_p_p10_y; //
float alph11 = dist_p_p01_x * dist_p_p10_y; //
dest[ix + dest_width * iy].x = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
// float dest_val = (dest[ix + dest_width * iy].x) * 1.0f / 255;
// printf("====>>> %0.4f", dest_val);
dest[ix + dest_width * iy].y = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy].z = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
}
Result Operate::CvPadResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height,
float &scale, cv::Rect &paste_roi, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
dest.create(cv::Size(dest_width, dest_height), src.type());
PadResize(width, height, dest_width, dest_height, paste_roi, scale);
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
hipMalloc((void**)&src_dev, src_bytes_size);
hipMalloc((void**)&dest_dev, dest_bytes_size);
hipMemcpy(src_dev, src.data, src_bytes_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( PadBiLinearKernal), dim3(grid), dim3(block_), 0, 0, src_dev, width, height, dest_dev, dest_width, dest_height, paste_roi.width, paste_roi.height);
hipDeviceSynchronize();
hipMemcpy(dest.data, dest_dev, dest_bytes_size, hipMemcpyDeviceToHost);
hipFree(src_dev);
hipFree(dest_dev);
return SUCCESS;
}
// Compute the letterbox placement of an image inside a dest_width x
// dest_height canvas: the image is shrunk (never enlarged) to fit while
// preserving aspect ratio.  dest_roi receives the top-left-anchored region
// the resized image occupies; scale is the src-to-dest shrink factor.
Result Operate::PadResize(const uint32_t &image_width, const uint32_t &image_height, const uint32_t &dest_width, const uint32_t &dest_height,
    cv::Rect &dest_roi, float &scale)
{
    scale = 1.0f;
    dest_roi.x = 0;
    dest_roi.y = 0;

    if (image_width <= (int)dest_width && image_height <= (int)dest_height)
    {
        // Already fits: paste at native resolution, no scaling.
        dest_roi.width = image_width;
        dest_roi.height = image_height;
        return SUCCESS;
    }

    const float image_aspect = float(image_width) / image_height;
    const float dest_aspect = float(dest_width) / dest_height;

    if (image_aspect >= dest_aspect)
    {
        // Width is the binding dimension; height shrinks proportionally.
        scale = float(image_width) / dest_width;
        dest_roi.width = dest_width;
        dest_roi.height = std::round(image_height / scale);
    }
    else
    {
        // Height is the binding dimension; width shrinks proportionally.
        scale = float(image_height) / dest_height;
        dest_roi.width = std::round(image_width / scale);
        dest_roi.height = dest_height;
    }
    return SUCCESS;
}
__global__ void PadResizeNormalKernal(uchar3* src, int src_width, int src_height, float* dest, int dest_width, int dest_height, int roi_width, int roi_height,
int mean_b, int mean_g, int mean_r, float var_b, float var_g, float var_r)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= roi_width || iy >= roi_height)
{
return;
}
float scale_x = (float)(src_width) / roi_width;
float scale_y = (float)(src_height) / roi_height;
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
int src_p00_x = ::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
int src_p00_y = ::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
float alph00 = dist_p_p00_x * dist_p_p00_y; //
float alph01 = dist_p_p01_x * dist_p_p00_y; //
float alph10 = dist_p_p00_x * dist_p_p10_y; //
float alph11 = dist_p_p01_x * dist_p_p10_y; //
uchar dest_uint8_b = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 2] = (dest_uint8_b - mean_b) * var_b;
// float dest_val = (dest_uint8_b - mean_b) * var_b;
// printf("=====>>> %d, %0.4f\n", dest_uint8_b, dest_val);
int dest_uint8_g = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 1] = (dest_uint8_g - mean_g) * var_g;
int dest_uint8_r = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 0] = (dest_uint8_r - mean_r) * var_r;
}
Result Operate::CvPadResizeGpu(uchar3 *src_dev, const uint32_t &src_width, const uint32_t &src_height,
const uint32_t &dest_width, const uint32_t &dest_height,
const std::vector<int> &means, const std::vector<float> &vars,
float &scale, float *dest_dev)
{
cv::Rect paste_roi;
PadResize(src_width, src_height, dest_width, dest_height, paste_roi, scale);
std::cout << paste_roi.width << ", " << paste_roi.height << std::endl;
std::cout << dest_width << ", " << dest_height << std::endl;
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
hipLaunchKernelGGL(( PadResizeNormalKernal), dim3(grid), dim3(block_), 0, 0, src_dev, src_width, src_height, dest_dev, dest_width, dest_height,
paste_roi.width, paste_roi.height, means[2],means[1], means[0], vars[2], vars[1], vars[0]);
hipDeviceSynchronize();
return SUCCESS;
} | f658dec914b21c730bfd470f917746c15ac992ca.cu | #include "operate.cuh"
#include <cmath>
__global__ void ResizeKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height, float fx, float fy)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= dest_width || iy >= dest_height)
{
return;
}
int src_px = std::floor(ix / fx);
int src_py = std::floor(iy / fy);
src_px = src_px > src_width ? src_width : src_px;
src_px = src_px < 0 ? 0 : src_px;
src_py = src_py > src_height ? src_height : src_py;
src_py = src_py < 0 ? 0 : src_py;
dest[ix + dest_width * iy] = src[src_px + src_width * src_py];
}
Result Operate::CvResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
float fx = float(dest_width) / width;
float fy = float(dest_height) / height;
dest.create(cv::Size(dest_width, dest_height), src.type());
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
cudaMalloc((void**)&src_dev, src_bytes_size);
cudaMalloc((void**)&dest_dev, dest_bytes_size);
cudaMemcpy(src_dev, src.data, src_bytes_size, cudaMemcpyHostToDevice);
ResizeKernal<<<grid, block_>>>(src_dev, width, height, dest_dev, dest_width, dest_height, fx, fy);
cudaDeviceSynchronize();
cudaMemcpy(dest.data, dest_dev, dest_bytes_size, cudaMemcpyDeviceToHost);
cudaFree(src_dev);
cudaFree(dest_dev);
return SUCCESS;
}
//待求点的像素值 = 左上点的像素值 x 右下矩形的面积 + 左下点的像素值 x 右上矩形的面积 + 右上点的像素值 x 左下矩形的面积 + 右下点的像素值 x 左上矩形的面积。
__global__ void BiLinearKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= dest_width || iy >= dest_height)
{
return;
}
float scale_x = (float)(src_width) / dest_width;
float scale_y = (float)(src_height) / dest_height;
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
int src_p00_x = std::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
int src_p00_y = std::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
float alph00 = dist_p_p00_x * dist_p_p00_y; // 左上面积
float alph01 = dist_p_p01_x * dist_p_p00_y; // 右上面积
float alph10 = dist_p_p00_x * dist_p_p10_y; // 左下面积
float alph11 = dist_p_p01_x * dist_p_p10_y; // 右下面积
dest[ix + dest_width * iy].x = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
dest[ix + dest_width * iy].y = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy].z = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
}
Result Operate::CvBiLinearResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
dest.create(cv::Size(dest_width, dest_height), src.type());
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
cudaMalloc((void**)&src_dev, src_bytes_size);
cudaMalloc((void**)&dest_dev, dest_bytes_size);
cudaMemcpy(src_dev, src.data, src_bytes_size, cudaMemcpyHostToDevice);
BiLinearKernal<<<grid, block_>>>(src_dev, width, height, dest_dev, dest_width, dest_height);
cudaDeviceSynchronize();
cudaMemcpy(dest.data, dest_dev, dest_bytes_size, cudaMemcpyDeviceToHost);
cudaFree(src_dev);
cudaFree(dest_dev);
return SUCCESS;
}
__global__ void PadBiLinearKernal(uchar3* src, int src_width, int src_height, uchar3* dest, int dest_width, int dest_height, int roi_width, int roi_height)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= roi_width || iy >= roi_height)
{
return;
}
float scale_x = (float)(src_width) / roi_width;
float scale_y = (float)(src_height) / roi_height;
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
int src_p00_x = std::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
int src_p00_y = std::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
float alph00 = dist_p_p00_x * dist_p_p00_y; // 左上面积
float alph01 = dist_p_p01_x * dist_p_p00_y; // 右上面积
float alph10 = dist_p_p00_x * dist_p_p10_y; // 左下面积
float alph11 = dist_p_p01_x * dist_p_p10_y; // 右下面积
dest[ix + dest_width * iy].x = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
// float dest_val = (dest[ix + dest_width * iy].x) * 1.0f / 255;
// printf("====>>> %0.4f", dest_val);
dest[ix + dest_width * iy].y = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy].z = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
}
Result Operate::CvPadResize(const cv::Mat &src, const uint32_t &dest_width, const uint32_t &dest_height,
float &scale, cv::Rect &paste_roi, cv::Mat &dest)
{
dest.release();
int channel_num = src.channels();
int width = src.cols;
int height = src.rows;
int step = src.step;
printf("src image width: %d, height: %d, channels: %d, step: %d\n", width, height, channel_num, step);
dest.create(cv::Size(dest_width, dest_height), src.type());
PadResize(width, height, dest_width, dest_height, paste_roi, scale);
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
uchar3* src_dev = nullptr;
uchar3* dest_dev = nullptr;
int src_bytes_size = height * width * sizeof(uchar3);
int dest_bytes_size = dest.rows * dest.cols * sizeof(uchar3);
cudaMalloc((void**)&src_dev, src_bytes_size);
cudaMalloc((void**)&dest_dev, dest_bytes_size);
cudaMemcpy(src_dev, src.data, src_bytes_size, cudaMemcpyHostToDevice);
PadBiLinearKernal<<<grid, block_>>>(src_dev, width, height, dest_dev, dest_width, dest_height, paste_roi.width, paste_roi.height);
cudaDeviceSynchronize();
cudaMemcpy(dest.data, dest_dev, dest_bytes_size, cudaMemcpyDeviceToHost);
cudaFree(src_dev);
cudaFree(dest_dev);
return SUCCESS;
}
Result Operate::PadResize(const uint32_t &image_width, const uint32_t &image_height, const uint32_t &dest_width, const uint32_t &dest_height,
cv::Rect &dest_roi, float &scale)
{
scale = 1.0f;
if (image_width <= (int)dest_width && image_height <= (int)dest_height)
{
dest_roi.x = 0;
dest_roi.y = 0;
dest_roi.width = image_width;
dest_roi.height = image_height;
}
else if (float(image_width) / image_height >= float(dest_width) / dest_height) // 按照width缩放
{
scale = float(image_width) / dest_width;
dest_roi.x = 0;
dest_roi.y = 0;
dest_roi.width = dest_width;
dest_roi.height = std::round(image_height / scale);
}
else // 按照height缩放
{
scale = float(image_height) / dest_height;
dest_roi.x = 0;
dest_roi.y = 0;
dest_roi.width = std::round(image_width / scale);
dest_roi.height = dest_height;
}
return SUCCESS;
}
__global__ void PadResizeNormalKernal(uchar3* src, int src_width, int src_height, float* dest, int dest_width, int dest_height, int roi_width, int roi_height,
int mean_b, int mean_g, int mean_r, float var_b, float var_g, float var_r)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= roi_width || iy >= roi_height)
{
return;
}
float scale_x = (float)(src_width) / roi_width;
float scale_y = (float)(src_height) / roi_height;
float px = (ix + 0.5) * scale_x - 0.5;
float py = (iy + 0.5) * scale_y - 0.5;
int src_p00_x = std::floor(px);
if (src_p00_x < 0)
{
src_p00_x = 0;
px = 0;
}
if (src_p00_x >= src_width - 1)
{
src_p00_x = src_width - 2;
px = src_p00_x;
}
float dist_p_p00_x = px - src_p00_x;
float dist_p_p01_x = 1.f - dist_p_p00_x;
int src_p00_y = std::floor(py);
if (src_p00_y < 0)
{
src_p00_y = 0;
py = 0;
}
if (src_p00_y >= src_height - 1)
{
src_p00_y = src_height - 2;
py = src_p00_y;
}
float dist_p_p00_y = py - src_p00_y;
float dist_p_p10_y = 1.f - dist_p_p00_y;
float alph00 = dist_p_p00_x * dist_p_p00_y; // 左上面积
float alph01 = dist_p_p01_x * dist_p_p00_y; // 右上面积
float alph10 = dist_p_p00_x * dist_p_p10_y; // 左下面积
float alph11 = dist_p_p01_x * dist_p_p10_y; // 右下面积
uchar dest_uint8_b = src[src_p00_x + src_width * src_p00_y].x * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].x * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].x * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].x * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 2] = (dest_uint8_b - mean_b) * var_b;
// float dest_val = (dest_uint8_b - mean_b) * var_b;
// printf("=====>>> %d, %0.4f\n", dest_uint8_b, dest_val);
int dest_uint8_g = src[src_p00_x + src_width * src_p00_y].y * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].y * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].y * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].y * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 1] = (dest_uint8_g - mean_g) * var_g;
int dest_uint8_r = src[src_p00_x + src_width * src_p00_y].z * alph11
+ src[src_p00_x + 1 + src_width * src_p00_y].z * alph10
+ src[src_p00_x + src_width * (src_p00_y + 1)].z * alph01
+ src[src_p00_x + 1 + src_width * (src_p00_y + 1)].z * alph00;
dest[ix + dest_width * iy + (dest_width * dest_height) * 0] = (dest_uint8_r - mean_r) * var_r;
}
Result Operate::CvPadResizeGpu(uchar3 *src_dev, const uint32_t &src_width, const uint32_t &src_height,
const uint32_t &dest_width, const uint32_t &dest_height,
const std::vector<int> &means, const std::vector<float> &vars,
float &scale, float *dest_dev)
{
cv::Rect paste_roi;
PadResize(src_width, src_height, dest_width, dest_height, paste_roi, scale);
std::cout << paste_roi.width << ", " << paste_roi.height << std::endl;
std::cout << dest_width << ", " << dest_height << std::endl;
dim3 grid((dest_width - 1) / block_.x + 1, (dest_height - 1) / block_.y + 1);
PadResizeNormalKernal<<<grid, block_>>>(src_dev, src_width, src_height, dest_dev, dest_width, dest_height,
paste_roi.width, paste_roi.height, means[2],means[1], means[0], vars[2], vars[1], vars[0]);
cudaDeviceSynchronize();
return SUCCESS;
} |
ba3ba2dd26fd561ca4d28aba0137dc331c002815.hip | // !!! This is a file automatically generated by hipify!!!
#include <catboost/cuda/cuda_util/kernel/sort_templ.cuh>
namespace NKernel {
template hipError_t RadixSort(bool* keys, uchar* values, ui32 size, TRadixSortContext& context, TCudaStream stream);
}
| ba3ba2dd26fd561ca4d28aba0137dc331c002815.cu | #include <catboost/cuda/cuda_util/kernel/sort_templ.cuh>
namespace NKernel {
template cudaError_t RadixSort(bool* keys, uchar* values, ui32 size, TRadixSortContext& context, TCudaStream stream);
}
|
5b23b1d1a8b0043b576beac29efcc18befbfe8bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_complex.h"
#include <stdio.h>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <fstream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <mutex>
#include <thread>
#include <complex>
struct dataPoint
{
int u;
int v;
std::complex<float> vis;
};
std::istream& operator>>(std::ifstream& input, struct dataPoint* data) {
char x;
input >> data->u;
input >> x;
if (x != ',') { input.setstate(std::ios_base::failbit); }
input >> data->v;
input >> x;
if (x != ',') { input.setstate(std::ios_base::failbit); }
input >> data->vis;
return input;
}
__global__ void updateWithRowShift(cuFloatComplex* dev_matrix, hipTextureObject_t dev_vector, const int matrix_dim, const int shift)
{
int start_index = blockIdx.y * blockDim.x * matrix_dim + blockIdx.x * blockDim.x + threadIdx.x;
int vectorPos = (shift * (blockIdx.y * blockDim.x) + threadIdx.x + blockIdx.x * blockDim.x) % matrix_dim;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix[start_index + i * matrix_dim].x += tex2D<float>(dev_vector, 2*vectorPos, 0);
dev_matrix[start_index + i * matrix_dim].y += tex2D<float>(dev_vector, 2*vectorPos + 1, 0);
vectorPos += shift;
vectorPos %= matrix_dim;
}
}
__global__ void updateWithColumnShift(cuFloatComplex* dev_matrix, hipTextureObject_t dev_vector, const int matrix_dim, const int shift)
{
int start_index = blockIdx.y * blockDim.x * matrix_dim + blockIdx.x * blockDim.x + threadIdx.x;
int vectorPos = (shift * (threadIdx.x + blockDim.x * blockIdx.x) + blockIdx.y * blockDim.x) % matrix_dim;
cuFloatComplex dev_matrix_point;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix_point = dev_matrix[start_index + i * matrix_dim];
dev_matrix_point.x += tex2D<float>(dev_vector, 2 * vectorPos, 0);
dev_matrix_point.y += tex2D<float>(dev_vector, 2 * vectorPos + 1, 0);
dev_matrix[start_index + i * matrix_dim] = dev_matrix_point;
vectorPos++;
vectorPos %= matrix_dim;
}
}
__global__ void divideN2(cuFloatComplex* dev_matrix, const int matrix_dim) {
int start_index = blockIdx.x * blockDim.x * matrix_dim + blockIdx.y * blockDim.x + threadIdx.x;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix[start_index + i * matrix_dim].x /= (matrix_dim * matrix_dim);
dev_matrix[start_index + i * matrix_dim].y /= (matrix_dim * matrix_dim);
}
}
__global__ void sumResults(cuFloatComplex* target, cuFloatComplex* source, const int matrix_dim) {
int start_index = blockIdx.x * blockDim.x * matrix_dim + blockIdx.y * blockDim.x + threadIdx.x;
for (int i = 0; i < blockDim.x; i++) {
target[start_index + i * matrix_dim].x += source[start_index + i * matrix_dim].x;
target[start_index + i * matrix_dim].y += source[start_index + i * matrix_dim].y;
}
}
class spift
{
public:
spift(const int matrixDim, const int blockDim, const int GPU_index[], const int nrGPUs, const int concurrency);
~spift();
spift(const spift&) = delete;
hipError_t prepareGPU(int pos);
hipError_t initTexture(int pos);
hipError_t iterate(int pos, int start, int end);
hipError_t iteration(int shift, int pos);
void splitIteration();
hipError_t getResult(int pos);
bool shiftType(int u, int v);
int shiftIndex(int u, int v, bool isRowShift);
float* computeShift(int u, int v, std::complex<float> vis, bool isRowShift);
void initTwiddle();
void writeToFile(std::ofstream* file);
void printResult();
bool testResult(std::string original);
void launchRead();
void readInput(int threadid);
hipError_t combine2Cards(int card1, int card2);
hipError_t aggregateResult();
void initResult();
void initCoalescence();
void getShiftVector(int i, int pos);
private:
//the dimensions of the grid and blocks
const int matrixDim;
const int blockDim;
dim3* blockDim3;
dim3* gridDim3;
//the Matrix where the result is loaded into in the end
cuFloatComplex* result;
//the device-matrix, where the current state is saved
cuFloatComplex** dev_matrix;
//the texture object where the current shift is saved during kernel execution
hipTextureObject_t** texObj;
//the data in texobj
hipArray** *cuArray;
int readThreads;
//the aggregation of shifts, first half are rowShifts
float** coalescence;
//the index, where it is saved, wheter data is aggregated for this shift
int* coalescenceSet;
std::mutex** shiftIndexMutex;
//boolean wheter execution is done
int* done;
//the Index of the GPU on which it is executed
int *GPUIndex;
const int nrGPUS;
//the precomputed twiddleFactors
std::complex<float>* twiddleFactors;
std::ifstream* inputStream;
//testing
int count;
//measuring the execution time
unsigned long long durationTotal=0;
unsigned long long durationShiftProcessing=0;
unsigned long long durationShiftProcessingGenerating = 0;
unsigned long long durationShiftProcessingType = 0;
unsigned long long durationShiftProcessingShift=0;
unsigned long long durationShiftProcessingVector=0;
unsigned long long durationShiftAggregating=0;
unsigned long long durationRead=0;
unsigned long long durationFinal=0;
unsigned long long durationWhileRead=0;
unsigned long long durationUpload[6] = {0, 0, 0, 0, 0, 0};
unsigned long long durationFinalPrep=0;
unsigned long long durationRow[6] = { 0, 0, 0, 0, 0, 0 };
unsigned long long durationColumn[6] = { 0, 0, 0, 0, 0, 0 };
long nrUpdatesFinal = 0;
long nrRowUpdates = 0;
std::mutex* readMutex;
std::mutex* fileReadMutex;
int** input;
int total;
int length_input;
};
spift::spift(const int matrixDim, const int blockDim, const int GPU_index[], const int nrGPUs, const int concurrency) : matrixDim(matrixDim), blockDim(blockDim), nrGPUS(nrGPUs), readThreads(concurrency)
{
this->initResult();
this->initCoalescence();
this->initTwiddle();
//this->inputStream = new std::ifstream("testData1024.txt");
struct dataPoint next;
int nr_blocks = ceil((double)matrixDim / (double)this->blockDim);
this->blockDim3 = new dim3(this->blockDim, 1);
this->gridDim3 = new dim3(nr_blocks, nr_blocks);
hipError_t cudaStatus;
this->dev_matrix = new cuFloatComplex * [nrGPUs];
this->cuArray = new hipArray** [nrGPUs];
this->texObj = new hipTextureObject_t*[nrGPUs];
this->GPUIndex = new int[nrGPUs];
for (int i = 0; i < nrGPUs; ++i) {
this->GPUIndex[i] = GPU_index[i];
this->cuArray[i] = new hipArray * ();
this->texObj[i] = new hipTextureObject_t();
cudaStatus = prepareGPU(i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "GPU init failed: %s\n", hipGetErrorString(cudaStatus));
return;
}
cudaStatus = this->initTexture(i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "GPU texture init failed: %s\n", hipGetErrorString(cudaStatus));
return;
}
}
//this->total = this->matrixDim * this->matrixDim;
//testing
this->total = 4194304;
this->readMutex = new std::mutex();
//this->fileReadMutex = new std::mutex();
this->input = new int* [readThreads];
this->length_input = (int)(::ceil(2 * this->total / this->readThreads) * 1.5);
for (int j = 0; j < this->readThreads; j++) {
this->input[j] = new int[length_input];
for (int i = 0; i < length_input; i++) {
this->input[j][i] = std::rand() % this->matrixDim;
}
}
}
void spift::initResult() {
this->result = new cuFloatComplex[this->matrixDim * this->matrixDim];
//allocate memory for result
for (int x = 0; x < this->matrixDim * this->matrixDim; ++x) {
cuFloatComplex next;
next.x = 0;
next.y = 0;
this->result[x] = next;
}
}
void spift::initCoalescence()
{
this->shiftIndexMutex = new std::mutex*[matrixDim * 2];
this->done = new int(0);
this->coalescenceSet = new int[matrixDim * 2];
this->coalescence = new float* [matrixDim * 2];
for (int i = 0; i < matrixDim * 2; ++i) {
this->coalescenceSet[i] = 0;
this->shiftIndexMutex[i] = new std::mutex();
this->coalescence[i] = 0;
//this->coalescence[i] = new float[matrixDim * 2];
}
}
hipError_t spift::prepareGPU(int pos) {
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(this->GPUIndex[pos]);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cudaStatus;
}
// Allocate GPU buffers for matrix
cudaStatus = hipMalloc((void**)&(this->dev_matrix[pos]), this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Copy input Matrix from host memory to GPU buffers.
cudaStatus = hipSetDevice(this->GPUIndex[pos]);
cudaStatus = hipMemcpy((this->dev_matrix[pos]), this->result, this->matrixDim * this->matrixDim * sizeof(cuFloatComplex), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
// Allocate CUDA array in device memory
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipMallocArray(cuArray[pos], &channelDesc, matrixDim * 2, 1);
return hipSuccess;
}
void spift::writeToFile(std::ofstream* file) {
long long durationRowUpdatelocal = 0;
long long durationColumnUpdatelocal = 0;
long long durationUploadLocal = 0;
for (int i = 0; i < nrGPUS; ++i) {
durationRowUpdatelocal += this->durationRow[i];
durationColumnUpdatelocal += this->durationColumn[i];
durationUploadLocal += this->durationUpload[i];
}
*file << this->readThreads << "\t" << this->matrixDim << "\t" << this->nrGPUS << "\t" << this->durationTotal << "\t" << this->count << "\t" << this->durationWhileRead << "\t" << this->durationFinal << "\t" << this->durationFinalPrep << "\t"<< durationRowUpdatelocal << "\t" << durationColumnUpdatelocal << "\t" << this->nrRowUpdates << "\t" << durationUploadLocal << "\t" << this->durationRead << "\t" << this->durationShiftProcessing << "\t" << this->durationShiftAggregating << "\t" << this->durationShiftProcessingType << "\t" << this->durationShiftProcessingShift << "\t" << this->durationShiftProcessingVector << "\t" << this->durationShiftProcessingGenerating << "\t" << this->nrUpdatesFinal << "\t" << this->blockDim << std::endl;
//*file << this->readThreads << "\t" << this->matrixDim << "\t" << this->nrGPUS << "\t" << this->blockDim << "\t" << this->durationTotal << "\t"<< durationRowUpdatelocal << "\t" << durationColumnUpdatelocal << "\t" << this->nrRowUpdates << "\t" << std::endl;
}
void spift::printResult() {
for (int i = 0; i < matrixDim; ++i) {
for (int j = 0; j < matrixDim; ++j) {
std::cout << "(" << std::setfill(' ') << std::setw(4) << std::roundf(this->result[this->matrixDim * i + j].x * 100) / 100 << ", " << std::setfill(' ') << std::setw(4) << std::roundf(this->result[this->matrixDim * i + j].y * 100) / 100 << "),\t";
}
std::cout << std::endl;
}
}
bool spift::testResult(std::string original){
std::ifstream originalFile(original);
double pos;
int count = 0;
double totalError = 0.0;
for (int i = 0; i < this->matrixDim; ++i) {
for (int j = 0; j < this->matrixDim; ++j) {
originalFile >> pos;
if (abs(this->result[this->matrixDim * i + j].x - pos) > 0.0001 || abs(this->result[this->matrixDim * i + j].y) > 0.0001) {
count++;
}
totalError += abs(this->result[this->matrixDim * i + j].x - pos);
totalError += abs(this->result[this->matrixDim * i + j].y);
}
}
std::cout << this->matrixDim * this->matrixDim << ", " << count << ", totalError: " << totalError << ", averageError: " << totalError / (this->matrixDim * this->matrixDim) << std::endl;
return count == 0;
}
void spift::launchRead() {
std::thread** threads = new std::thread * [this->readThreads];
auto t1 = std::chrono::high_resolution_clock::now();
for (int i = 0; i < readThreads; ++i) {
threads[i] = new std::thread(&spift::readInput, this, i);
}
for (int i = 0; i < readThreads; ++i) {
threads[i]->join();
delete threads[i];
}
delete threads;
auto t2 = std::chrono::high_resolution_clock::now();
this->durationRead = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
}
void spift::readInput(int threadID) {
struct dataPoint next;
next.vis = std::complex<float>(std::rand(), std::rand());
int readPos = 0;
unsigned long long localdurationShiftProcessing = 0;
unsigned long long localdurationShiftAggregating = 0;
unsigned long long localdurationShiftProcessingGenerating = 0;
unsigned long long localdurationShiftProcessingType = 0;
unsigned long long localdurationShiftProcessingShift = 0;
unsigned long long localdurationShiftProcessingVector = 0;
int counter = 0;
//this->fileReadMutex->lock();
while(this->total > 0) {
auto t1 = std::chrono::high_resolution_clock::now();
this->total--;
/*
*(this->inputStream) >> &next;
this->fileReadMutex->unlock();
*/
next.u = this->input[threadID][readPos];
readPos++;
next.v = this->input[threadID][readPos];
readPos++;
readPos %= this->length_input;
int posiCoal;
auto t11 = std::chrono::high_resolution_clock::now();
bool isRowShift = this->shiftType(next.u, next.v);
auto t12 = std::chrono::high_resolution_clock::now();
int shiftIdx = this->shiftIndex(next.u, next.v, isRowShift);
auto t13 = std::chrono::high_resolution_clock::now();
float* vector = this->computeShift(next.u, next.v, next.vis, isRowShift);
if (isRowShift) { posiCoal = shiftIdx; }
else { posiCoal = shiftIdx + this->matrixDim; }
auto t2 = std::chrono::high_resolution_clock::now();
this->shiftIndexMutex[posiCoal]->lock();
if (this->coalescenceSet[posiCoal]) {
for (int j = 0; j < this->matrixDim * 2; ++j) {
this->coalescence[posiCoal][j] += vector[j];
}
delete vector;
}
else {
this->coalescence[posiCoal] = vector;
this->coalescenceSet[posiCoal] = 1;
}
this->shiftIndexMutex[posiCoal]->unlock();
auto t3 = std::chrono::high_resolution_clock::now();
// std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() << ", " << std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count() << std::endl;
localdurationShiftProcessing += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
localdurationShiftAggregating += std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count();
localdurationShiftProcessingGenerating += std::chrono::duration_cast<std::chrono::microseconds>(t11 - t1).count();
localdurationShiftProcessingType += std::chrono::duration_cast<std::chrono::microseconds>(t12 - t11).count();
localdurationShiftProcessingShift += std::chrono::duration_cast<std::chrono::microseconds>(t13- t12).count();
localdurationShiftProcessingVector += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t13).count();
counter++;
//this->fileReadMutex->lock();
}
//this->fileReadMutex->unlock();
//std::cout << "counter: " << localdurationShiftAggregating << ", " << localdurationShiftProcessing << std::endl;
*(this->done) = 1;
this->readMutex->lock();
this->durationShiftAggregating += localdurationShiftAggregating;
this->durationShiftProcessing += localdurationShiftProcessing;
this->durationShiftProcessingType += localdurationShiftProcessingType;
this->durationShiftProcessingGenerating += localdurationShiftProcessingGenerating;
this->durationShiftProcessingShift += localdurationShiftProcessingShift;
this->durationShiftProcessingVector += localdurationShiftProcessingVector;
this->readMutex->unlock();
}
hipError_t spift::combine2Cards(int sourceCard, int targetCard) {
cuFloatComplex* temp;
hipSetDevice(this->GPUIndex[targetCard]);
auto cudaStatus = hipMalloc((void**)&temp, this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Malloc for combine2Cards failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = hipMemcpyPeer(temp, this->GPUIndex[targetCard], this->dev_matrix[sourceCard], this->GPUIndex[sourceCard], this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Memcpy for combine2Cards failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
hipSetDevice(this->GPUIndex[targetCard]);
sumResults << <*gridDim3, * blockDim3 >> > (this->dev_matrix[targetCard], temp, this->matrixDim);
hipDeviceSynchronize();
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "sumResults launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
hipFree(temp);
return hipSuccess;
}
hipError_t spift::aggregateResult() {
hipError_t cudaStatus;
for (int i = 1; i < this->nrGPUS; ++i) {
cudaStatus = this->combine2Cards(i - 1, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Memcpy for combine2Cards failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
return hipSuccess;
}
void spift::splitIteration() {
    // Launches one iterate() worker per GPU, each responsible for a contiguous
    // range of the 2*matrixDim shift slots, then aggregates the per-GPU
    // matrices and copies the final result back to the host. Also records the
    // total and final-preparation wall-clock durations.
    this->count = 0;
    std::thread** threads = new std::thread*[this->nrGPUS];
    auto t1 = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < this->nrGPUS; ++i) {
        threads[i] = new std::thread(&spift::iterate, this, i, round((float)this->matrixDim * 2 / this->nrGPUS * i), round((float)this->matrixDim * 2 / this->nrGPUS * (i+1)));
    }
    for (int i = 0; i < this->nrGPUS; ++i) {
        threads[i]->join();
        delete threads[i];  // thread objects were leaked in the original
    }
    delete[] threads;
    auto t15 = std::chrono::high_resolution_clock::now();
    auto cudaStatus = this->aggregateResult();
    if (cudaStatus != hipSuccess) {
        // Original format string lacked the %s for hipGetErrorString.
        fprintf(stderr, "Aggregation failed: %s\n", hipGetErrorString(cudaStatus));
        return;
    }
    cudaStatus = this->getResult(this->nrGPUS-1);
    auto t2 = std::chrono::high_resolution_clock::now();
    this->durationTotal = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
    this->durationFinalPrep = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t15).count();
    //std::cout << "Total number of aggregations: " << this->count << std::endl;
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "getResult failed: %s\n", hipGetErrorString(cudaStatus));
        return;
    }
}
// Worker loop for one GPU: while the reader threads are still producing data
// (*done == 0), repeatedly scans the coalescence slots in [start, end) and
// flushes any slot with an aggregated shift vector to GPU `pos` via
// iteration(). After the readers finish, one final sweep drains the slots
// that were filled after this worker last visited them.
// NOTE(review): *done and coalescenceSet[] are plain ints written by other
// threads and read here without synchronization - presumably this relies on
// the mutex traffic for visibility; verify whether std::atomic is needed.
hipError_t spift::iterate(int pos, int start, int end) {
hipError_t cudaStatus = hipSuccess;
auto t1 = std::chrono::high_resolution_clock::now();
while(!*done) {
for (int shiftPos = start; shiftPos < end; ++shiftPos) {
// Only touch slots that have aggregated data waiting.
if (this->coalescenceSet[shiftPos]) {
//std::cout << shiftPos << std::endl;
// try_lock avoids blocking on a slot a reader is currently filling;
// iteration() releases this mutex when it completes.
if (this->shiftIndexMutex[shiftPos]->try_lock()) {
this->coalescenceSet[shiftPos] = 0;
cudaStatus = iteration(shiftPos, pos);
this->count++;
// Check for any errors in iteration
if (cudaStatus != hipSuccess) {
fprintf(stderr, "iteration %d failed: %s\n", shiftPos, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
}
}
}
auto t2 = std::chrono::high_resolution_clock::now();
// Final drain pass: flush slots filled between the last scan and *done
// being set. counter2 counts these late updates for the statistics.
int counter2 = 0;
for (int shiftPos = start; shiftPos < end; ++shiftPos) {
if (this->coalescenceSet[shiftPos]) {
if (this->shiftIndexMutex[shiftPos]->try_lock()) {
counter2++;
//std::cout << shiftPos << std::endl;
this->coalescenceSet[shiftPos] = 0;
cudaStatus = iteration(shiftPos, pos);
this->count++;
// Check for any errors in iteration
if (cudaStatus != hipSuccess) {
fprintf(stderr, "iteration %d failed: %s\n", shiftPos, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
}
}
this->nrUpdatesFinal += counter2;
auto t3 = std::chrono::high_resolution_clock::now();
// Split the timing into the read-concurrent phase and the drain phase.
this->durationWhileRead += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
this->durationFinal += std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count();
return cudaStatus;
}
hipError_t spift::getResult(int pos) {
    // Normalizes the matrix on GPU `pos` by 1/N^2 (divideN2 kernel) and copies
    // it back into the host-side `result` buffer.
    auto cudaStatus = hipSetDevice(this->GPUIndex[pos]);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    divideN2 << <*gridDim3, * blockDim3 >> > (dev_matrix[pos], this->matrixDim);
    // Check launch errors first, then wait and pick up execution errors (the
    // original overwrote the hipDeviceSynchronize() status unchecked).
    cudaStatus = hipGetLastError();
    if (cudaStatus == hipSuccess) {
        cudaStatus = hipDeviceSynchronize();
    }
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "divideN2 launch failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(result, dev_matrix[pos], matrixDim * matrixDim * sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        return cudaStatus;
    }
    return hipSuccess;
}
void spift::getShiftVector(int i, int pos) {
    // Uploads the aggregated shift vector for slot `i` (2*matrixDim interleaved
    // re/im floats) into the texture-backed array on GPU `pos`, then releases
    // the host copy. Upload time is accounted per GPU.
    auto t1 = std::chrono::high_resolution_clock::now();
    hipSetDevice(this->GPUIndex[pos]);
    // NOTE(review): hipMemcpyToArray is deprecated in newer HIP/CUDA releases;
    // consider hipMemcpy2DToArray when the toolchain is upgraded.
    auto cudaStatus = hipMemcpyToArray(*cuArray[pos], 0, 0, this->coalescence[i], matrixDim * 2 * sizeof(float), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "updateWithRowShift launch failed: %s\n", hipGetErrorString(cudaStatus));
    }
    // coalescence[i] was allocated with new float[] (see computeShift), so it
    // must be released with delete[] - plain delete is undefined behavior.
    delete[] this->coalescence[i];
    auto t2 = std::chrono::high_resolution_clock::now();
    this->durationUpload[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
}
hipError_t spift::iteration(int shift, int pos) {
    // Applies one aggregated shift vector on GPU `pos`. Slots [0, matrixDim)
    // are row shifts, [matrixDim, 2*matrixDim) are column shifts. The caller
    // holds shiftIndexMutex[shift]; it is released here on every exit path so
    // an error cannot leave the slot permanently locked.
    getShiftVector(shift, pos);
    hipSetDevice(this->GPUIndex[pos]);
    auto t1 = std::chrono::high_resolution_clock::now();
    hipError_t cudaStatus;
    if (shift < this->matrixDim) {
        updateWithRowShift << <*gridDim3, *blockDim3 >> > (dev_matrix[pos], *texObj[pos], this->matrixDim, shift);
        cudaStatus = hipSetDevice(this->GPUIndex[pos]);
        // Wait for the kernel and keep its status (the original discarded it).
        cudaStatus = hipDeviceSynchronize();
        auto t2 = std::chrono::high_resolution_clock::now();
        this->durationRow[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
        this->nrRowUpdates++;
    }
    else {
        updateWithColumnShift << <*gridDim3, *blockDim3 >> > (dev_matrix[pos], *texObj[pos], this->matrixDim, shift - this->matrixDim);
        // hipDeviceSynchronize waits for the kernel to finish, and returns
        // any errors encountered during the launch.
        cudaStatus = hipSetDevice(this->GPUIndex[pos]);
        cudaStatus = hipDeviceSynchronize();
        auto t2 = std::chrono::high_resolution_clock::now();
        this->durationColumn[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
    }
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d: %s after launching Kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
        this->shiftIndexMutex[shift]->unlock();
        return cudaStatus;
    }
    // Check for any errors launching the kernel (message no longer claims
    // updateWithRowShift when the column kernel was the one launched).
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "shift kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        this->shiftIndexMutex[shift]->unlock();
        return cudaStatus;
    }
    this->shiftIndexMutex[shift]->unlock();
    return hipSuccess;
}
hipError_t spift::initTexture(int pos) {
    // Creates the texture object on GPU `pos` backed by cuArray[pos], through
    // which the kernels read the uploaded shift vectors (unnormalized integer
    // coordinates, point sampling, clamped addressing).
    // Specify texture resource
    struct hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = *(this->cuArray[pos]);
    // Specify texture object parameters
    struct hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = hipAddressModeClamp;
    texDesc.addressMode[1] = hipAddressModeClamp;
    texDesc.filterMode = hipFilterModePoint;
    texDesc.readMode = hipReadModeElementType;
    texDesc.normalizedCoords = 0;
    // Create texture object
    hipSetDevice(this->GPUIndex[pos]);
    auto cudaStatus = hipCreateTextureObject(this->texObj[pos], &resDesc, &texDesc, NULL);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "textureInit failed: %s\n", hipGetErrorString(cudaStatus));
        // Propagate the failure (the original returned hipSuccess regardless).
        return cudaStatus;
    }
    return hipSuccess;
}
bool spift::shiftType(int u, int v) { //true: RowShift, false: ColumnShift
    // A column shift is chosen when v == 0, when u is odd while v is even, or
    // when v is even and gcd(u, N) < gcd(v, N); every other case is a row shift.
    const bool uOddVEven = (u % 2) && !(v % 2);
    const bool evenVSmallerGcd = (std::__gcd(u, this->matrixDim) < std::__gcd(v, this->matrixDim)) && v % 2 == 0;
    const bool columnShift = (v == 0) || uOddVEven || evenVSmallerGcd;
    return !columnShift;
}
int spift::shiftIndex(int u, int v, bool isRowShift) {
    // Solves numer == j * denom (mod matrixDim) for j by linear search; the
    // roles of u and v swap with the shift type. Index 0 covers the axis cases.
    if (u == 0 || v == 0) {
        return 0;
    }
    const int numer = isRowShift ? u : v;
    const int denom = isRowShift ? v : u;
    for (int j = 0; j <= this->matrixDim; ++j) {
        if (numer == (j * denom) % this->matrixDim) {
            return j;
        }
    }
    // No solution found - report the offending pair before bailing out.
    std::cout << u << ", " << v << ", " << isRowShift << std::endl;
    throw 15;
}
float* spift::computeShift(int u, int v, std::complex<float> vis, bool isRowShift) {
    // Builds the interleaved (re, im) shift vector of length 2*matrixDim:
    // entry j is vis * twiddle[(j * x) mod N], where x is the coordinate
    // orthogonal to the shift direction. Caller owns the returned new[] array.
    const int x = isRowShift ? v : u;
    float* shift = new float[this->matrixDim * 2];
    for (int j = 0; j < matrixDim; ++j) {
        const std::complex<float> value = vis * this->twiddleFactors[(j * x) % this->matrixDim];
        shift[2 * j] = value.real();
        shift[2 * j + 1] = value.imag();
    }
    return shift;
}
void spift::initTwiddle() {
    // Precompute the matrixDim twiddle factors e^(i * 2*pi*k / N) consumed by
    // computeShift.
    this->twiddleFactors = new std::complex<float>[this->matrixDim];
    for (int k = 0; k < this->matrixDim; ++k) {
        this->twiddleFactors[k] = ::exp(std::complex<float>(0, k * 2 * M_PI / this->matrixDim));
    }
}
spift::~spift()
{
    // Releases all host and device resources. Every array member is allocated
    // with new[] in the constructor / init helpers, so delete[] is required
    // (the original used plain delete, which is undefined behavior).
    for (int i = 0; i < this->matrixDim * 2; i++) {
        delete this->shiftIndexMutex[i];
    }
    delete[] this->shiftIndexMutex;
    delete[] this->result;
    for (int i = 0; i < this->nrGPUS; ++i) {
        hipSetDevice(this->GPUIndex[i]);
        hipFree(this->dev_matrix[i]);
        hipFreeArray(*(this->cuArray[i]));
        // texObj[i] is a host-side pointer to the texture handle: destroy the
        // device-side texture object, then free the host object. (The original
        // called hipFree() on this host pointer, which is invalid.)
        hipDestroyTextureObject(*(this->texObj[i]));
        delete this->texObj[i];
        delete this->cuArray[i];
        hipError_t cudaStatus = hipDeviceReset();
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipDeviceReset failed!");
        }
    }
    delete[] this->texObj;
    delete this->gridDim3;
    delete this->blockDim3;
    for (int j = 0; j < this->readThreads; j++) {
        delete[] this->input[j];
    }
    delete[] this->input;
    delete[] this->GPUIndex;
    delete[] this->dev_matrix;
    delete[] this->cuArray;
    delete this->readMutex;
    delete[] this->coalescenceSet;
    delete this->done;
    delete[] this->twiddleFactors;
    delete[] this->coalescence;
}
void parallel(const int nrGPUs, const int dim, std::ofstream *times, const int concurrency, const int blockDim) {
    // Runs one full experiment: a reader pipeline and an iteration pipeline
    // operate concurrently on the same spift instance; once both finish, the
    // measured timings are appended to the output stream.
    int gpuIndex[] = { 0,1,2,3,4,5};
    spift* tester = new spift(dim, blockDim, gpuIndex, nrGPUs, concurrency);
    std::thread reader(&spift::launchRead, tester);
    std::thread worker(&spift::splitIteration, tester);
    reader.join();
    worker.join();
    tester->writeToFile(times);
    delete tester;
}
int main()
{
    // Benchmark driver: sweeps concurrency levels, matrix dimensions (powers
    // of two) and GPU counts, writing one timing line per configuration to
    // timesGPU.txt. The original passed the mojibake token "×" where
    // "&times" was intended, which does not compile; fixed here.
    std::ofstream times;
    times.open("timesGPU.txt");
    // One sweep block: concurrency in [firstConc, lastConc] stepping by
    // concStep, exponent j in [10, maxExp], GPU count in [1, 6].
    auto sweep = [&times](const char* label, int firstConc, int lastConc, int concStep, int maxExp) {
        for (int concurrency = firstConc; concurrency <= lastConc; concurrency += concStep) {
            for (int j = 10; j <= maxExp; ++j) {
                for (int i = 1; i <= 6; i++) {
                    std::cout << label << ": concurrency: " << concurrency << ", dim: " << ::pow(2, j) << ", nrGPUS: " << i << std::endl;
                    parallel(i, ::pow(2, j), &times, concurrency, 128);
                }
            }
        }
    };
    sweep("Block B", 4, 28, 2, 12);   // concurrency 4..28 step 2, dim 2^10..2^12
    sweep("Block C", 30, 80, 5, 13);  // concurrency 30..80 step 5, dim 2^10..2^13
    sweep("Block A", 2, 3, 1, 12);    // concurrency 2..3, dim 2^10..2^12
    times.close();
    return 0;
}
| 5b23b1d1a8b0043b576beac29efcc18befbfe8bc.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuComplex.h"
#include <stdio.h>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <fstream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <mutex>
#include <thread>
#include <complex>
struct dataPoint
{
int u;
int v;
std::complex<float> vis;
};
std::istream& operator>>(std::ifstream& input, struct dataPoint* data) {
char x;
input >> data->u;
input >> x;
if (x != ',') { input.setstate(std::ios_base::failbit); }
input >> data->v;
input >> x;
if (x != ',') { input.setstate(std::ios_base::failbit); }
input >> data->vis;
return input;
}
__global__ void updateWithRowShift(cuFloatComplex* dev_matrix, cudaTextureObject_t dev_vector, const int matrix_dim, const int shift)
{
int start_index = blockIdx.y * blockDim.x * matrix_dim + blockIdx.x * blockDim.x + threadIdx.x;
int vectorPos = (shift * (blockIdx.y * blockDim.x) + threadIdx.x + blockIdx.x * blockDim.x) % matrix_dim;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix[start_index + i * matrix_dim].x += tex2D<float>(dev_vector, 2*vectorPos, 0);
dev_matrix[start_index + i * matrix_dim].y += tex2D<float>(dev_vector, 2*vectorPos + 1, 0);
vectorPos += shift;
vectorPos %= matrix_dim;
}
}
__global__ void updateWithColumnShift(cuFloatComplex* dev_matrix, cudaTextureObject_t dev_vector, const int matrix_dim, const int shift)
{
int start_index = blockIdx.y * blockDim.x * matrix_dim + blockIdx.x * blockDim.x + threadIdx.x;
int vectorPos = (shift * (threadIdx.x + blockDim.x * blockIdx.x) + blockIdx.y * blockDim.x) % matrix_dim;
cuFloatComplex dev_matrix_point;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix_point = dev_matrix[start_index + i * matrix_dim];
dev_matrix_point.x += tex2D<float>(dev_vector, 2 * vectorPos, 0);
dev_matrix_point.y += tex2D<float>(dev_vector, 2 * vectorPos + 1, 0);
dev_matrix[start_index + i * matrix_dim] = dev_matrix_point;
vectorPos++;
vectorPos %= matrix_dim;
}
}
__global__ void divideN2(cuFloatComplex* dev_matrix, const int matrix_dim) {
int start_index = blockIdx.x * blockDim.x * matrix_dim + blockIdx.y * blockDim.x + threadIdx.x;
for (int i = 0; i < blockDim.x; i++) {
dev_matrix[start_index + i * matrix_dim].x /= (matrix_dim * matrix_dim);
dev_matrix[start_index + i * matrix_dim].y /= (matrix_dim * matrix_dim);
}
}
__global__ void sumResults(cuFloatComplex* target, cuFloatComplex* source, const int matrix_dim) {
int start_index = blockIdx.x * blockDim.x * matrix_dim + blockIdx.y * blockDim.x + threadIdx.x;
for (int i = 0; i < blockDim.x; i++) {
target[start_index + i * matrix_dim].x += source[start_index + i * matrix_dim].x;
target[start_index + i * matrix_dim].y += source[start_index + i * matrix_dim].y;
}
}
class spift
{
public:
spift(const int matrixDim, const int blockDim, const int GPU_index[], const int nrGPUs, const int concurrency);
~spift();
spift(const spift&) = delete;
cudaError_t prepareGPU(int pos);
cudaError_t initTexture(int pos);
cudaError_t iterate(int pos, int start, int end);
cudaError_t iteration(int shift, int pos);
void splitIteration();
cudaError_t getResult(int pos);
bool shiftType(int u, int v);
int shiftIndex(int u, int v, bool isRowShift);
float* computeShift(int u, int v, std::complex<float> vis, bool isRowShift);
void initTwiddle();
void writeToFile(std::ofstream* file);
void printResult();
bool testResult(std::string original);
void launchRead();
void readInput(int threadid);
cudaError combine2Cards(int card1, int card2);
cudaError aggregateResult();
void initResult();
void initCoalescence();
void getShiftVector(int i, int pos);
private:
//the dimensions of the grid and blocks
const int matrixDim;
const int blockDim;
dim3* blockDim3;
dim3* gridDim3;
//the Matrix where the result is loaded into in the end
cuFloatComplex* result;
//the device-matrix, where the current state is saved
cuFloatComplex** dev_matrix;
//the texture object where the current shift is saved during kernel execution
cudaTextureObject_t** texObj;
//the data in texobj
cudaArray** *cuArray;
int readThreads;
//the aggregation of shifts, first half are rowShifts
float** coalescence;
//the index, where it is saved, wheter data is aggregated for this shift
int* coalescenceSet;
std::mutex** shiftIndexMutex;
//boolean wheter execution is done
int* done;
//the Index of the GPU on which it is executed
int *GPUIndex;
const int nrGPUS;
//the precomputed twiddleFactors
std::complex<float>* twiddleFactors;
std::ifstream* inputStream;
//testing
int count;
//measuring the execution time
unsigned long long durationTotal=0;
unsigned long long durationShiftProcessing=0;
unsigned long long durationShiftProcessingGenerating = 0;
unsigned long long durationShiftProcessingType = 0;
unsigned long long durationShiftProcessingShift=0;
unsigned long long durationShiftProcessingVector=0;
unsigned long long durationShiftAggregating=0;
unsigned long long durationRead=0;
unsigned long long durationFinal=0;
unsigned long long durationWhileRead=0;
unsigned long long durationUpload[6] = {0, 0, 0, 0, 0, 0};
unsigned long long durationFinalPrep=0;
unsigned long long durationRow[6] = { 0, 0, 0, 0, 0, 0 };
unsigned long long durationColumn[6] = { 0, 0, 0, 0, 0, 0 };
long nrUpdatesFinal = 0;
long nrRowUpdates = 0;
std::mutex* readMutex;
std::mutex* fileReadMutex;
int** input;
int total;
int length_input;
};
spift::spift(const int matrixDim, const int blockDim, const int GPU_index[], const int nrGPUs, const int concurrency) : matrixDim(matrixDim), blockDim(blockDim), nrGPUS(nrGPUs), readThreads(concurrency)
{
this->initResult();
this->initCoalescence();
this->initTwiddle();
//this->inputStream = new std::ifstream("testData1024.txt");
struct dataPoint next;
int nr_blocks = ceil((double)matrixDim / (double)this->blockDim);
this->blockDim3 = new dim3(this->blockDim, 1);
this->gridDim3 = new dim3(nr_blocks, nr_blocks);
cudaError_t cudaStatus;
this->dev_matrix = new cuFloatComplex * [nrGPUs];
this->cuArray = new cudaArray** [nrGPUs];
this->texObj = new cudaTextureObject_t*[nrGPUs];
this->GPUIndex = new int[nrGPUs];
for (int i = 0; i < nrGPUs; ++i) {
this->GPUIndex[i] = GPU_index[i];
this->cuArray[i] = new cudaArray * ();
this->texObj[i] = new cudaTextureObject_t();
cudaStatus = prepareGPU(i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "GPU init failed: %s\n", cudaGetErrorString(cudaStatus));
return;
}
cudaStatus = this->initTexture(i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "GPU texture init failed: %s\n", cudaGetErrorString(cudaStatus));
return;
}
}
//this->total = this->matrixDim * this->matrixDim;
//testing
this->total = 4194304;
this->readMutex = new std::mutex();
//this->fileReadMutex = new std::mutex();
this->input = new int* [readThreads];
this->length_input = (int)(std::ceil(2 * this->total / this->readThreads) * 1.5);
for (int j = 0; j < this->readThreads; j++) {
this->input[j] = new int[length_input];
for (int i = 0; i < length_input; i++) {
this->input[j][i] = std::rand() % this->matrixDim;
}
}
}
void spift::initResult() {
this->result = new cuFloatComplex[this->matrixDim * this->matrixDim];
//allocate memory for result
for (int x = 0; x < this->matrixDim * this->matrixDim; ++x) {
cuFloatComplex next;
next.x = 0;
next.y = 0;
this->result[x] = next;
}
}
void spift::initCoalescence()
{
this->shiftIndexMutex = new std::mutex*[matrixDim * 2];
this->done = new int(0);
this->coalescenceSet = new int[matrixDim * 2];
this->coalescence = new float* [matrixDim * 2];
for (int i = 0; i < matrixDim * 2; ++i) {
this->coalescenceSet[i] = 0;
this->shiftIndexMutex[i] = new std::mutex();
this->coalescence[i] = 0;
//this->coalescence[i] = new float[matrixDim * 2];
}
}
cudaError_t spift::prepareGPU(int pos) {
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(this->GPUIndex[pos]);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cudaStatus;
}
// Allocate GPU buffers for matrix
cudaStatus = cudaMalloc((void**)&(this->dev_matrix[pos]), this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Copy input Matrix from host memory to GPU buffers.
cudaStatus = cudaSetDevice(this->GPUIndex[pos]);
cudaStatus = cudaMemcpy((this->dev_matrix[pos]), this->result, this->matrixDim * this->matrixDim * sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
// Allocate CUDA array in device memory
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaMallocArray(cuArray[pos], &channelDesc, matrixDim * 2, 1);
return cudaSuccess;
}
void spift::writeToFile(std::ofstream* file) {
long long durationRowUpdatelocal = 0;
long long durationColumnUpdatelocal = 0;
long long durationUploadLocal = 0;
for (int i = 0; i < nrGPUS; ++i) {
durationRowUpdatelocal += this->durationRow[i];
durationColumnUpdatelocal += this->durationColumn[i];
durationUploadLocal += this->durationUpload[i];
}
*file << this->readThreads << "\t" << this->matrixDim << "\t" << this->nrGPUS << "\t" << this->durationTotal << "\t" << this->count << "\t" << this->durationWhileRead << "\t" << this->durationFinal << "\t" << this->durationFinalPrep << "\t"<< durationRowUpdatelocal << "\t" << durationColumnUpdatelocal << "\t" << this->nrRowUpdates << "\t" << durationUploadLocal << "\t" << this->durationRead << "\t" << this->durationShiftProcessing << "\t" << this->durationShiftAggregating << "\t" << this->durationShiftProcessingType << "\t" << this->durationShiftProcessingShift << "\t" << this->durationShiftProcessingVector << "\t" << this->durationShiftProcessingGenerating << "\t" << this->nrUpdatesFinal << "\t" << this->blockDim << std::endl;
//*file << this->readThreads << "\t" << this->matrixDim << "\t" << this->nrGPUS << "\t" << this->blockDim << "\t" << this->durationTotal << "\t"<< durationRowUpdatelocal << "\t" << durationColumnUpdatelocal << "\t" << this->nrRowUpdates << "\t" << std::endl;
}
void spift::printResult() {
for (int i = 0; i < matrixDim; ++i) {
for (int j = 0; j < matrixDim; ++j) {
std::cout << "(" << std::setfill(' ') << std::setw(4) << std::roundf(this->result[this->matrixDim * i + j].x * 100) / 100 << ", " << std::setfill(' ') << std::setw(4) << std::roundf(this->result[this->matrixDim * i + j].y * 100) / 100 << "),\t";
}
std::cout << std::endl;
}
}
bool spift::testResult(std::string original){
std::ifstream originalFile(original);
double pos;
int count = 0;
double totalError = 0.0;
for (int i = 0; i < this->matrixDim; ++i) {
for (int j = 0; j < this->matrixDim; ++j) {
originalFile >> pos;
if (abs(this->result[this->matrixDim * i + j].x - pos) > 0.0001 || abs(this->result[this->matrixDim * i + j].y) > 0.0001) {
count++;
}
totalError += abs(this->result[this->matrixDim * i + j].x - pos);
totalError += abs(this->result[this->matrixDim * i + j].y);
}
}
std::cout << this->matrixDim * this->matrixDim << ", " << count << ", totalError: " << totalError << ", averageError: " << totalError / (this->matrixDim * this->matrixDim) << std::endl;
return count == 0;
}
void spift::launchRead() {
std::thread** threads = new std::thread * [this->readThreads];
auto t1 = std::chrono::high_resolution_clock::now();
for (int i = 0; i < readThreads; ++i) {
threads[i] = new std::thread(&spift::readInput, this, i);
}
for (int i = 0; i < readThreads; ++i) {
threads[i]->join();
delete threads[i];
}
delete threads;
auto t2 = std::chrono::high_resolution_clock::now();
this->durationRead = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
}
void spift::readInput(int threadID) {
struct dataPoint next;
next.vis = std::complex<float>(std::rand(), std::rand());
int readPos = 0;
unsigned long long localdurationShiftProcessing = 0;
unsigned long long localdurationShiftAggregating = 0;
unsigned long long localdurationShiftProcessingGenerating = 0;
unsigned long long localdurationShiftProcessingType = 0;
unsigned long long localdurationShiftProcessingShift = 0;
unsigned long long localdurationShiftProcessingVector = 0;
int counter = 0;
//this->fileReadMutex->lock();
while(this->total > 0) {
auto t1 = std::chrono::high_resolution_clock::now();
this->total--;
/*
*(this->inputStream) >> &next;
this->fileReadMutex->unlock();
*/
next.u = this->input[threadID][readPos];
readPos++;
next.v = this->input[threadID][readPos];
readPos++;
readPos %= this->length_input;
int posiCoal;
auto t11 = std::chrono::high_resolution_clock::now();
bool isRowShift = this->shiftType(next.u, next.v);
auto t12 = std::chrono::high_resolution_clock::now();
int shiftIdx = this->shiftIndex(next.u, next.v, isRowShift);
auto t13 = std::chrono::high_resolution_clock::now();
float* vector = this->computeShift(next.u, next.v, next.vis, isRowShift);
if (isRowShift) { posiCoal = shiftIdx; }
else { posiCoal = shiftIdx + this->matrixDim; }
auto t2 = std::chrono::high_resolution_clock::now();
this->shiftIndexMutex[posiCoal]->lock();
if (this->coalescenceSet[posiCoal]) {
for (int j = 0; j < this->matrixDim * 2; ++j) {
this->coalescence[posiCoal][j] += vector[j];
}
delete vector;
}
else {
this->coalescence[posiCoal] = vector;
this->coalescenceSet[posiCoal] = 1;
}
this->shiftIndexMutex[posiCoal]->unlock();
auto t3 = std::chrono::high_resolution_clock::now();
// std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() << ", " << std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count() << std::endl;
localdurationShiftProcessing += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
localdurationShiftAggregating += std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count();
localdurationShiftProcessingGenerating += std::chrono::duration_cast<std::chrono::microseconds>(t11 - t1).count();
localdurationShiftProcessingType += std::chrono::duration_cast<std::chrono::microseconds>(t12 - t11).count();
localdurationShiftProcessingShift += std::chrono::duration_cast<std::chrono::microseconds>(t13- t12).count();
localdurationShiftProcessingVector += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t13).count();
counter++;
//this->fileReadMutex->lock();
}
//this->fileReadMutex->unlock();
//std::cout << "counter: " << localdurationShiftAggregating << ", " << localdurationShiftProcessing << std::endl;
*(this->done) = 1;
this->readMutex->lock();
this->durationShiftAggregating += localdurationShiftAggregating;
this->durationShiftProcessing += localdurationShiftProcessing;
this->durationShiftProcessingType += localdurationShiftProcessingType;
this->durationShiftProcessingGenerating += localdurationShiftProcessingGenerating;
this->durationShiftProcessingShift += localdurationShiftProcessingShift;
this->durationShiftProcessingVector += localdurationShiftProcessingVector;
this->readMutex->unlock();
}
cudaError spift::combine2Cards(int sourceCard, int targetCard) {
cuFloatComplex* temp;
cudaSetDevice(this->GPUIndex[targetCard]);
auto cudaStatus = cudaMalloc((void**)&temp, this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Malloc for combine2Cards failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = cudaMemcpyPeer(temp, this->GPUIndex[targetCard], this->dev_matrix[sourceCard], this->GPUIndex[sourceCard], this->matrixDim * this->matrixDim * sizeof(cuFloatComplex));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Memcpy for combine2Cards failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaSetDevice(this->GPUIndex[targetCard]);
sumResults << <*gridDim3, * blockDim3 >> > (this->dev_matrix[targetCard], temp, this->matrixDim);
cudaDeviceSynchronize();
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sumResults launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaFree(temp);
return cudaSuccess;
}
cudaError spift::aggregateResult() {
cudaError cudaStatus;
for (int i = 1; i < this->nrGPUS; ++i) {
cudaStatus = this->combine2Cards(i - 1, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Memcpy for combine2Cards failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
return cudaSuccess;
}
void spift::splitIteration() {
this->count = 0;
std::thread** threads = new std::thread*[this->nrGPUS];
auto t1 = std::chrono::high_resolution_clock::now();
for (int i = 0; i < this->nrGPUS; ++i) {
threads[i] = new std::thread(&spift::iterate, this, i, round((float)this->matrixDim * 2 / this->nrGPUS * i), round((float)this->matrixDim * 2 / this->nrGPUS * (i+1)));
}
for (int i = 0; i < this->nrGPUS; ++i) {
threads[i]->join();
}
auto t15 = std::chrono::high_resolution_clock::now();
auto cudaStatus = this->aggregateResult();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Aggregation failed\n", cudaGetErrorString(cudaStatus));
return;
}
cudaStatus = this->getResult(this->nrGPUS-1);
auto t2 = std::chrono::high_resolution_clock::now();
this->durationTotal = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
this->durationFinalPrep = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t15).count();
//std::cout << "Total number of aggregations: " << this->count << std::endl;
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "getResult failed\n", cudaGetErrorString(cudaStatus));
return;
}
}
cudaError_t spift::iterate(int pos, int start, int end) {
cudaError_t cudaStatus = cudaSuccess;
auto t1 = std::chrono::high_resolution_clock::now();
while(!*done) {
for (int shiftPos = start; shiftPos < end; ++shiftPos) {
if (this->coalescenceSet[shiftPos]) {
//std::cout << shiftPos << std::endl;
if (this->shiftIndexMutex[shiftPos]->try_lock()) {
this->coalescenceSet[shiftPos] = 0;
cudaStatus = iteration(shiftPos, pos);
this->count++;
// Check for any errors in iteration
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "iteration %d failed: %s\n", shiftPos, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
}
}
}
auto t2 = std::chrono::high_resolution_clock::now();
int counter2 = 0;
for (int shiftPos = start; shiftPos < end; ++shiftPos) {
if (this->coalescenceSet[shiftPos]) {
if (this->shiftIndexMutex[shiftPos]->try_lock()) {
counter2++;
//std::cout << shiftPos << std::endl;
this->coalescenceSet[shiftPos] = 0;
cudaStatus = iteration(shiftPos, pos);
this->count++;
// Check for any errors in iteration
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "iteration %d failed: %s\n", shiftPos, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
}
}
this->nrUpdatesFinal += counter2;
auto t3 = std::chrono::high_resolution_clock::now();
this->durationWhileRead += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
this->durationFinal += std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count();
return cudaStatus;
}
cudaError_t spift::getResult(int pos) {
// Copy output vector from GPU buffer to host memory.
auto cudaStatus = cudaSetDevice(this->GPUIndex[pos]);
divideN2 << <*gridDim3, * blockDim3 >> > (dev_matrix[pos], this->matrixDim);
cudaStatus = cudaDeviceSynchronize();
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "divideN2 launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = cudaMemcpy(result, dev_matrix[pos], matrixDim * matrixDim * sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
return cudaSuccess;
}
void spift::getShiftVector(int i, int pos) {
auto t1 = std::chrono::high_resolution_clock::now();
cudaSetDevice(this->GPUIndex[pos]);
auto cudaStatus = cudaMemcpyToArray(*cuArray[pos], 0, 0, this->coalescence[i], matrixDim * 2 * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "updateWithRowShift launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
delete this->coalescence[i];
auto t2= std::chrono::high_resolution_clock::now();
this->durationUpload[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
}
cudaError spift::iteration(int shift, int pos) {
getShiftVector(shift, pos);
cudaSetDevice(this->GPUIndex[pos]);
auto t1 = std::chrono::high_resolution_clock::now();
cudaError cudaStatus;
if (shift < this->matrixDim) {
updateWithRowShift << <*gridDim3, *blockDim3 >> > (dev_matrix[pos], *texObj[pos], this->matrixDim, shift);
cudaStatus = cudaSetDevice(this->GPUIndex[pos]);
cudaStatus = cudaDeviceSynchronize();
auto t2 = std::chrono::high_resolution_clock::now();
this->durationRow[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
this->nrRowUpdates++;
}
else {
updateWithColumnShift << <*gridDim3, *blockDim3 >> > (dev_matrix[pos], *texObj[pos], this->matrixDim, shift - this->matrixDim);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaSetDevice(this->GPUIndex[pos]);
cudaStatus = cudaDeviceSynchronize();
auto t2 = std::chrono::high_resolution_clock::now();
this->durationColumn[pos] += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "updateWithRowShift launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d: %s after launching Kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
this->shiftIndexMutex[shift]->unlock();
return cudaSuccess;
}
cudaError_t spift::initTexture(int pos) {
// Specify texture
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = *(this->cuArray[pos]);
// Specify texture object parameters
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 0;
// Create texture object
cudaSetDevice(this->GPUIndex[pos]);
auto cudaStatus = cudaCreateTextureObject(this->texObj[pos], &resDesc, &texDesc, NULL);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "textureInit failed: %s\n", cudaGetErrorString(cudaStatus));
}
return cudaSuccess;
}
bool spift::shiftType(int u, int v) { //true: RowShift, false: ColumnShift
return !(v == 0 || ((u % 2) && !(v % 2)) || (std::__gcd(u,this->matrixDim) < std::__gcd(v, this->matrixDim)) && v % 2 == 0);
}
int spift::shiftIndex(int u, int v, bool isRowShift) {
int uk;
int vk;
if (u == 0 || v == 0) {
return 0;
}
if (isRowShift) {
uk = u;
vk = v;
}
else
{
uk = v;
vk = u;
}
for (int j = 0; j <= this->matrixDim; ++j) {
if (uk == (j * vk) % this->matrixDim) {
return j;
}
}
std::cout << u << ", " << v << ", " << isRowShift << std::endl;
throw 15;
}
float* spift::computeShift(int u, int v, std::complex<float> vis, bool isRowShift) {
int x;
float* shift = new float[this->matrixDim * 2];
if (isRowShift) { x = v; }
else { x = u; }
for (int j = 0; j < matrixDim; ++j) {
std::complex<float> next = vis * this->twiddleFactors[(j * x) % this->matrixDim];
shift[2 * j] = next.real();
shift[2 * j + 1] = next.imag();
}
return shift;
}
void spift::initTwiddle() {
this->twiddleFactors = new std::complex<float>[this->matrixDim];
for(int k = 0; k < this->matrixDim; ++k){
std::complex<float> next = std::exp(std::complex<float>(0, k * 2 * M_PI / this->matrixDim));
this->twiddleFactors[k] = next;
}
}
spift::~spift()
{
for (int i = 0; i < this->matrixDim * 2; i++) {
delete this->shiftIndexMutex[i];
}
delete (this->shiftIndexMutex);
delete (this->result);
for (int i = 0; i < this->nrGPUS; ++i) {
cudaSetDevice(this->GPUIndex[i]);
cudaFree(this->dev_matrix[i]);
cudaFreeArray(*(this->cuArray[i]));
cudaFree(this->texObj[i]);
cudaError cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
}
}
delete this->texObj;
delete this->gridDim3;
delete this->blockDim3;
for (int j = 0; j < this->readThreads; j++) {
delete this->input[j];
}
delete this->input;
delete this->GPUIndex;
delete (this->dev_matrix);
delete (this->cuArray);
delete this->readMutex;
delete (this->coalescenceSet);
delete (this->done);
delete this->twiddleFactors;
delete this->coalescence;
}
void parallel(const int nrGPUs, const int dim, std::ofstream *times, const int concurrency, const int blockDim) {
int gpuIndex[] = { 0,1,2,3,4,5};
spift* tester = new spift(dim, blockDim, gpuIndex, nrGPUs, concurrency);
//tester->launchRead();
//tester->splitIteration();
//std::cout << concurrency << "\t" << dim << "\t" << nrGPUs << std::endl;
std::thread shifts(&spift::launchRead, tester);
std::thread iter(&spift::splitIteration, tester);
shifts.join();
iter.join();
tester->writeToFile(times);
/*
if (tester->testResult("originalData1024.txt")) {
std::cout << "success" << std::endl;
}
else
{
std::cout << "failed" << std::endl;
}
*/
//tester->printResult();
delete tester;
}
int main()
{
std::ofstream times;
times.open("timesGPU.txt");
//parallel(6, std::pow(2, 12), ×, 50, 1024);
/*
for (int i = 1; i <= 1; i++) {
parallel(6, std::pow(2, 10), ×, 50, 128);
}
*/
//parallel(6, std::pow(2, 11), ×, 40, 128);
for (int concurrency = 4; concurrency < 30; concurrency += 2) {
for (int j = 10; j < 13; ++j) {
for (int i = 1; i <= 6; i++) {
std::cout << "Block B: concurrency: " << concurrency << ", dim: " << std::pow(2, j) << ", nrGPUS: " << i << std::endl;
parallel(i, std::pow(2, j), ×, concurrency,128);
}
}
}
for (int concurrency = 30; concurrency <= 80; concurrency += 5) {
for (int j = 10; j < 14; ++j) {
for (int i = 1; i <= 6; i++) {
std::cout << "Block C: concurrency: " << concurrency << ", dim: " << std::pow(2, j) << ", nrGPUS: " << i << std::endl;
parallel(i, std::pow(2, j), ×, concurrency,128);
}
}
}
for (int concurrency = 2; concurrency < 4; concurrency += 1) {
for (int j = 10; j < 13; ++j) {
for (int i = 1; i <= 6; i++) {
std::cout << "Block A: concurrency: " << concurrency << ", dim: " << std::pow(2, j) << ", nrGPUS: " << i << std::endl;
parallel(i, std::pow(2, j), ×, concurrency, 128);
}
}
}
times.close();
return 0;
}
|
ed04fc934fbb748a9c446f3e33967284ac9c1a89.hip | // !!! This is a file automatically generated by hipify!!!
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
extern "C"
{
//kernel code
__global__ void ResetLayerKernel(
float *layer,
float value,
int count
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < count)
{
layer[threadId] = value;
}
}
} | ed04fc934fbb748a9c446f3e33967284ac9c1a89.cu | #define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
extern "C"
{
//kernel code
__global__ void ResetLayerKernel(
float *layer,
float value,
int count
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < count)
{
layer[threadId] = value;
}
}
} |
8600ba68f85611e4db2eaf471755f3afbcd8c8ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This algorithm was adapted from SegAlign's Ungapped Extender authored by
* Sneha Goenka (gsneha@stanford.edu) and Yatish Turakhia (yturakhi@ucsc.edu).
* Source code for original implementation and use in SegAlign can be found
* here: https://github.com/gsneha26/SegAlign
* Description of the algorithm and original implementation can be found in the SegAlign
* paper published in SC20 (https://doi.ieeecomputersociety.org/10.1109/SC41405.2020.00043)
*/
#include "ungapped_xdrop_kernels.cuh"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudaextender
{
// extend the seed values to a segment by ungapped x-drop method, adjust low-scoring
// segment scores based on entropy factor, compare resulting segment scores
// to score_threshold and update the d_scored_segment_pairs and d_done vectors
__global__ void find_high_scoring_segment_pairs(const int8_t* __restrict__ d_target,
const int32_t target_length,
const int8_t* __restrict__ d_query,
const int32_t query_length,
const int32_t* d_sub_mat,
const bool no_entropy,
const int32_t xdrop_threshold,
const int32_t score_threshold,
const SeedPair* d_seed_pairs,
const int32_t num_seed_pairs,
const int32_t start_index,
ScoredSegmentPair* d_scored_segment_pairs,
int32_t* d_done)
{
// TODO - Kernel optimizations -
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/579
// TODO - Following variables are an artifact of the hardcoded encoding scheme with a fixed
// scoring matrix and a fixed alphabet. Will be replaced with cudasequence API.
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/574
constexpr int32_t nuc = 8;
constexpr int32_t nuc2 = 64;
constexpr int32_t nuc_entropy = 4;
constexpr int32_t num_warps = 4;
constexpr int32_t warp_size = 32;
const int32_t lane_id = threadIdx.x % warp_size;
const int32_t warp_id = (threadIdx.x - lane_id) / warp_size;
const float ln_4 = log(4.0f);
__shared__ int32_t ref_loc[num_warps];
__shared__ int32_t query_loc[num_warps];
__shared__ int32_t total_score[num_warps];
__shared__ int32_t prev_score[num_warps];
__shared__ int32_t prev_max_score[num_warps];
__shared__ int32_t prev_max_pos[num_warps];
__shared__ bool edge_found[num_warps];
__shared__ bool xdrop_found[num_warps];
__shared__ bool find_ssp[num_warps];
__shared__ bool new_max_found[num_warps];
__shared__ int32_t left_extent[num_warps];
__shared__ int32_t extent[num_warps];
__shared__ int32_t tile[num_warps];
__shared__ int32_t sub_mat[nuc2];
// TODO - Accuracy exploration required to check if the following member can be a float and to
// check the condition for entropy calculation.
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/575
__shared__ double entropy[num_warps];
if (threadIdx.x < nuc2)
{
sub_mat[threadIdx.x] = d_sub_mat[threadIdx.x];
}
__syncthreads();
for (int32_t hid0 = blockIdx.x * num_warps; hid0 < num_seed_pairs; hid0 += num_warps * gridDim.x)
{
short count[nuc_entropy] = {0};
short count_del[nuc_entropy] = {0};
const int32_t hid = hid0 + warp_id + start_index;
if (lane_id == 0)
{
if (hid < num_seed_pairs)
{
ref_loc[warp_id] = d_seed_pairs[hid].target_position_in_read;
query_loc[warp_id] = d_seed_pairs[hid].query_position_in_read;
if(d_done[hid] == 0)
{
find_ssp[warp_id] = true;
}
else
{
find_ssp[warp_id] = false;
}
}
else
{
ref_loc[warp_id] = d_seed_pairs[hid0].target_position_in_read;
query_loc[warp_id] = d_seed_pairs[hid0].query_position_in_read;
if(d_done[hid0] == 0)
{
find_ssp[warp_id] = true;
}
else
{
find_ssp[warp_id] = false;
}
}
total_score[warp_id] = 0;
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
edge_found[warp_id] = false;
new_max_found[warp_id] = false;
entropy[warp_id] = 1.0f;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = -1;
extent[warp_id] = 0;
}
__syncwarp();
//////////////////////////////////////////////////////////////////
//Right extension
while (!xdrop_found[warp_id] && !edge_found[warp_id] && find_ssp[warp_id])
{
int32_t thread_score = 0;
const int32_t pos_offset = lane_id + tile[warp_id];
const int32_t ref_pos = ref_loc[warp_id] + pos_offset;
const int32_t query_pos = query_loc[warp_id] + pos_offset;
int8_t r_chr;
int8_t q_chr;
if (ref_pos < target_length && query_pos < query_length)
{
r_chr = d_target[ref_pos];
q_chr = d_query[query_pos];
thread_score = sub_mat[r_chr * nuc + q_chr];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if (lane_id >= offset)
{
thread_score += temp_score;
}
}
thread_score += prev_score[warp_id];
int32_t max_pos;
int32_t max_thread_score;
if (thread_score > prev_max_score[warp_id])
{
max_thread_score = thread_score;
max_pos = pos_offset;
}
else
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
bool xdrop_done = ((max_thread_score - thread_score) > xdrop_threshold);
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
bool temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if (lane_id >= offset)
{
xdrop_done |= temp_xdrop_done;
}
}
if (xdrop_done)
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
__syncwarp();
if (lane_id == warp_size - 1)
{
new_max_found[warp_id] = max_pos > prev_max_pos[warp_id];
prev_max_pos[warp_id] = max_pos;
if (xdrop_done)
{
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
extent[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if (ref_pos >= target_length || query_pos >= query_length)
{
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
extent[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else
{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
tile[warp_id] += warp_size;
}
}
__syncwarp();
if (new_max_found[warp_id])
{
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if (r_chr == q_chr && r_chr < nuc_entropy)
{
if (pos_offset <= prev_max_pos[warp_id])
{
count[r_chr] += 1;
}
else
{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
__syncwarp();
////////////////////////////////////////////////////////////////
//Left extension
if (lane_id == 0)
{
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
edge_found[warp_id] = false;
new_max_found[warp_id] = false;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = 0;
left_extent[warp_id] = 0;
}
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count_del[i] = 0;
}
__syncwarp();
while (!xdrop_found[warp_id] && !edge_found[warp_id] && find_ssp[warp_id])
{
int32_t thread_score = 0;
const int32_t pos_offset = lane_id + 1 + tile[warp_id];
int8_t r_chr;
int8_t q_chr;
if (ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset)
{
const int32_t ref_pos = ref_loc[warp_id] - pos_offset;
const int32_t query_pos = query_loc[warp_id] - pos_offset;
r_chr = d_target[ref_pos];
q_chr = d_query[query_pos];
thread_score = sub_mat[r_chr * nuc + q_chr];
}
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if (lane_id >= offset)
{
thread_score += temp_score;
}
}
thread_score += prev_score[warp_id];
int32_t max_pos;
int32_t max_thread_score;
if (thread_score > prev_max_score[warp_id])
{
max_thread_score = thread_score;
max_pos = pos_offset;
}
else
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
bool xdrop_done = ((max_thread_score - thread_score) > xdrop_threshold);
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
bool temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if (lane_id >= offset)
{
xdrop_done |= temp_xdrop_done;
}
}
if (xdrop_done)
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
__syncwarp();
if (lane_id == warp_size - 1)
{
new_max_found[warp_id] = max_pos > prev_max_pos[warp_id];
prev_max_pos[warp_id] = max_pos;
if (xdrop_done)
{
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
tile[warp_id] = max_pos;
}
else if (ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset)
{
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
tile[warp_id] = max_pos;
}
else
{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
tile[warp_id] += warp_size;
}
}
__syncwarp();
if (new_max_found[warp_id])
{
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if (r_chr == q_chr && r_chr < nuc_entropy)
{
if (pos_offset <= prev_max_pos[warp_id])
{
count[r_chr] += 1;
}
else
{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
//////////////////////////////////////////////////////////////////
if (total_score[warp_id] >= score_threshold && total_score[warp_id] <= 3 * score_threshold && !no_entropy)
{
for (int32_t i = 0; i < nuc_entropy; i++)
{
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset);
}
}
__syncwarp();
if (lane_id == warp_size - 1 && ((count[0] + count[1] + count[2] + count[3]) >= 20))
{
double entropy_ln = 0.f;
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
if (count[i] != 0)
{
const double probability = static_cast<double>(count[i]) / static_cast<double>(extent[warp_id] + 1);
entropy_ln += (probability)*log(probability);
}
}
entropy[warp_id] = -entropy_ln / ln_4; // Store Entropy with log base 4
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
if (hid < num_seed_pairs)
{
if (lane_id == 0)
{
if (static_cast<int32_t>(static_cast<double>(total_score[warp_id]) * entropy[warp_id]) >= score_threshold)
{
d_scored_segment_pairs[hid].start_coord.target_position_in_read = ref_loc[warp_id] - left_extent[warp_id];
d_scored_segment_pairs[hid].start_coord.query_position_in_read = query_loc[warp_id] - left_extent[warp_id];
d_scored_segment_pairs[hid].length = extent[warp_id];
if (entropy[warp_id] > 0)
d_scored_segment_pairs[hid].score = total_score[warp_id] * entropy[warp_id];
d_done[hid - start_index] = 1;
}
else
{
d_scored_segment_pairs[hid].start_coord.target_position_in_read = ref_loc[warp_id];
d_scored_segment_pairs[hid].start_coord.query_position_in_read = query_loc[warp_id];
d_scored_segment_pairs[hid].length = 0;
d_scored_segment_pairs[hid].score = 0;
d_done[hid - start_index] = 0;
}
}
}
__syncwarp();
}
}
// Gather the SSPs from the resulting segments to the beginning of the tmp_ssp array
__global__ void compress_output(const int32_t* d_done,
const int32_t start_index,
const ScoredSegmentPair* d_ssp,
ScoredSegmentPair* d_tmp_ssp,
const int32_t num_seed_pairs)
{
const int32_t stride = blockDim.x * gridDim.x;
const int32_t start = blockDim.x * blockIdx.x + threadIdx.x;
for (int32_t id = start; id < num_seed_pairs; id += stride)
{
const int32_t reduced_index = d_done[id];
const int32_t index = id + start_index;
if (index > 0)
{
if (reduced_index > d_done[index - 1])
{
d_tmp_ssp[reduced_index - 1] = d_ssp[index];
}
}
else
{
if (reduced_index == 1)
{
d_tmp_ssp[0] = d_ssp[start_index];
}
}
}
}
} // namespace cudaextender
} // namespace genomeworks
} // namespace claraparabricks
| 8600ba68f85611e4db2eaf471755f3afbcd8c8ad.cu | /*
* Copyright 2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This algorithm was adapted from SegAlign's Ungapped Extender authored by
* Sneha Goenka (gsneha@stanford.edu) and Yatish Turakhia (yturakhi@ucsc.edu).
* Source code for original implementation and use in SegAlign can be found
* here: https://github.com/gsneha26/SegAlign
* Description of the algorithm and original implementation can be found in the SegAlign
* paper published in SC20 (https://doi.ieeecomputersociety.org/10.1109/SC41405.2020.00043)
*/
#include "ungapped_xdrop_kernels.cuh"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudaextender
{
// extend the seed values to a segment by ungapped x-drop method, adjust low-scoring
// segment scores based on entropy factor, compare resulting segment scores
// to score_threshold and update the d_scored_segment_pairs and d_done vectors
__global__ void find_high_scoring_segment_pairs(const int8_t* __restrict__ d_target,
const int32_t target_length,
const int8_t* __restrict__ d_query,
const int32_t query_length,
const int32_t* d_sub_mat,
const bool no_entropy,
const int32_t xdrop_threshold,
const int32_t score_threshold,
const SeedPair* d_seed_pairs,
const int32_t num_seed_pairs,
const int32_t start_index,
ScoredSegmentPair* d_scored_segment_pairs,
int32_t* d_done)
{
// TODO - Kernel optimizations -
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/579
// TODO - Following variables are an artifact of the hardcoded encoding scheme with a fixed
// scoring matrix and a fixed alphabet. Will be replaced with cudasequence API.
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/574
constexpr int32_t nuc = 8;
constexpr int32_t nuc2 = 64;
constexpr int32_t nuc_entropy = 4;
constexpr int32_t num_warps = 4;
constexpr int32_t warp_size = 32;
const int32_t lane_id = threadIdx.x % warp_size;
const int32_t warp_id = (threadIdx.x - lane_id) / warp_size;
const float ln_4 = log(4.0f);
__shared__ int32_t ref_loc[num_warps];
__shared__ int32_t query_loc[num_warps];
__shared__ int32_t total_score[num_warps];
__shared__ int32_t prev_score[num_warps];
__shared__ int32_t prev_max_score[num_warps];
__shared__ int32_t prev_max_pos[num_warps];
__shared__ bool edge_found[num_warps];
__shared__ bool xdrop_found[num_warps];
__shared__ bool find_ssp[num_warps];
__shared__ bool new_max_found[num_warps];
__shared__ int32_t left_extent[num_warps];
__shared__ int32_t extent[num_warps];
__shared__ int32_t tile[num_warps];
__shared__ int32_t sub_mat[nuc2];
// TODO - Accuracy exploration required to check if the following member can be a float and to
// check the condition for entropy calculation.
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/575
__shared__ double entropy[num_warps];
if (threadIdx.x < nuc2)
{
sub_mat[threadIdx.x] = d_sub_mat[threadIdx.x];
}
__syncthreads();
for (int32_t hid0 = blockIdx.x * num_warps; hid0 < num_seed_pairs; hid0 += num_warps * gridDim.x)
{
short count[nuc_entropy] = {0};
short count_del[nuc_entropy] = {0};
const int32_t hid = hid0 + warp_id + start_index;
if (lane_id == 0)
{
if (hid < num_seed_pairs)
{
ref_loc[warp_id] = d_seed_pairs[hid].target_position_in_read;
query_loc[warp_id] = d_seed_pairs[hid].query_position_in_read;
if(d_done[hid] == 0)
{
find_ssp[warp_id] = true;
}
else
{
find_ssp[warp_id] = false;
}
}
else
{
ref_loc[warp_id] = d_seed_pairs[hid0].target_position_in_read;
query_loc[warp_id] = d_seed_pairs[hid0].query_position_in_read;
if(d_done[hid0] == 0)
{
find_ssp[warp_id] = true;
}
else
{
find_ssp[warp_id] = false;
}
}
total_score[warp_id] = 0;
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
edge_found[warp_id] = false;
new_max_found[warp_id] = false;
entropy[warp_id] = 1.0f;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = -1;
extent[warp_id] = 0;
}
__syncwarp();
//////////////////////////////////////////////////////////////////
//Right extension
while (!xdrop_found[warp_id] && !edge_found[warp_id] && find_ssp[warp_id])
{
int32_t thread_score = 0;
const int32_t pos_offset = lane_id + tile[warp_id];
const int32_t ref_pos = ref_loc[warp_id] + pos_offset;
const int32_t query_pos = query_loc[warp_id] + pos_offset;
int8_t r_chr;
int8_t q_chr;
if (ref_pos < target_length && query_pos < query_length)
{
r_chr = d_target[ref_pos];
q_chr = d_query[query_pos];
thread_score = sub_mat[r_chr * nuc + q_chr];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if (lane_id >= offset)
{
thread_score += temp_score;
}
}
thread_score += prev_score[warp_id];
int32_t max_pos;
int32_t max_thread_score;
if (thread_score > prev_max_score[warp_id])
{
max_thread_score = thread_score;
max_pos = pos_offset;
}
else
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
bool xdrop_done = ((max_thread_score - thread_score) > xdrop_threshold);
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
bool temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if (lane_id >= offset)
{
xdrop_done |= temp_xdrop_done;
}
}
if (xdrop_done)
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
__syncwarp();
if (lane_id == warp_size - 1)
{
new_max_found[warp_id] = max_pos > prev_max_pos[warp_id];
prev_max_pos[warp_id] = max_pos;
if (xdrop_done)
{
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
extent[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if (ref_pos >= target_length || query_pos >= query_length)
{
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
extent[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else
{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
tile[warp_id] += warp_size;
}
}
__syncwarp();
if (new_max_found[warp_id])
{
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if (r_chr == q_chr && r_chr < nuc_entropy)
{
if (pos_offset <= prev_max_pos[warp_id])
{
count[r_chr] += 1;
}
else
{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
__syncwarp();
////////////////////////////////////////////////////////////////
//Left extension
if (lane_id == 0)
{
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
edge_found[warp_id] = false;
new_max_found[warp_id] = false;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = 0;
left_extent[warp_id] = 0;
}
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count_del[i] = 0;
}
__syncwarp();
while (!xdrop_found[warp_id] && !edge_found[warp_id] && find_ssp[warp_id])
{
int32_t thread_score = 0;
const int32_t pos_offset = lane_id + 1 + tile[warp_id];
int8_t r_chr;
int8_t q_chr;
if (ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset)
{
const int32_t ref_pos = ref_loc[warp_id] - pos_offset;
const int32_t query_pos = query_loc[warp_id] - pos_offset;
r_chr = d_target[ref_pos];
q_chr = d_query[query_pos];
thread_score = sub_mat[r_chr * nuc + q_chr];
}
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if (lane_id >= offset)
{
thread_score += temp_score;
}
}
thread_score += prev_score[warp_id];
int32_t max_pos;
int32_t max_thread_score;
if (thread_score > prev_max_score[warp_id])
{
max_thread_score = thread_score;
max_pos = pos_offset;
}
else
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
bool xdrop_done = ((max_thread_score - thread_score) > xdrop_threshold);
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
bool temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if (lane_id >= offset)
{
xdrop_done |= temp_xdrop_done;
}
}
if (xdrop_done)
{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
const int32_t temp_score = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
const int32_t temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if (lane_id >= offset)
{
if (temp_score >= max_thread_score)
{
max_thread_score = temp_score;
max_pos = temp_pos;
}
}
}
__syncwarp();
if (lane_id == warp_size - 1)
{
new_max_found[warp_id] = max_pos > prev_max_pos[warp_id];
prev_max_pos[warp_id] = max_pos;
if (xdrop_done)
{
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
tile[warp_id] = max_pos;
}
else if (ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset)
{
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
tile[warp_id] = max_pos;
}
else
{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
tile[warp_id] += warp_size;
}
}
__syncwarp();
if (new_max_found[warp_id])
{
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if (r_chr == q_chr && r_chr < nuc_entropy)
{
if (pos_offset <= prev_max_pos[warp_id])
{
count[r_chr] += 1;
}
else
{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
//////////////////////////////////////////////////////////////////
if (total_score[warp_id] >= score_threshold && total_score[warp_id] <= 3 * score_threshold && !no_entropy)
{
for (int32_t i = 0; i < nuc_entropy; i++)
{
#pragma unroll
for (int32_t offset = 1; offset < warp_size; offset = offset << 1)
{
count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset);
}
}
__syncwarp();
if (lane_id == warp_size - 1 && ((count[0] + count[1] + count[2] + count[3]) >= 20))
{
double entropy_ln = 0.f;
#pragma unroll
for (int32_t i = 0; i < nuc_entropy; i++)
{
if (count[i] != 0)
{
const double probability = static_cast<double>(count[i]) / static_cast<double>(extent[warp_id] + 1);
entropy_ln += (probability)*log(probability);
}
}
entropy[warp_id] = -entropy_ln / ln_4; // Store Entropy with log base 4
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
if (hid < num_seed_pairs)
{
if (lane_id == 0)
{
if (static_cast<int32_t>(static_cast<double>(total_score[warp_id]) * entropy[warp_id]) >= score_threshold)
{
d_scored_segment_pairs[hid].start_coord.target_position_in_read = ref_loc[warp_id] - left_extent[warp_id];
d_scored_segment_pairs[hid].start_coord.query_position_in_read = query_loc[warp_id] - left_extent[warp_id];
d_scored_segment_pairs[hid].length = extent[warp_id];
if (entropy[warp_id] > 0)
d_scored_segment_pairs[hid].score = total_score[warp_id] * entropy[warp_id];
d_done[hid - start_index] = 1;
}
else
{
d_scored_segment_pairs[hid].start_coord.target_position_in_read = ref_loc[warp_id];
d_scored_segment_pairs[hid].start_coord.query_position_in_read = query_loc[warp_id];
d_scored_segment_pairs[hid].length = 0;
d_scored_segment_pairs[hid].score = 0;
d_done[hid - start_index] = 0;
}
}
}
__syncwarp();
}
}
// Gather the SSPs from the resulting segments to the beginning of the tmp_ssp array.
// d_done appears to be consumed as an inclusive prefix sum of the per-pair
// "done" flags for this batch (the comparison logic below requires it) and is
// indexed *locally* — the extension kernel writes d_done[hid - start_index].
// d_ssp is indexed globally, hence the start_index shift on that side only.
__global__ void compress_output(const int32_t* d_done,
                                const int32_t start_index,
                                const ScoredSegmentPair* d_ssp,
                                ScoredSegmentPair* d_tmp_ssp,
                                const int32_t num_seed_pairs)
{
    const int32_t stride = blockDim.x * gridDim.x;
    const int32_t start = blockDim.x * blockIdx.x + threadIdx.x;
    for (int32_t id = start; id < num_seed_pairs; id += stride)
    {
        const int32_t reduced_index = d_done[id];
        const int32_t index = id + start_index;
        if (id > 0)
        {
            // This pair produced an SSP iff the prefix sum increased at id.
            // (Was `index > 0` / `d_done[index - 1]`, which read past the
            // locally-indexed d_done whenever start_index > 0.)
            if (reduced_index > d_done[id - 1])
            {
                d_tmp_ssp[reduced_index - 1] = d_ssp[index];
            }
        }
        else
        {
            if (reduced_index == 1)
            {
                d_tmp_ssp[0] = d_ssp[start_index];
            }
        }
    }
}
} // namespace cudaextender
} // namespace genomeworks
} // namespace claraparabricks
|
4fa7505f4fda74479f2d77daf65e8dab26d3d149.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kmer.hpp"
#include "Friends.h"
#include "MPIType.h"
#include "SimpleCount.h"
#include "FriendsMPI.h"
#include "Pack.h"
#include "spmer_kmrCnt.h"
// Returns the smallest mlen-wide window (2 bits/base, base 0 in the top bits)
// of the packed k-mer; `max64` is the sentinel returned when no window wins.
// NOTE(review): the loop visits klen-mlen windows, skipping the last of the
// klen-mlen+1 possible ones. Kept as-is: changing it would change minimizer
// values and therefore owner-rank assignment — confirm intent.
__device__ keyType find_minimizer(keyType kmer, int klen, int mlen, keyType max64){
    keyType minimizer = max64;
    // Exact 2*mlen-bit mask via integer shift; the previous double-precision
    // pow(2, 2*mlen) - 1 loses exactness once 2*mlen exceeds 52 bits.
    keyType mask = (((keyType)1) << (2 * mlen)) - 1;
    for (int m = 0; m < (klen - mlen ); ++m){
        keyType mmer = (kmer >> (2*(31-(mlen+m -1)))) & mask;
        if( mmer < minimizer )
            minimizer = mmer;
    }
    return minimizer;
}
// Builds supermers from 2-bit-packed k-mers and bins them by destination rank.
//
// Each thread scans `window` consecutive k-mer start positions of `seq`. Each
// k-mer's minimizer (length mlen) selects the owner rank via a MurmurHash of
// the minimizer; consecutive k-mers sharing a minimizer are merged into one
// supermer (comprs_Smer / slen) instead of being emitted individually.
// Output layout: `outgoing`/`out_slen` are split into nproc slices of
// p_buff_len entries each; owner_counter[r] is the atomically-bumped fill
// count of slice r. `kmers` is accepted but never used. Windows containing
// the 'a' sentinel or 'N' are skipped.
//
// Fix: the slice-overflow guard now fires at old_count >= p_buff_len (a valid
// slot index must be strictly below p_buff_len), and the first emission of
// each window is guarded the same way instead of writing unchecked.
__global__ void cuda_build_supermer(char *seq, char *kmers, int klen, int mlen, unsigned int seq_len,
keyType* outgoing, unsigned char *out_slen, int *owner_counter, int nproc, unsigned int p_buff_len,
int per_block_seq_len, int window, int rank){
    unsigned int tId = threadIdx.x;
    unsigned int laneId = tId & (blockDim.x - 1);
    unsigned int gId = (blockIdx.x * blockDim.x + tId);
    int st_char_block = blockIdx.x * per_block_seq_len; //first char this block should read
    int nKmer = seq_len - klen + 1; //last char is 'a'
    keyType max64 = 18446744073709551615ULL;
    bool validKmer = true;
    int slen = klen;
    keyType comprs_Smer = 0;
    keyType comprs_Kmer = 0;
    int owner = -1;
    int old_count = -1;
    keyType cur_mini = max64; keyType prev_mini = cur_mini;
    //****First kmer of this window *****
    int i = st_char_block + laneId * window;
    if(i < nKmer) {
        comprs_Kmer = 0;
        for (int k = 0; k < klen ; ++k) {
            char s = seq[i + k ];
            if(s == 'a' || s == 'N') {
                validKmer = false; break; //FIXIT can have 'n'
            }
            int j = k % 32;
            size_t x = ((s) & 4) >> 1;
            // 2-bit encode the base into slot j of the packed word.
            switch(s) {
                case 'A': comprs_Kmer |= ((x + (x^1)) << (2*(31-j))); break;
                case 'C': comprs_Kmer |= ((x + (x^0)) << (2*(31-j))); break;
                case 'G': comprs_Kmer |= ((x + (x^3)) << (2*(31-j))); break;
                case 'T': comprs_Kmer |= ((x + (x^2)) << (2*(31-j))); break;
            }
        }
        if(validKmer){
            cur_mini = find_minimizer(comprs_Kmer, klen, mlen, max64);
            prev_mini = cur_mini;
            // Owner rank = hash of the minimizer scaled into [0, nproc).
            keyType myhash = cuda_MurmurHash3_x64_128((const void *)&cur_mini, 8, 313);
            double range = static_cast<double>(myhash) * static_cast<double>(nproc);
            owner = range / max64;
            old_count = atomicAdd(&owner_counter[owner], 1);
            if(old_count >= (int)p_buff_len) { // guard added: first write was unchecked
                printf("Overflow!! MISSION ABORT!!\n"); return;
            }
            outgoing[owner * p_buff_len + old_count] = comprs_Kmer;
            out_slen[owner * p_buff_len + old_count] = klen;
        }
        comprs_Smer = comprs_Kmer;
        slen = klen;
        int c = st_char_block + (laneId * window);
        // Remaining k-mers of this window: extend the open supermer or emit a new one.
        for(int w = 1; w < window && (c+w) < nKmer ; w++) {
            validKmer = true;
            comprs_Kmer = 0;
            for (int k = 0; k < klen ; ++k) {
                char s = seq[c + w + k ];
                if(s == 'a' || s == 'N') {
                    validKmer = false; break;
                }
                int j = k % 32;
                size_t x = ((s) & 4) >> 1;
                switch(s) {
                    case 'A': comprs_Kmer |= ((x + (x^1)) << (2*(31-j))); break;
                    case 'C': comprs_Kmer |= ((x + (x^0)) << (2*(31-j))); break;
                    case 'G': comprs_Kmer |= ((x + (x^3)) << (2*(31-j))); break;
                    case 'T': comprs_Kmer |= ((x + (x^2)) << (2*(31-j))); break;
                }
            }
            if(validKmer){
                cur_mini = find_minimizer(comprs_Kmer, klen, mlen, max64);
                if(prev_mini == cur_mini){
                    // Same minimizer: append only the new trailing base.
                    char s = seq[c + w + klen - 1];
                    int j = slen % 32;
                    size_t x = ((s) & 4) >> 1;
                    switch(s) {
                        case 'A': comprs_Smer |= ((x + (x^1)) << (2*(31-j))); break;
                        case 'C': comprs_Smer |= ((x + (x^0)) << (2*(31-j))); break;
                        case 'G': comprs_Smer |= ((x + (x^3)) << (2*(31-j))); break;
                        case 'T': comprs_Smer |= ((x + (x^2)) << (2*(31-j))); break;
                    }
                    slen++;
                }
                else {
                    // Minimizer changed: flush the open supermer into its reserved slot.
                    if(owner > -1 && old_count > -1)
                    {
                        outgoing[owner * p_buff_len + old_count] = comprs_Smer;
                        out_slen[owner * p_buff_len + old_count] = slen;
                    }
                    /* new supermer */
                    slen = klen;
                    comprs_Smer = comprs_Kmer;
                    keyType myhash = cuda_MurmurHash3_x64_128((const void *)&cur_mini, 8, 313);
                    double range = static_cast<double>(myhash) * static_cast<double>(nproc);
                    owner = range / max64;
                    old_count = atomicAdd(&owner_counter[owner], 1);
                    if(old_count >= (int)p_buff_len) { // was '>': old_count == p_buff_len wrote out of bounds
                        printf("Overflow!! MISSION ABORT!!\n"); return;
                    }
                    outgoing[owner * p_buff_len + old_count] = comprs_Smer;
                    out_slen[owner * p_buff_len + old_count] = slen;
                }
                prev_mini = cur_mini;
            }
        }
        // Flush the final open supermer of the window (idempotent if it was
        // already written in the last iteration).
        if(old_count > -1 && owner > -1) {
            outgoing[owner * p_buff_len + old_count] = comprs_Smer;
            out_slen[owner * p_buff_len + old_count] = slen;
        }
    }
}
// Runs cuda_build_supermer on the GPU mapped to this MPI rank and copies the
// rank-partitioned supermers (h_send_smers) and their lengths (h_send_slens)
// back to the host; owner_counter[r] receives the fill count of rank r's slice.
// Fixes: d_kmers was previously passed to the kernel uninitialized (it is
// unused there, but passing garbage is UB-adjacent); unused d_outOverflowBuff
// removed; kernel launch errors are now surfaced via hipGetLastError.
void getSupermers_GPU(string seq, int klen, int mlen, int nproc, int *owner_counter,
vector<keyType>& h_send_smers, vector<unsigned char>& h_send_slens, int n_kmers, int rank, int BUFF_SCALE )
{
    int count, devId;
    char *d_kmers = NULL; // kernel parameter is never dereferenced; pass null, not garbage
    char *d_seq;
    keyType *d_supermers;
    unsigned char *d_slen;
    int *d_owner_counter;
    //* Map MPI ranks to GPUs */
    hipGetDeviceCount(&count);
    int gpuID = rank % count;
    hipSetDevice(gpuID);
    hipGetDevice(&devId);
    unsigned int seq_len = seq.length();
    if(seq_len < (unsigned int)klen) return; // nothing to pack
    //* Create events for GPU timing */
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float milliseconds = 0;
    cuda_timer_start(start);
    // Device allocations: BUFF_SCALE x head-room so per-rank slices rarely overflow.
    checkCuda (hipMalloc(&d_supermers, n_kmers * BUFF_SCALE * sizeof(keyType)), __LINE__);
    checkCuda (hipMalloc(&d_slen, n_kmers * BUFF_SCALE * sizeof(unsigned char)), __LINE__);
    checkCuda (hipMalloc(&d_seq, seq_len * sizeof(char)), __LINE__);
    checkCuda (hipMalloc(&d_owner_counter, nproc * sizeof(int)), __LINE__);
    checkCuda (hipMemcpy(d_seq, &seq[0], seq_len * sizeof(char) , hipMemcpyHostToDevice), __LINE__);
    hipMemset(d_supermers, 0, n_kmers * BUFF_SCALE * sizeof(keyType));
    hipMemset(d_owner_counter, 0, sizeof(int) * nproc);
    int window = 32 - klen; // k-mer start positions handled per thread
    unsigned int p_buff_len = ((n_kmers * BUFF_SCALE) + nproc - 1)/nproc; // per-rank slice length
    int b = 128;
    int g = (seq_len + (b*window - 1) ) / (b*window);
    int per_block_seq_len = b * window;
    // Kernel call
    hipLaunchKernelGGL(( cuda_build_supermer), dim3(g), dim3(b), 0, 0, d_seq, d_kmers, klen, mlen, seq_len, d_supermers, d_slen, d_owner_counter, nproc, p_buff_len, per_block_seq_len, window, rank);
    checkCuda (hipGetLastError(), __LINE__); // surface launch-configuration errors
    //* Memcopy to CPU (blocking copies synchronize with the kernel) */
    checkCuda (hipMemcpy(h_send_smers.data(), d_supermers, n_kmers * BUFF_SCALE * sizeof(keyType), hipMemcpyDeviceToHost), __LINE__);
    checkCuda (hipMemcpy(h_send_slens.data(), d_slen, n_kmers * BUFF_SCALE * sizeof(unsigned char), hipMemcpyDeviceToHost), __LINE__);
    checkCuda (hipMemcpy(owner_counter, d_owner_counter, nproc * sizeof(int) , hipMemcpyDeviceToHost), __LINE__);
    hipFree(d_seq);
    hipFree(d_supermers);
    hipFree(d_slen);
    hipFree(d_owner_counter);
    // NOTE(review): assumes cuda_timer_stop() destroys both events — confirm,
    // otherwise they leak once per call.
    cuda_timer_stop(start, stop, milliseconds);
    return;
}
// One thread per received supermer: slide a klen-wide window across the
// packed supermer and bump each k-mer's count in the open-addressed table.
__global__ void cu_kcounter_smer(KeyValue* hashtable, const keyType* kvs, const unsigned char* slens, unsigned int numkvs, int klen, keyType mask)
{
    const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= numkvs)
        return;
    const keyType smer = kvs[gid];
    const int smer_len = (int)slens[gid];
    // Every klen-length window of the supermer is one k-mer.
    for (int k = 0; k <= smer_len - klen; ++k) {
        const keyType kmer = ((smer) >> (2*(31-(klen+k -1)))) & mask;
        keyType slot = cuda_murmur3_64(kmer) & (kHashTableCapacity-1);
        // Linear probing: claim an empty slot or find our key, then count it.
        for (;;) {
            const keyType prev = atomicCAS(&hashtable[slot].key, kEmpty, kmer);
            if (prev == kEmpty || prev == kmer) {
                atomicAdd(&hashtable[slot].value, 1);
                break;
            }
            slot = (slot + 1) & (kHashTableCapacity-1);
        }
    }
}
// Compacts the per-rank slices of the received supermer buffers onto the
// device and launches cu_kcounter_smer to count their k-mers into pHashTable.
// Fixes: timing events are now destroyed (they leaked once per call), the
// unused `int b = 128` local is gone, and the k-mer mask is built with an
// exact integer shift instead of double-precision pow() (assumes klen < 32,
// consistent with the 64-bit 2-bit packing used throughout).
void GPU_SP_buildCounter(KeyValue* pHashTable, vector<keyType> &recvbuf, vector<unsigned char> &recvbuf_len,
int * recvcnt, uint32_t totrecv, int klen, int rank, int p_buff_len)
{
    // Map MPI ranks to GPUs
    int count, devId;
    hipGetDeviceCount(&count);
    int gpuID = rank % count;
    hipSetDevice(gpuID);
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    // Exact 2*klen-bit mask for extracting one packed k-mer.
    keyType mask = (((keyType)1) << (2 * klen)) - 1;
    // Have CUDA calculate the thread block size
    int mingridsize;
    int threadblocksize;
    hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, cu_kcounter_smer, 0, 0);
    unsigned char * d_slens;
    keyType *d_smers;
    checkCuda( hipMalloc(&d_smers, sizeof(keyType) * totrecv), __LINE__);
    checkCuda( hipMalloc(&d_slens, sizeof(unsigned char) * totrecv), __LINE__);
    // recvbuf holds nprocs fixed-size slices; copy only the filled prefix of each.
    size_t num_keys = 0;
    for(uint64_t i= 0; i < nprocs ; ++i) {
        if(totrecv > 0) {
            checkCuda( hipMemcpy(d_smers + num_keys, &recvbuf[i * p_buff_len], sizeof(keyType) * recvcnt[i], hipMemcpyHostToDevice), __LINE__);
            checkCuda( hipMemcpy(d_slens + num_keys, &recvbuf_len[i * p_buff_len], sizeof(unsigned char) * recvcnt[i], hipMemcpyHostToDevice), __LINE__);
        }
        num_keys += recvcnt[i];
    }
    int gridsize = ((uint32_t)num_keys + threadblocksize - 1) / threadblocksize;
    hipLaunchKernelGGL(( cu_kcounter_smer), dim3(gridsize), dim3(threadblocksize), 0, 0, pHashTable, d_smers, d_slens, (uint32_t)num_keys, klen, mask);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    // Release timing events and device buffers.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_smers);
    hipFree(d_slens);
    return ;
}
double tot_GPUsmer_alltoallv = 0;
// Exchanges supermers (and their per-supermer lengths) across all ranks with
// MPI_Alltoallv. Send/recv data live in fixed-size per-rank slices of the
// outgoing/recv buffers, so displacements are simple multiples of the slice
// length. Returns the total wall-clock time of the routine; also accumulates
// the pure alltoallv time into tot_GPUsmer_alltoallv.
// Fix: sdispls/rdispls are new[]-allocated, so they must be released with
// delete[] (the previous `delete(p)` on an array is undefined behavior).
double Exchange_GPUsupermers(vector<keyType> &outgoing, vector<unsigned char> &len_smers,
vector<keyType> &recvbuf, vector<unsigned char> &recvbuf_len,
int *sendcnt, int *recvcnt, int nkmers, int * owner_counter)
{
    double tot_exch_time = MPI_Wtime();
    int * sdispls = new int[nprocs];
    int * rdispls = new int[nprocs];
    uint64_t totsend = 0, totrecv = 0;
    for (int i=0; i < nprocs; i++) {
        sendcnt[i] = owner_counter[i];
        totsend += sendcnt[i];
    }
    // NOTE(review): takes ownership of owner_counter; assumes the caller
    // malloc()ed it — confirm, or this free() is UB.
    free(owner_counter);
    CHECK_MPI( MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, MPI_COMM_WORLD) ); // share the request counts
    for (int i=0; i < nprocs; i++) {
        totrecv += recvcnt[i];
    }
    // Each rank's payload sits at a fixed offset: slice i starts at i * p_buff_len.
    int p_buff_len = ((nkmers * BUFF_SCALE) + nprocs - 1)/nprocs;
    for (int i=0; i < nprocs; i++) {
        sdispls[i] = i * p_buff_len;
        rdispls[i] = i * p_buff_len;
    }
    double exch_time = MPI_Wtime();
    for (int i = 0; i < COMM_ITER; ++i)
    {
        CHECK_MPI( MPI_Alltoallv(&(outgoing[0]), sendcnt, sdispls, MPI_UINT64_T, &(recvbuf[0]), recvcnt, rdispls, MPI_UINT64_T, MPI_COMM_WORLD) );
        CHECK_MPI( MPI_Alltoallv(&(len_smers[0]), sendcnt, sdispls, MPI_UNSIGNED_CHAR, &(recvbuf_len[0]), recvcnt, rdispls, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD) );
    }
    exch_time = (MPI_Wtime() - exch_time)/COMM_ITER;
    tot_GPUsmer_alltoallv += exch_time;
    delete [] rdispls;
    delete [] sdispls;
    tot_exch_time = MPI_Wtime() - tot_exch_time;
    return tot_exch_time;
}
| 4fa7505f4fda74479f2d77daf65e8dab26d3d149.cu | #include "Kmer.hpp"
#include "Friends.h"
#include "MPIType.h"
#include "SimpleCount.h"
#include "FriendsMPI.h"
#include "Pack.h"
#include "spmer_kmrCnt.h"
// Returns the smallest mlen-wide window (2 bits/base, base 0 in the top bits)
// of the packed k-mer; `max64` is the sentinel returned when no window wins.
// NOTE(review): the loop visits klen-mlen windows, skipping the last of the
// klen-mlen+1 possible ones. Kept as-is: changing it would change minimizer
// values and therefore owner-rank assignment — confirm intent.
__device__ keyType find_minimizer(keyType kmer, int klen, int mlen, keyType max64){
    keyType minimizer = max64;
    // Exact 2*mlen-bit mask via integer shift; the previous double-precision
    // pow(2, 2*mlen) - 1 loses exactness once 2*mlen exceeds 52 bits.
    keyType mask = (((keyType)1) << (2 * mlen)) - 1;
    for (int m = 0; m < (klen - mlen ); ++m){
        keyType mmer = (kmer >> (2*(31-(mlen+m -1)))) & mask;
        if( mmer < minimizer )
            minimizer = mmer;
    }
    return minimizer;
}
// Builds supermers from 2-bit-packed k-mers and bins them by destination rank.
//
// Each thread scans `window` consecutive k-mer start positions of `seq`. Each
// k-mer's minimizer (length mlen) selects the owner rank via a MurmurHash of
// the minimizer; consecutive k-mers sharing a minimizer are merged into one
// supermer (comprs_Smer / slen) instead of being emitted individually.
// Output layout: `outgoing`/`out_slen` are split into nproc slices of
// p_buff_len entries each; owner_counter[r] is the atomically-bumped fill
// count of slice r. `kmers` is accepted but never used. Windows containing
// the 'a' sentinel or 'N' are skipped.
//
// Fix: the slice-overflow guard now fires at old_count >= p_buff_len (a valid
// slot index must be strictly below p_buff_len), and the first emission of
// each window is guarded the same way instead of writing unchecked.
__global__ void cuda_build_supermer(char *seq, char *kmers, int klen, int mlen, unsigned int seq_len,
keyType* outgoing, unsigned char *out_slen, int *owner_counter, int nproc, unsigned int p_buff_len,
int per_block_seq_len, int window, int rank){
    unsigned int tId = threadIdx.x;
    unsigned int laneId = tId & (blockDim.x - 1);
    unsigned int gId = (blockIdx.x * blockDim.x + tId);
    int st_char_block = blockIdx.x * per_block_seq_len; //first char this block should read
    int nKmer = seq_len - klen + 1; //last char is 'a'
    keyType max64 = 18446744073709551615ULL;
    bool validKmer = true;
    int slen = klen;
    keyType comprs_Smer = 0;
    keyType comprs_Kmer = 0;
    int owner = -1;
    int old_count = -1;
    keyType cur_mini = max64; keyType prev_mini = cur_mini;
    //****First kmer of this window *****
    int i = st_char_block + laneId * window;
    if(i < nKmer) {
        comprs_Kmer = 0;
        for (int k = 0; k < klen ; ++k) {
            char s = seq[i + k ];
            if(s == 'a' || s == 'N') {
                validKmer = false; break; //FIXIT can have 'n'
            }
            int j = k % 32;
            size_t x = ((s) & 4) >> 1;
            // 2-bit encode the base into slot j of the packed word.
            switch(s) {
                case 'A': comprs_Kmer |= ((x + (x^1)) << (2*(31-j))); break;
                case 'C': comprs_Kmer |= ((x + (x^0)) << (2*(31-j))); break;
                case 'G': comprs_Kmer |= ((x + (x^3)) << (2*(31-j))); break;
                case 'T': comprs_Kmer |= ((x + (x^2)) << (2*(31-j))); break;
            }
        }
        if(validKmer){
            cur_mini = find_minimizer(comprs_Kmer, klen, mlen, max64);
            prev_mini = cur_mini;
            // Owner rank = hash of the minimizer scaled into [0, nproc).
            keyType myhash = cuda_MurmurHash3_x64_128((const void *)&cur_mini, 8, 313);
            double range = static_cast<double>(myhash) * static_cast<double>(nproc);
            owner = range / max64;
            old_count = atomicAdd(&owner_counter[owner], 1);
            if(old_count >= (int)p_buff_len) { // guard added: first write was unchecked
                printf("Overflow!! MISSION ABORT!!\n"); return;
            }
            outgoing[owner * p_buff_len + old_count] = comprs_Kmer;
            out_slen[owner * p_buff_len + old_count] = klen;
        }
        comprs_Smer = comprs_Kmer;
        slen = klen;
        int c = st_char_block + (laneId * window);
        // Remaining k-mers of this window: extend the open supermer or emit a new one.
        for(int w = 1; w < window && (c+w) < nKmer ; w++) {
            validKmer = true;
            comprs_Kmer = 0;
            for (int k = 0; k < klen ; ++k) {
                char s = seq[c + w + k ];
                if(s == 'a' || s == 'N') {
                    validKmer = false; break;
                }
                int j = k % 32;
                size_t x = ((s) & 4) >> 1;
                switch(s) {
                    case 'A': comprs_Kmer |= ((x + (x^1)) << (2*(31-j))); break;
                    case 'C': comprs_Kmer |= ((x + (x^0)) << (2*(31-j))); break;
                    case 'G': comprs_Kmer |= ((x + (x^3)) << (2*(31-j))); break;
                    case 'T': comprs_Kmer |= ((x + (x^2)) << (2*(31-j))); break;
                }
            }
            if(validKmer){
                cur_mini = find_minimizer(comprs_Kmer, klen, mlen, max64);
                if(prev_mini == cur_mini){
                    // Same minimizer: append only the new trailing base.
                    char s = seq[c + w + klen - 1];
                    int j = slen % 32;
                    size_t x = ((s) & 4) >> 1;
                    switch(s) {
                        case 'A': comprs_Smer |= ((x + (x^1)) << (2*(31-j))); break;
                        case 'C': comprs_Smer |= ((x + (x^0)) << (2*(31-j))); break;
                        case 'G': comprs_Smer |= ((x + (x^3)) << (2*(31-j))); break;
                        case 'T': comprs_Smer |= ((x + (x^2)) << (2*(31-j))); break;
                    }
                    slen++;
                }
                else {
                    // Minimizer changed: flush the open supermer into its reserved slot.
                    if(owner > -1 && old_count > -1)
                    {
                        outgoing[owner * p_buff_len + old_count] = comprs_Smer;
                        out_slen[owner * p_buff_len + old_count] = slen;
                    }
                    /* new supermer */
                    slen = klen;
                    comprs_Smer = comprs_Kmer;
                    keyType myhash = cuda_MurmurHash3_x64_128((const void *)&cur_mini, 8, 313);
                    double range = static_cast<double>(myhash) * static_cast<double>(nproc);
                    owner = range / max64;
                    old_count = atomicAdd(&owner_counter[owner], 1);
                    if(old_count >= (int)p_buff_len) { // was '>': old_count == p_buff_len wrote out of bounds
                        printf("Overflow!! MISSION ABORT!!\n"); return;
                    }
                    outgoing[owner * p_buff_len + old_count] = comprs_Smer;
                    out_slen[owner * p_buff_len + old_count] = slen;
                }
                prev_mini = cur_mini;
            }
        }
        // Flush the final open supermer of the window (idempotent if it was
        // already written in the last iteration).
        if(old_count > -1 && owner > -1) {
            outgoing[owner * p_buff_len + old_count] = comprs_Smer;
            out_slen[owner * p_buff_len + old_count] = slen;
        }
    }
}
// Runs cuda_build_supermer on the GPU mapped to this MPI rank and copies the
// rank-partitioned supermers (h_send_smers) and their lengths (h_send_slens)
// back to the host; owner_counter[r] receives the fill count of rank r's slice.
// Fixes: d_kmers was previously passed to the kernel uninitialized (it is
// unused there, but passing garbage is UB-adjacent); unused d_outOverflowBuff
// removed; kernel launch errors are now surfaced via cudaGetLastError.
void getSupermers_GPU(string seq, int klen, int mlen, int nproc, int *owner_counter,
vector<keyType>& h_send_smers, vector<unsigned char>& h_send_slens, int n_kmers, int rank, int BUFF_SCALE )
{
    int count, devId;
    char *d_kmers = NULL; // kernel parameter is never dereferenced; pass null, not garbage
    char *d_seq;
    keyType *d_supermers;
    unsigned char *d_slen;
    int *d_owner_counter;
    //* Map MPI ranks to GPUs */
    cudaGetDeviceCount(&count);
    int gpuID = rank % count;
    cudaSetDevice(gpuID);
    cudaGetDevice(&devId);
    unsigned int seq_len = seq.length();
    if(seq_len < (unsigned int)klen) return; // nothing to pack
    //* Create events for GPU timing */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cuda_timer_start(start);
    // Device allocations: BUFF_SCALE x head-room so per-rank slices rarely overflow.
    checkCuda (cudaMalloc(&d_supermers, n_kmers * BUFF_SCALE * sizeof(keyType)), __LINE__);
    checkCuda (cudaMalloc(&d_slen, n_kmers * BUFF_SCALE * sizeof(unsigned char)), __LINE__);
    checkCuda (cudaMalloc(&d_seq, seq_len * sizeof(char)), __LINE__);
    checkCuda (cudaMalloc(&d_owner_counter, nproc * sizeof(int)), __LINE__);
    checkCuda (cudaMemcpy(d_seq, &seq[0], seq_len * sizeof(char) , cudaMemcpyHostToDevice), __LINE__);
    cudaMemset(d_supermers, 0, n_kmers * BUFF_SCALE * sizeof(keyType));
    cudaMemset(d_owner_counter, 0, sizeof(int) * nproc);
    int window = 32 - klen; // k-mer start positions handled per thread
    unsigned int p_buff_len = ((n_kmers * BUFF_SCALE) + nproc - 1)/nproc; // per-rank slice length
    int b = 128;
    int g = (seq_len + (b*window - 1) ) / (b*window);
    int per_block_seq_len = b * window;
    // Kernel call
    cuda_build_supermer<<<g, b>>>(d_seq, d_kmers, klen, mlen, seq_len, d_supermers, d_slen, d_owner_counter, nproc, p_buff_len, per_block_seq_len, window, rank);
    checkCuda (cudaGetLastError(), __LINE__); // surface launch-configuration errors
    //* Memcopy to CPU (blocking copies synchronize with the kernel) */
    checkCuda (cudaMemcpy(h_send_smers.data(), d_supermers, n_kmers * BUFF_SCALE * sizeof(keyType), cudaMemcpyDeviceToHost), __LINE__);
    checkCuda (cudaMemcpy(h_send_slens.data(), d_slen, n_kmers * BUFF_SCALE * sizeof(unsigned char), cudaMemcpyDeviceToHost), __LINE__);
    checkCuda (cudaMemcpy(owner_counter, d_owner_counter, nproc * sizeof(int) , cudaMemcpyDeviceToHost), __LINE__);
    cudaFree(d_seq);
    cudaFree(d_supermers);
    cudaFree(d_slen);
    cudaFree(d_owner_counter);
    // NOTE(review): assumes cuda_timer_stop() destroys both events — confirm,
    // otherwise they leak once per call.
    cuda_timer_stop(start, stop, milliseconds);
    return;
}
// One thread per received supermer: slide a klen-wide window across the
// packed supermer and bump each k-mer's count in the open-addressed table.
__global__ void cu_kcounter_smer(KeyValue* hashtable, const keyType* kvs, const unsigned char* slens, unsigned int numkvs, int klen, keyType mask)
{
    const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= numkvs)
        return;
    const keyType smer = kvs[gid];
    const int smer_len = (int)slens[gid];
    // Every klen-length window of the supermer is one k-mer.
    for (int k = 0; k <= smer_len - klen; ++k) {
        const keyType kmer = ((smer) >> (2*(31-(klen+k -1)))) & mask;
        keyType slot = cuda_murmur3_64(kmer) & (kHashTableCapacity-1);
        // Linear probing: claim an empty slot or find our key, then count it.
        for (;;) {
            const keyType prev = atomicCAS(&hashtable[slot].key, kEmpty, kmer);
            if (prev == kEmpty || prev == kmer) {
                atomicAdd(&hashtable[slot].value, 1);
                break;
            }
            slot = (slot + 1) & (kHashTableCapacity-1);
        }
    }
}
// Compacts the per-rank slices of the received supermer buffers onto the
// device and launches cu_kcounter_smer to count their k-mers into pHashTable.
// Fixes: timing events are now destroyed (they leaked once per call), the
// unused `int b = 128` local is gone, and the k-mer mask is built with an
// exact integer shift instead of double-precision pow() (assumes klen < 32,
// consistent with the 64-bit 2-bit packing used throughout).
void GPU_SP_buildCounter(KeyValue* pHashTable, vector<keyType> &recvbuf, vector<unsigned char> &recvbuf_len,
int * recvcnt, uint32_t totrecv, int klen, int rank, int p_buff_len)
{
    // Map MPI ranks to GPUs
    int count, devId;
    cudaGetDeviceCount(&count);
    int gpuID = rank % count;
    cudaSetDevice(gpuID);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Exact 2*klen-bit mask for extracting one packed k-mer.
    keyType mask = (((keyType)1) << (2 * klen)) - 1;
    // Have CUDA calculate the thread block size
    int mingridsize;
    int threadblocksize;
    cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, cu_kcounter_smer, 0, 0);
    unsigned char * d_slens;
    keyType *d_smers;
    checkCuda( cudaMalloc(&d_smers, sizeof(keyType) * totrecv), __LINE__);
    checkCuda( cudaMalloc(&d_slens, sizeof(unsigned char) * totrecv), __LINE__);
    // recvbuf holds nprocs fixed-size slices; copy only the filled prefix of each.
    size_t num_keys = 0;
    for(uint64_t i= 0; i < nprocs ; ++i) {
        if(totrecv > 0) {
            checkCuda( cudaMemcpy(d_smers + num_keys, &recvbuf[i * p_buff_len], sizeof(keyType) * recvcnt[i], cudaMemcpyHostToDevice), __LINE__);
            checkCuda( cudaMemcpy(d_slens + num_keys, &recvbuf_len[i * p_buff_len], sizeof(unsigned char) * recvcnt[i], cudaMemcpyHostToDevice), __LINE__);
        }
        num_keys += recvcnt[i];
    }
    int gridsize = ((uint32_t)num_keys + threadblocksize - 1) / threadblocksize;
    cu_kcounter_smer<<<gridsize, threadblocksize>>>(pHashTable, d_smers, d_slens, (uint32_t)num_keys, klen, mask);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // Release timing events and device buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_smers);
    cudaFree(d_slens);
    return ;
}
double tot_GPUsmer_alltoallv = 0;
// Exchanges supermers (and their per-supermer lengths) across all ranks with
// MPI_Alltoallv. Send/recv data live in fixed-size per-rank slices of the
// outgoing/recv buffers, so displacements are simple multiples of the slice
// length. Returns the total wall-clock time of the routine; also accumulates
// the pure alltoallv time into tot_GPUsmer_alltoallv.
// Fix: sdispls/rdispls are new[]-allocated, so they must be released with
// delete[] (the previous `delete(p)` on an array is undefined behavior).
double Exchange_GPUsupermers(vector<keyType> &outgoing, vector<unsigned char> &len_smers,
vector<keyType> &recvbuf, vector<unsigned char> &recvbuf_len,
int *sendcnt, int *recvcnt, int nkmers, int * owner_counter)
{
    double tot_exch_time = MPI_Wtime();
    int * sdispls = new int[nprocs];
    int * rdispls = new int[nprocs];
    uint64_t totsend = 0, totrecv = 0;
    for (int i=0; i < nprocs; i++) {
        sendcnt[i] = owner_counter[i];
        totsend += sendcnt[i];
    }
    // NOTE(review): takes ownership of owner_counter; assumes the caller
    // malloc()ed it — confirm, or this free() is UB.
    free(owner_counter);
    CHECK_MPI( MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, MPI_COMM_WORLD) ); // share the request counts
    for (int i=0; i < nprocs; i++) {
        totrecv += recvcnt[i];
    }
    // Each rank's payload sits at a fixed offset: slice i starts at i * p_buff_len.
    int p_buff_len = ((nkmers * BUFF_SCALE) + nprocs - 1)/nprocs;
    for (int i=0; i < nprocs; i++) {
        sdispls[i] = i * p_buff_len;
        rdispls[i] = i * p_buff_len;
    }
    double exch_time = MPI_Wtime();
    for (int i = 0; i < COMM_ITER; ++i)
    {
        CHECK_MPI( MPI_Alltoallv(&(outgoing[0]), sendcnt, sdispls, MPI_UINT64_T, &(recvbuf[0]), recvcnt, rdispls, MPI_UINT64_T, MPI_COMM_WORLD) );
        CHECK_MPI( MPI_Alltoallv(&(len_smers[0]), sendcnt, sdispls, MPI_UNSIGNED_CHAR, &(recvbuf_len[0]), recvcnt, rdispls, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD) );
    }
    exch_time = (MPI_Wtime() - exch_time)/COMM_ITER;
    tot_GPUsmer_alltoallv += exch_time;
    delete [] rdispls;
    delete [] sdispls;
    tot_exch_time = MPI_Wtime() - tot_exch_time;
    return tot_exch_time;
}
|
1ede2bbedd49cac2cd9b8af0045bd5b13be3a7d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "cudaProj1.h"
// Demo kernel: every thread reports its index; thread 2 additionally prints
// a marker line (handy as a debugger breakpoint target).
__global__
void Kernel() {
    const int tid = threadIdx.x;
    if (tid == 2) {
        printf("Testing debugging!\n");
    }
    printf("in Kernel %d.\n", tid);
}
// Default constructor: this demo wrapper has no state to initialize.
cudaProj1::cudaProj1(/* args */)
{
}
// Destructor: nothing to release.
cudaProj1::~cudaProj1()
{
}
// Launches the demo kernel on 1 block x 3 threads and waits for completion.
// Launch and execution errors are now reported instead of being silently
// dropped (the previous version ignored both return codes).
void cudaProj1::startKernel() {
    hipLaunchKernelGGL(( Kernel), dim3(1),dim3(3), 0, 0, );
    hipError_t err = hipGetLastError(); // launch-configuration errors
    if (err != hipSuccess) {
        printf("Kernel launch failed: %s\n", hipGetErrorString(err));
        return;
    }
    err = hipDeviceSynchronize(); // execution errors; also flushes device printf
    if (err != hipSuccess) {
        printf("Kernel execution failed: %s\n", hipGetErrorString(err));
    }
} | 1ede2bbedd49cac2cd9b8af0045bd5b13be3a7d4.cu | #include "stdio.h"
#include "cudaProj1.h"
// Demo kernel: every thread reports its index; thread 2 additionally prints
// a marker line (handy as a debugger breakpoint target).
__global__
void Kernel() {
    const int tid = threadIdx.x;
    if (tid == 2) {
        printf("Testing debugging!\n");
    }
    printf("in Kernel %d.\n", tid);
}
// Default constructor: this demo wrapper has no state to initialize.
cudaProj1::cudaProj1(/* args */)
{
}
// Destructor: nothing to release.
cudaProj1::~cudaProj1()
{
}
// Launches the demo kernel on 1 block x 3 threads and waits for completion.
// Launch and execution errors are now reported instead of being silently
// dropped (the previous version ignored both return codes).
void cudaProj1::startKernel() {
    Kernel<<<1,3>>>();
    cudaError_t err = cudaGetLastError(); // launch-configuration errors
    if (err != cudaSuccess) {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
        return;
    }
    err = cudaDeviceSynchronize(); // execution errors; also flushes device printf
    if (err != cudaSuccess) {
        printf("Kernel execution failed: %s\n", cudaGetErrorString(err));
    }
} |
9c0aa6e8b84c999191b689daf5352076abd481d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
// Fill kernel: each thread writes `value` into one element of `input`.
// NOTE(review): `loc` is not bounds-checked; initialize_array() rounds the
// grid size up with FORMA_CEIL, so when the element count is not a multiple
// of the block size the last block writes past the end of `input` — confirm
// allocations are padded, or add a size parameter and guard.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
// Launches __kernel_init__ to set all `size` elements of d_input to `value`.
// NOTE(review): the grid is rounded up with FORMA_CEIL but the kernel takes
// no `size` argument, so trailing threads of the last block write out of
// bounds whenever size % FORMA_MAX_BLOCKDIM_0 != 0 — verify buffers are
// padded to a block-size multiple.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
// One pass of the 27-point 3D stencil (see host wrapper j3d27pt): streams the
// L x M x N input one z-plane at a time through shared memory and accumulates
// three differently-weighted 3x3 in-plane sums per point.  Per-thread
// registers (t2/b2, then t3/b3/out) plus the shared plane __tilevar_1__
// software-pipeline those sums across consecutive z iterations, so the result
// for a plane is only complete two iterations later -- hence the
// FORMA_MAX(__iter_2__-2,0) store index below.
// Launch contract (set up by the host code): blockDim == (FORMA_BLOCKDIM_X,
// FORMA_BLOCKDIM_Y, 1); adjacent blocks overlap by 4 points in x and y
// (stencil halo); dynamic shared memory must hold
// 2*FORMA_BLOCKDIM_X*FORMA_BLOCKDIM_Y doubles
// (see __blockSizeToSMemSize___kernel___forma_kernel__0__).
// NOTE: guards use bitwise '&' on 0/1 comparison results (generated code);
// equivalent to '&&' here apart from short-circuit evaluation.
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
// Carve two FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X tiles out of dynamic shared
// memory: __tilevar_0__ stages the current input plane, __tilevar_1__ holds
// the stage-1 partial sums.
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin in the x/y plane; the "-4" makes neighbouring blocks overlap
// by the 4-point halo the two chained 3x3 stencil stages consume.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
// Plane-pipelined accumulators: t2/b2 for stage 1, t3/b3/out for stage 2.
double t2=0.0f, t3=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
// Stage plane __iter_2__ of the input into shared memory.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
// Stage 1: three weighted 3x3 sums over the freshly staged plane.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom weight set (0.5/0.7/0.9/1.2/1.5...) -> accumulated into t2.
double __temp_3__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_0__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t2 += __temp_43__;
// Mid weight set (0.51/0.71/...) -> accumulated into b2.
double __temp_47__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b2 += __temp_88__;
// Top weight set (0.52/0.72/...) -> accumulated into the shared plane.
double __temp_92__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
// Stage 2: the same three weighted sums applied to the stage-1 partial
// plane (__tilevar_1__); note the tighter +/-2 interior guard.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
double __temp_3__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_1__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t3 += __temp_43__;
// Mid weight set -> b3.
double __temp_47__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b3 += __temp_88__;
// Top weight set -> completes `out`, which is stored for plane
// __iter_2__-2 (the pipeline latency of the two chained stages).
double __temp_92__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out;
}
__syncthreads ();
// Now rotate the plane pipeline one step in z: mid->shared, top->mid,
// and likewise for the stage-2 registers.  (The shared-memory index here is
// threadIdx.x + FORMA_BLOCKDIM_X*threadIdx.y, so it is always in bounds.)
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b2;
b2 = t2;
t2 = 0.0;
out= b3;
b3 = t3;
t3 = 0.0;
}
}
// Dynamic shared-memory requirement of __kernel___forma_kernel__0__ for a
// given block shape: two double-precision tiles of blockDim.y x blockDim.x
// elements each (__tilevar_0__ and __tilevar_1__).
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
const int tileElems = (int)(blockDim.x) * (int)(blockDim.y);
return (int)(2 * sizeof(double) * (size_t)tileElems);
}
/*Device code End */
/* Host Code Begin */
// Host driver: applies the 27-point stencil kernel twice to the L x M x N
// volume h_input (input -> __var_2__ -> __var_1__) and copies the final
// result into __var_0__.  Both h_input and __var_0__ may be host or device
// pointers; the pointer-attribute queries below pick the matching memcpy
// kind.  Check_CUDA_Error is declared elsewhere in this file.
extern "C" void j3d27pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
// Detect whether h_input already lives on the device; if so, skip the
// host-to-device copy entirely (data is read in place).
// NOTE(review): hipPointerAttribute_t::memoryType is deprecated in newer
// ROCm releases (renamed to 'type'); confirm against the targeted HIP
// version before upgrading the toolchain.
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
// Clear any error the attribute query may have left for a plain host pointer.
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
// __var_1__ holds the final result, __var_2__ the intermediate time step.
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
// Fixed 32x16 blocks; the grid divides by (blockDim-4) because adjacent
// tiles overlap by the 4-point stencil halo (see the kernel).
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
// unrollConfig is currently identical to the block config (no z-unrolling).
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
// Two stencil time steps: input -> __var_2__ -> __var_1__.
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
// Copy the result out; direction again depends on where __var_0__ lives.
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| 9c0aa6e8b84c999191b689daf5352076abd481d6.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
// Generic fill kernel: each thread writes `value` to one element of `input`
// at its flat global index.
// NOTE(review): there is no bounds guard and the kernel receives no size
// argument; initialize_array launches a ceil-divided grid, so for sizes that
// are not a multiple of the block size the last block writes past the end of
// `input`.  Confirm callers always pass buffers padded to a block multiple,
// or add a size parameter to guard the write.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
// Sets all `size` elements of device buffer d_input to `value` by launching
// __kernel_init__ with FORMA_CEIL(size, FORMA_MAX_BLOCKDIM_0) blocks of
// FORMA_MAX_BLOCKDIM_0 threads.
// NOTE(review): because the grid is ceil-divided and the kernel has no bounds
// check, sizes that are not a multiple of FORMA_MAX_BLOCKDIM_0 cause an
// out-of-bounds write in the last block -- see the note on __kernel_init__.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
// One pass of the 27-point 3D stencil (see host wrapper j3d27pt below):
// streams the L x M x N input one z-plane at a time through shared memory and
// accumulates three differently-weighted 3x3 in-plane sums per point.
// Per-thread registers (t2/b2, then t3/b3/out) plus the shared plane
// __tilevar_1__ software-pipeline those sums across consecutive z iterations,
// so the result for a plane is only complete two iterations later -- hence
// the FORMA_MAX(__iter_2__-2,0) store index below.
// Launch contract (set up by the host code): blockDim == (FORMA_BLOCKDIM_X,
// FORMA_BLOCKDIM_Y, 1); adjacent blocks overlap by 4 points in x and y
// (stencil halo); dynamic shared memory must hold
// 2*FORMA_BLOCKDIM_X*FORMA_BLOCKDIM_Y doubles
// (see __blockSizeToSMemSize___kernel___forma_kernel__0__).
// NOTE: guards use bitwise '&' on 0/1 comparison results (generated code);
// equivalent to '&&' here apart from short-circuit evaluation.
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
// Carve two FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X tiles out of dynamic shared
// memory: __tilevar_0__ stages the current input plane, __tilevar_1__ holds
// the stage-1 partial sums.
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
// Tile origin in the x/y plane; the "-4" makes neighbouring blocks overlap
// by the 4-point halo the two chained 3x3 stencil stages consume.
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
// Plane-pipelined accumulators: t2/b2 for stage 1, t3/b3/out for stage 2.
double t2=0.0f, t3=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
// Stage plane __iter_2__ of the input into shared memory.
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
// Stage 1: three weighted 3x3 sums over the freshly staged plane.
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom weight set (0.5/0.7/0.9/1.2/1.5...) -> accumulated into t2.
double __temp_3__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_0__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t2 += __temp_43__;
// Mid weight set (0.51/0.71/...) -> accumulated into b2.
double __temp_47__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b2 += __temp_88__;
// Top weight set (0.52/0.72/...) -> accumulated into the shared plane.
double __temp_92__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
// Stage 2: the same three weighted sums applied to the stage-1 partial
// plane (__tilevar_1__); note the tighter +/-2 interior guard.
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
double __temp_3__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_1__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t3 += __temp_43__;
// Mid weight set -> b3.
double __temp_47__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b3 += __temp_88__;
// Top weight set -> completes `out`, which is stored for plane
// __iter_2__-2 (the pipeline latency of the two chained stages).
double __temp_92__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out;
}
__syncthreads ();
// Now rotate the plane pipeline one step in z: mid->shared, top->mid,
// and likewise for the stage-2 registers.  (The shared-memory index here is
// threadIdx.x + FORMA_BLOCKDIM_X*threadIdx.y, so it is always in bounds.)
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b2;
b2 = t2;
t2 = 0.0;
out= b3;
b3 = t3;
t3 = 0.0;
}
}
// Dynamic shared-memory requirement of __kernel___forma_kernel__0__ for a
// given block shape: two double-precision tiles of blockDim.y x blockDim.x
// elements each (__tilevar_0__ and __tilevar_1__).
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
const int tileElems = (int)(blockDim.x) * (int)(blockDim.y);
return (int)(2 * sizeof(double) * (size_t)tileElems);
}
/*Device code End */
/* Host Code Begin */
// Host driver: applies the 27-point stencil kernel twice to the L x M x N
// volume h_input (input -> __var_2__ -> __var_1__) and copies the final
// result into __var_0__.  Both h_input and __var_0__ may be host or device
// pointers; the pointer-attribute queries below pick the matching memcpy
// kind.  Check_CUDA_Error is declared elsewhere in this file.
extern "C" void j3d27pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
// Detect whether h_input already lives on the device; if so, skip the
// host-to-device copy entirely (data is read in place).
// NOTE(review): cudaPointerAttributes::memoryType is deprecated (removed in
// CUDA 12 in favour of 'type'); confirm against the targeted CUDA version
// before upgrading the toolchain.
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
// Clear any error the attribute query may have left for a plain host pointer.
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
// __var_1__ holds the final result, __var_2__ the intermediate time step.
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
// Fixed 32x16 blocks; the grid divides by (blockDim-4) because adjacent
// tiles overlap by the 4-point stencil halo (see the kernel).
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
// unrollConfig is currently identical to the block config (no z-unrolling).
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
// Two stencil time steps: input -> __var_2__ -> __var_1__.
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
// Copy the result out; direction again depends on where __var_0__ lives.
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
a5dbf7ca69ef47a95fbec18d5732380d74032032.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMath.hip"
#else
#include <algorithm>
#include <ATen/hip/HIPContext.h>
#include <ATen/MemoryOverlap.h>
// Fills every element of self_ with `value` using a GPU pointwise apply.
// THC_pointwiseApply1 returns false when the tensor's dimensionality exceeds
// what the apply machinery supports, which is surfaced as an argument error.
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
// Zeroes self_.  Contiguous tensors take the fast path: an asynchronous
// memset on the current stream.  Non-contiguous tensors fall back to a
// pointwise fill with scalar 0 (memset cannot respect strides).
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
// Total number of elements in `t`; thin alias for nElement.
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
const ptrdiff_t count = THCTensor_(nElement)(state, t);
return count;
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
// Argument validation for concatenation-style ops: both tensors must have the
// same rank, and identical sizes in every dimension except `dimension`.
// Raises via THArgCheck on the first mismatch.
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
const int ndim_a = first->dim();
const int ndim_b = second->dim();
THArgCheck(ndim_a == ndim_b, 0,
"Tensors must have same number of dimensions: got %d and %d",
ndim_a, ndim_b);
for (int d = 0; d < ndim_a; ++d) {
if (d == dimension) {
continue;
}
const int64_t sz_a = THCTensor_(size)(state, first, d);
const int64_t sz_b = THCTensor_(size)(state, second, d);
THArgCheck(sz_a == sz_b, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)sz_a, (long long)sz_b, d);
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
// this is a little awkward for scalars because we run thrust to count the number of zeros
// (which are necessary to get the correct size), but thrust just has an array API, so
// we need to basically threat the scalar as a 1-dimensional tensor (array) for
// the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? ::min((int64_t)size0, (int64_t)size1 - k)
: ::min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(::min(
(int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
(int64_t)size));
dim3 grid(::min(
(int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(::min(
(int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
(int64_t)size));
dim3 grid(::min(
(int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
| a5dbf7ca69ef47a95fbec18d5732380d74032032.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMath.cu"
#else
#include <algorithm>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/MemoryOverlap.h>
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
c10::cuda::getCurrentCUDAStream()));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
// this is a little awkward for scalars because we run thrust to count the number of zeros
// (which are necessary to get the correct size), but thrust just has an array API, so
// we need to basically threat the scalar as a 1-dimensional tensor (array) for
// the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? std::min((int64_t)size0, (int64_t)size1 - k)
: std::min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(std::min(
(int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
(int64_t)size));
dim3 grid(std::min(
(int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(std::min(
(int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
(int64_t)size));
dim3 grid(std::min(
(int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
|
05796d3ccea5d31b7ccc8fc24dfbac43e4390230.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NVMATRIX_EXPORT
#define _NVMATRIX_EXPORT
#endif
#include <set>
#include <vector>
#include <assert.h>
#include <rocblas.h>
#include <cutil_inline.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <typeinfo>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <map>
using namespace std;
/*
* Device random number generator pointers.
*/
//map<int,hiprandGenerator_t> NVMatrix::rndGen;
map<int,hiprandState_t*> NVMatrix::rndDevStates;
pthread_mutex_t* NVMatrix::_rndMutex = makeMutex();
pthread_mutex_t* NVMatrix::makeMutex() {
pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));
pthread_mutex_init(m, NULL);
return m;
}
NVMatrix::NVMatrix(const CudaNdarray * view,
int numRows, int numCols, const char * msg)
{
if (!CudaNdarray_is_c_contiguous(view))
{
printf("Non contiguous input: %s\n", msg);
printf("Dims: ");
for (int i=0; i < view->nd; i++)
printf("%d ",CudaNdarray_HOST_STRIDES(view)[i]);
printf("\n");
assert(false);
}
//Check that view actually contains numRows * numCols elements
const int * dims = CudaNdarray_HOST_DIMS(view);
int total = 1;
for (int i = 0; i < view->nd; i++)
{
total *= dims[i];
}
if (total != numRows * numCols)
{
fprintf(stderr, "NVMatrix asked to make a view of a CudaNdarray with %d elements",total);
fprintf(stderr, " but told to arrange these in a %d x %d rectangle (of total size %d).\n",
numRows, numCols, numRows * numCols);
fprintf(stderr, "CudaNdarray dims: ");
for (int i = 0; i < view->nd; i++)
fprintf(stderr, "%d ", dims[i]);
fprintf(stderr, "\n");
assert(false);
}
//Make the view
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = false;
_isTrans = false;
_devData = view->devdata;
_stride = getLeadingDim();
}
void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = true;
_isTrans = isTrans;
_devData = NULL;
if (_numElements > 0) {
hipblasAlloc(_numElements, sizeof(float), (void**) &_devData);
checkCublasError("!!!! device memory allocation error\n");
}
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::NVMatrix() {
_init(0, 0, -1, false);
}
NVMatrix::NVMatrix(bool isTrans) {
_init(0, 0, -1, isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) {
_init(numRows, numCols, -1, isTrans);
}
/*
NVMatrix::NVMatrix(const Matrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
if (copy) {
copyFromHost(like);
}
}
*/
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
if (copy) {
like.copy(*this);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
NVMatrix::NVMatrix(const Matrix& like) {
_init(like.getNumRows(), like.getNumCols(), -1, false);
}
*/
NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) :
_numRows(numRows),
_numCols(numCols),
_numElements(numRows*numCols),
_ownsData(false),
_devData(devData),
_isTrans(isTrans) {
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::~NVMatrix() {
if(_ownsData && _numElements > 0) {
// This line was modified by Ian Goodfellow to use device_free
// so that theano may keep track of device memory usage
int status = device_free(_devData);
if (status != 0) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
}
}
/*
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) {
if (resizeDeviceMatrix) {
resize(hostMatrix);
}
copyFromHost(hostMatrix);
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
// assert(getStride() == getLeadingDim());
assert(isSameDims(hostMatrix));
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
cublasStatus status = hipblasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float),
hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write)\n");
exit( EXIT_FAILURE);
}
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
// assert(getStride() == getLeadingDim());
assert(isSameDims(hostMatrix));
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
// printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride());
cublasStatus status = hipblasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float),
_devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim());
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read)\n");
exit( EXIT_FAILURE);
}
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
if (resizeTarget) {
hostMatrix.resize(_numRows, _numCols);
}
copyToHost(hostMatrix);
}
*/
void NVMatrix::copy(NVMatrix& dest) const {
dest.resize(*this);
copy(dest, 0, -1, 0, -1, 0, 0);
}
NVMatrix& NVMatrix::copy() const {
NVMatrix* c = new NVMatrix();
copy(*c);
return *c;
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const {
assert(isContiguous() && b.isContiguous() && target.isContiguous());
// assert(&target != &b);
assert(_numCols == b.getNumRows());
if(&target != this) {
target.resize(_numRows, b.getNumCols());
target.setTrans(true);
}
assert(target.getNumRows() == _numRows);
assert(target.getNumCols() == b.getNumCols());
if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer.");
}
hipblasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols,
scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(),
0, target.getDevData(), getNumRows());
checkCublasError("hipblasSgemm failed");
// hipDeviceSynchronize();
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const {
rightMult(b, 1, target);
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) {
if (scaleThis == 0) {
a.rightMult(b, scaleAB, *this);
return;
}
assert(isContiguous());
assert(a.getNumCols() == b.getNumRows());
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer.");
}
hipblasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(),
scaleThis, _devData, getLeadingDim());
checkCublasError("hipblasSgemm failed");
// hipDeviceSynchronize();
}
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) {
addProduct(a, b, 1, 1);
}
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) {
assert(isRndInitialized());
assert(isContiguous() && target.isContiguous());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
hipLaunchKernelGGL(( kUnaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
cutilCheckMsg("kUnaryRandomize: Kernel execution failed");
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) {
assert(isRndInitialized());
assert(isContiguous() && data2.isContiguous() && target.isContiguous());
assert(isSameDims(data2));
assert(isTrans() == data2.isTrans());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
hipLaunchKernelGGL(( kBinaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
cutilCheckMsg("kBinaryRandomize: Kernel execution failed");
}
/* Function removed by Ian Goodfellow.
We do not need this function in theano / pylearn2 and it uses hipMalloc directly.
If you need to enable it, modify it to use device_malloc instead.
Otherwise, theano will not be able to keep track of how much memory is used on
the device.
void NVMatrix::initRandom(unsigned long long seed) {
assert(!isRndInitialized());
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
rndDevStates[d] = NULL;
CUDA_CALL(hipMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(hiprandState_t)));
pthread_mutex_unlock(_rndMutex);
kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one
cutilCheckMsg("initRandom: Kernel execution failed");
}
void NVMatrix::initRandom() {
NVMatrix::initRandom(time(0));
}
*/
hiprandState_t* NVMatrix::getCurandState() {
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
assert(rndDevStates.count(d) != 0);
hiprandState_t* r = rndDevStates[d];
pthread_mutex_unlock(_rndMutex);
return r;
}
int NVMatrix::getDeviceID() {
int d;
hipGetDevice(&d);
return d;
}
bool NVMatrix::isRndInitialized() {
pthread_mutex_lock(_rndMutex);
bool b = rndDevStates.count(getDeviceID()) != 0;
pthread_mutex_unlock(_rndMutex);
return b;
}
/* Function removed by Ian Goodfellow due to not needing
it and it using hipFree instead of device_free
void NVMatrix::destroyRandom() {
assert(isRndInitialized());
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
CUDA_CALL(hipFree(rndDevStates[d]));
rndDevStates.erase(d);
pthread_mutex_unlock(_rndMutex);
} */
void NVMatrix::binarizeProbs() {
binarizeProbs(*this);
}
void NVMatrix::binarizeProbs(NVMatrix& target) {
_unaryRandomize(target, BinarizeUnaryRandomizer());
}
void NVMatrix::randomizeUniform() {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements()));
_unaryRandomize(*this, UniformUnaryRandomizer());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
randomizeGaussian(0, stdev);
}
void NVMatrix::randomizeGaussian(float mean, float stdev) {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(hiprandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev));
_unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev));
}
/*
* Kind of a hack since we don't actually need the contents of this matrix for it,
* so we don't really need a binary randomizer.
*/
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, GaussianBinaryRandomizer());
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
addGaussianNoise(stdev, *this);
}
void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) {
_unaryRandomize(target, AddGaussianUnaryRandomizer(stdev));
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) {
addGaussianNoise(stdevs, var, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
addGaussianNoise(stdevs, false, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) {
if (var) {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>());
} else {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>());
}
}
void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target);
}
void NVMatrix::biggerThan(NVMatrix& b) {
biggerThan(b, *this);
}
void NVMatrix::equals(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Equals(), b, target);
}
void NVMatrix::equals(NVMatrix& m) {
equals(m, *this);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target);
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow < _numRows);
assert(endRow > startRow && endRow <= _numRows);
assert(startCol >= 0 && startCol < _numCols);
assert(endCol > startCol && endCol <= _numCols);
}
/*
* The only place where stride is supported for now!
* Will ALWAYS return a view of the original data, sometimes non-contiguous.
*/
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans()) {
return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false);
}
return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true);
}
/* this will NEVER return a view */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
int sliceRows = endRow - startRow, sliceCols = endCol - startCol;
if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) {
target.resize(sliceRows, sliceCols);
}
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
/*
* Guaranteed to not change the data if the number of elements doesn't change.
* So you can use this to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols) {
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
// this assertion was removed by Ian Goodfellow because it seems to come too early
// assert(_ownsData);
if (_numElements != numRows * numCols) {
assert(_ownsData); // assert moved here by Ian Goodfellow
if (_numElements > 0) { // free old memory
// This line was modified by Ian Goodfellow to use device_free so theano may track device memory usage accurately
int status = device_free(_devData);
if (status != 0) {
fprintf(stderr, "!!!! memory free error: %X\n", status);
exit(EXIT_FAILURE);
}
}
if (numRows * numCols > 0) { // allocate new memory
cublasStatus status = hipblasAlloc(numCols * numRows, sizeof(float), (void**) &_devData);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device memory allocation error\n");
exit(EXIT_FAILURE);
}
} else {
_devData = NULL;
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_stride = getLeadingDim();
}
return reallocated;
}
bool NVMatrix::resize(const NVMatrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
/*
bool NVMatrix::resize(const Matrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
*/
void NVMatrix::reshape(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
_stride = getLeadingDim();
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans);
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol;
NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol);
NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol);
srcSlice->apply(NVMatrixOps::Identity(), *destSlice);
delete srcSlice;
delete destSlice;
}
NVMatrix& NVMatrix::getTranspose() {
return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);;
}
void NVMatrix::transpose(NVMatrix& target) {
flipTrans(target);
target.setTrans(!target.isTrans());
target.reshape(target.getNumCols(), target.getNumRows());
}
void NVMatrix::transpose() {
int tmp = _numCols;
_numCols = _numRows;
_numRows = tmp;
_isTrans = !_isTrans;
}
bool NVMatrix::transpose(bool trans) {
bool oldTrans = _isTrans;
if (oldTrans != trans) {
transpose();
}
return oldTrans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
* the same dimensions, its layout in memory just changes.
*/
NVMatrix& NVMatrix::flipTrans() {
NVMatrix* meTrans = new NVMatrix(*this);
flipTrans(*meTrans);
return *meTrans;
}
void NVMatrix::flipTrans(NVMatrix& target) {
assert(&target != this);
target.resize(_numRows, _numCols);
target.setTrans(!isTrans());
apply(NVMatrixOps::Identity(), target);
}
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
if (scaleA == 0) {
b.scale(scaleB, target);
return;
}
if (scaleA == 1 && scaleB == 1) { // slight optimization
applyBinary(NVMatrixBinaryOps::Add(), b, target);
} else {
applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target);
}
}
// target = this + scaleB * b.
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
    this->add(b, 1.0f, scaleB, target);
}
// target = this + b.
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
    this->add(b, 1.0f, target);
}
// In-place: this += scaleB * b.
void NVMatrix::add(NVMatrix& b, float scaleB) {
    this->add(b, scaleB, *this);
}
// In-place: this = scaleA * this + scaleB * b.
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
    this->add(b, scaleA, scaleB, *this);
}
// In-place: this += b.
void NVMatrix::add(NVMatrix& b) {
    this->add(b, 1.0f, *this);
}
// target = this - b (implemented as add with scaleB = -1).
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
    this->add(b, -1.0f, target);
}
// In-place: this -= b.
void NVMatrix::subtract(NVMatrix& b) {
    this->add(b, -1.0f);
}
// target = this .* b (element-wise product).
void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) {
    applyBinary(NVMatrixBinaryOps::Multiply(), b, target);
}
// In-place element-wise product.
void NVMatrix::eltwiseMult(NVMatrix& b) {
    this->eltwiseMult(b, *this);
}
// target = this ./ b (element-wise quotient).
void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) {
    applyBinary(NVMatrixBinaryOps::Divide(), b, target);
}
// In-place element-wise quotient.
void NVMatrix::eltwiseDivide(NVMatrix& b) {
    this->eltwiseDivide(b, *this);
}
/*
 * Tiles this matrix timesY times vertically and timesX times horizontally
 * into target, which is resized to (_numRows*timesY) x (_numCols*timesX)
 * and takes on this matrix's transposedness. Both matrices must be
 * contiguous. The kernel is handed (width, height) in storage order, so the
 * row/column arguments swap depending on the transposedness flag.
 */
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
assert(isContiguous() && target.isContiguous());
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target.setTrans(_isTrans);
if(!isTrans()) {
// Row-major: width = _numCols, height = _numRows.
hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numCols, _numRows, target._numCols, target._numRows);
} else {
// Column-major: width = _numRows, height = _numCols.
hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numRows, _numCols, target._numRows, target._numCols);
}
cutilCheckMsg("Kernel execution failed");
}
// target = this + scaleVec * vec, with vec broadcast along the matrix.
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target);
}
// In-place: this += vec (broadcast).
void NVMatrix::addVector(NVMatrix& vec) {
    this->addVector(vec, 1.0f, *this);
}
// In-place: this += scaleVec * vec (broadcast).
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
    this->addVector(vec, scaleVec, *this);
}
// target = this + vec (broadcast).
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
    this->addVector(vec, 1.0f, target);
}
// target = (this == vec) element-wise, with vec broadcast.
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target);
}
// In-place broadcast equality test.
void NVMatrix::equalsVector(NVMatrix& vec) {
    this->equalsVector(vec, *this);
}
// target = this .* vec, with vec broadcast.
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target);
}
// In-place broadcast multiply.
void NVMatrix::eltwiseMultByVector(NVMatrix& vec) {
    this->eltwiseMultByVector(vec, *this);
}
// In-place broadcast divide.
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) {
    this->eltwiseDivideByVector(vec, *this);
}
// target = this ./ vec, with vec broadcast.
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target);
}
/*
* num threads per block is ignored when summing rows (axis=1) because
* it has to be a power of 2.
*
* TODO: this is a mess, fix it. it works pretty fast but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
/*
 * Reduces this matrix along the given axis (0 = down columns, 1 = across
 * rows, in logical coordinates) into target, combining the aggregation
 * result with target's prior contents via 'op'. Target is resized to a
 * row/column vector as appropriate. Both matrices must be contiguous and
 * target must not alias this.
 *
 * The kernel is chosen from the STORAGE-order width/height, so a logical
 * column sum on a transposed matrix takes the "row sum" path and vice versa.
 */
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) {
assert(axis == 0 || axis == 1);
assert(isContiguous() && target.isContiguous());
assert(&target != this);
// width/height in storage order, regardless of logical orientation.
int width = _isTrans ? _numRows : _numCols;
int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
// One thread per storage column; each walks its column serially.
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
hipLaunchKernelGGL(( kDumbAggCols<Agg, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, width, height, agg, op);
cutilCheckMsg("kDumbAggCols: Kernel execution failed");
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
// NOTE(review): the ">= 16384" branch is labeled "linear aggregation"
// but dispatches the short-row kernels; presumably intentional for
// very tall matrices — confirm against kAggShortRows' design.
if (height >= 16384) { // linear aggregation
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
// Split the Y grid into X until it fits under the hardware limit.
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
// Template width parameter picks an unrolled kernel variant.
if(width <= 16) {
if(width <= 4) {
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 4>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else if(width <= 8) {
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 8>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else if(width <= 12) {
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 12>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else {
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 16>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
}
} else if(width <= 32) {
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else if(width <= 48){
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else if(width <= 64){
hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
} else {
hipLaunchKernelGGL(( kAggShortRows2<Agg, BinaryOp>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op);
}
} else {
if (width >= 512) {
// Wide rows: one block aggregates a whole row in a single pass.
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, ::min(1024, height));
hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, 0, _devData, target._devData, width, height, agg, op);
// dim3 threads(AWR_NUM_THREADS);
// dim3 blocks(1, ::min(1024, height));
// kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
} else {
// dim3 threads(AWR_NUM_THREADS);
// dim3 blocks(1, ::min(1024, height));
// kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
// Iterative tree reduction: each pass halves (per 2*numThreadsX)
// the row width, ping-ponging through temp matrices until the
// leading dimension reaches 1.
// NOTE(review): the final accumulator is assumed to be &target on
// the last pass (its leading dim matching numBlocksX); if it is a
// freshly allocated matrix instead, it is never copied to target
// or freed — confirm against callers.
NVMatrix *prevSum = this;
while (prevSum->getLeadingDim() > 1) {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = ::min(height, NUM_BLOCKS_MAX);
// Reuse target as the accumulator when its shape already matches.
NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
if(width <= 64) {
hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 128) {
hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 256) {
hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 512) {
hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else {
hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
}
cutilCheckMsg("agg rows: Kernel execution failed");
hipDeviceSynchronize();
width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway
// Free intermediate accumulators (never this, never target).
if (prevSum != this) {
delete prevSum;
}
prevSum = nvSumAccum;
}
}
}
} else {
// Degenerate width-1 rows: the "aggregate" is just a copy.
copy(target);
}
}
}
// In-place: this[i] = (lower <= this[i] <= upper), inclusive bounds.
void NVMatrix::inRangeInc(float lower, float upper) {
    this->inRangeInc(lower, upper, *this);
}
// target[i] = (lower <= this[i] <= upper), inclusive bounds.
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
    apply(NVMatrixOps::InRange<false>(lower, upper), target);
}
// In-place: exclusive-bounds range test.
void NVMatrix::inRangeExc(float lower, float upper) {
    this->inRangeExc(lower, upper, *this);
}
// target[i] = (lower < this[i] < upper), exclusive bounds.
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
    apply(NVMatrixOps::InRange<true>(lower, upper), target);
}
// In-place: this[i] = (this[i] > scalar).
void NVMatrix::biggerThanScalar(float scalar) {
    this->biggerThanScalar(scalar, *this);
}
// target[i] = (this[i] > scalar).
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::BiggerThanScalar(scalar), target);
}
// In-place: this[i] = (this[i] < scalar).
void NVMatrix::smallerThanScalar(float scalar) {
    this->smallerThanScalar(scalar, *this);
}
// target[i] = (this[i] < scalar).
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::SmallerThanScalar(scalar), target);
}
// target = scaleThis * this + scalar.
void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) {
    apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target);
}
// target = this + scalar.
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::AddScalar(scalar), target);
}
// In-place: this += scalar.
void NVMatrix::addScalar(float scalar) {
    this->addScalar(scalar, *this);
}
// target[i] = min(this[i], scalar).
void NVMatrix::minWithScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::MinWithScalar(scalar), target);
}
// In-place element-wise min with a scalar.
void NVMatrix::minWithScalar(float scalar) {
    this->minWithScalar(scalar, *this);
}
// target[i] = max(this[i], scalar).
void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::MaxWithScalar(scalar), target);
}
// In-place element-wise max with a scalar.
void NVMatrix::maxWithScalar(float scalar) {
    this->maxWithScalar(scalar, *this);
}
// target[i] = this[i] ^ p, element-wise.
void NVMatrix::pow(float p, NVMatrix& target) {
    apply(NVMatrixOps::Pow(p), target);
}
// In-place element-wise power.
void NVMatrix::pow(float p) {
    this->pow(p, *this);
}
// In-place: this *= _scale.
void NVMatrix::scale(float _scale) {
    this->scale(_scale, *this);
}
// target = _scale * this.
void NVMatrix::scale(float _scale, NVMatrix& target) {
    // Scaling by 1 into oneself is a no-op; skip the kernel launch entirely.
    if (_scale == 1 && &target == this) {
        return;
    }
    apply(NVMatrixOps::MultByScalar(_scale), target);
}
// Convenience overload: heap-allocates the result vector and aggregates
// into it. The caller takes ownership of the returned matrix.
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) {
    NVMatrix* result = new NVMatrix();
    _aggregate<Agg, BinaryOp>(axis, *result, agg, op);
    return *result;
}
// target = max of this along 'axis', overwriting target's contents.
void NVMatrix::max(int axis, NVMatrix& target) {
    _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
// this = scaleThis * this + scaleSum * sum(a, axis).
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) {
    if (scaleThis == 0) {
        // Existing contents are irrelevant; just write the scaled sum.
        a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum));
    } else {
        a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum));
    }
}
// target = sum of this along 'axis', overwriting target's contents.
void NVMatrix::sum(int axis, NVMatrix& target) {
    _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
/*
void NVMatrix::min(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::max(int axis) {
return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sum(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::min(int axis) {
return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
*/
/*
 * Computes the launch configuration for the total-aggregation kernels:
 * each thread handles roughly log2(n) elements, so *numCols = ceil(n / log2(n))
 * threads are requested, packed into DP_BLOCKSIZE-wide blocks.
 *
 * Fix: for n <= 1, ceil(log2(n)) is 0, so DIVUP(n, logn) divided by zero
 * (undefined behavior). Clamp the divisor to 1; results for n >= 2 are
 * unchanged.
 */
void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) {
    int logn = int(ceil(log(double(n)) / log(2.)));
    if (logn < 1) {
        logn = 1; // guard against division by zero for n <= 1
    }
    *numCols = DIVUP(n, logn);
    int numThreads = *numCols;
    *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE));
    *threads = dim3(DP_BLOCKSIZE);
}
/*
float NVMatrix::mean() {
return sum() / getNumElements();
}
float NVMatrix::sum() {
return _totalAgg(NVMatrixAggs::Sum());
}
float NVMatrix::max() {
return _totalAgg(NVMatrixAggs::Max());
}
float NVMatrix::min() {
return _totalAgg(NVMatrixAggs::Min());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg) {
assert(isContiguous());
dim3 blocks, threads;
int numCols;
// Sum most of it on GPU
NVMatrix* src = this;
for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) {
_sum_setParams(src->getNumElements(), &blocks, &threads, &numCols);
target = new NVMatrix(1, blocks.x);
kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg);
cutilCheckMsg("kTotalAgg: Kernel execution failed");
hipDeviceSynchronize(); // not really necessary?
delete (src == this ? NULL : src);
}
Matrix srcCPU(src->getNumRows(), src->getNumCols());
src->copyToHost(srcCPU);
if (src->getNumElements() > 1) { // Sum remainder on CPU
delete (src == this ? NULL : src);
if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) {
return srcCPU.sum();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) {
return srcCPU.max();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) {
return srcCPU.min();
} else {
assert(false);
}
}
return srcCPU(0,0);
}
*/
/*
* Fast dot product only for matrices with same transposedness.
float NVMatrix::dotProduct(NVMatrix& b) {
assert(isContiguous() && b.isContiguous());
assert(isSameDims(b));
assert(isTrans() == b.isTrans()); // see?
dim3 blocks, threads;
int numCols;
_sum_setParams(getNumElements(), &blocks, &threads, &numCols);
NVMatrix target(1, blocks.x);
kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements());
cutilCheckMsg("kDotProduct: Kernel execution failed");
hipDeviceSynchronize();
return target.sum();
}
float NVMatrix::norm2() {
return dotProduct(*this);
}
float NVMatrix::norm() {
return sqrt(norm2());
}
*/
/*
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
hipDeviceSynchronize();
Matrix hm = Matrix(_numRows, _numCols);
copyToHost(hm);
hm.print(startRow, rows, startCol, cols);
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
*/
// Debug helper: writes "<name>: <numRows>x<numCols>" to stdout.
void NVMatrix::printShape(const char* name) const {
    printf("%s: %dx%d\n", name, _numRows, _numCols);
}
| 05796d3ccea5d31b7ccc8fc24dfbac43e4390230.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NVMATRIX_EXPORT
#define _NVMATRIX_EXPORT
#endif
#include <set>
#include <vector>
#include <assert.h>
#include <cublas.h>
#include <cutil_inline.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <typeinfo>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <map>
using namespace std;
/*
* Device random number generator pointers.
*/
//map<int,curandGenerator_t> NVMatrix::rndGen;
map<int,curandState*> NVMatrix::rndDevStates;
pthread_mutex_t* NVMatrix::_rndMutex = makeMutex();
pthread_mutex_t* NVMatrix::makeMutex() {
pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));
pthread_mutex_init(m, NULL);
return m;
}
NVMatrix::NVMatrix(const CudaNdarray * view,
int numRows, int numCols, const char * msg)
{
if (!CudaNdarray_is_c_contiguous(view))
{
printf("Non contiguous input: %s\n", msg);
printf("Dims: ");
for (int i=0; i < view->nd; i++)
printf("%d ",CudaNdarray_HOST_STRIDES(view)[i]);
printf("\n");
assert(false);
}
//Check that view actually contains numRows * numCols elements
const int * dims = CudaNdarray_HOST_DIMS(view);
int total = 1;
for (int i = 0; i < view->nd; i++)
{
total *= dims[i];
}
if (total != numRows * numCols)
{
fprintf(stderr, "NVMatrix asked to make a view of a CudaNdarray with %d elements",total);
fprintf(stderr, " but told to arrange these in a %d x %d rectangle (of total size %d).\n",
numRows, numCols, numRows * numCols);
fprintf(stderr, "CudaNdarray dims: ");
for (int i = 0; i < view->nd; i++)
fprintf(stderr, "%d ", dims[i]);
fprintf(stderr, "\n");
assert(false);
}
//Make the view
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = false;
_isTrans = false;
_devData = view->devdata;
_stride = getLeadingDim();
}
void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) {
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_ownsData = true;
_isTrans = isTrans;
_devData = NULL;
if (_numElements > 0) {
cublasAlloc(_numElements, sizeof(float), (void**) &_devData);
checkCublasError("!!!! device memory allocation error\n");
}
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::NVMatrix() {
_init(0, 0, -1, false);
}
NVMatrix::NVMatrix(bool isTrans) {
_init(0, 0, -1, isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) {
_init(numRows, numCols, -1, isTrans);
}
/*
NVMatrix::NVMatrix(const Matrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
if (copy) {
copyFromHost(like);
}
}
*/
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
if (copy) {
like.copy(*this);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) {
_init(like.getNumRows(), like.getNumCols(), -1, like.isTrans());
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
NVMatrix::NVMatrix(const Matrix& like) {
_init(like.getNumRows(), like.getNumCols(), -1, false);
}
*/
NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) :
_numRows(numRows),
_numCols(numCols),
_numElements(numRows*numCols),
_ownsData(false),
_devData(devData),
_isTrans(isTrans) {
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::~NVMatrix() {
if(_ownsData && _numElements > 0) {
// This line was modified by Ian Goodfellow to use device_free
// so that theano may keep track of device memory usage
int status = device_free(_devData);
if (status != 0) {
fprintf(stderr, "!!!! memory free error\n");
exit(EXIT_FAILURE);
}
}
}
/*
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) {
if (resizeDeviceMatrix) {
resize(hostMatrix);
}
copyFromHost(hostMatrix);
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
// assert(getStride() == getLeadingDim());
assert(isSameDims(hostMatrix));
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
cublasStatus status = cublasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float),
hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write)\n");
exit( EXIT_FAILURE);
}
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
// assert(getStride() == getLeadingDim());
assert(isSameDims(hostMatrix));
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
// printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride());
cublasStatus status = cublasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float),
_devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim());
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read)\n");
exit( EXIT_FAILURE);
}
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
if (resizeTarget) {
hostMatrix.resize(_numRows, _numCols);
}
copyToHost(hostMatrix);
}
*/
void NVMatrix::copy(NVMatrix& dest) const {
dest.resize(*this);
copy(dest, 0, -1, 0, -1, 0, 0);
}
NVMatrix& NVMatrix::copy() const {
NVMatrix* c = new NVMatrix();
copy(*c);
return *c;
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const {
assert(isContiguous() && b.isContiguous() && target.isContiguous());
// assert(&target != &b);
assert(_numCols == b.getNumRows());
if(&target != this) {
target.resize(_numRows, b.getNumCols());
target.setTrans(true);
}
assert(target.getNumRows() == _numRows);
assert(target.getNumCols() == b.getNumCols());
if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer.");
}
cublasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols,
scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(),
0, target.getDevData(), getNumRows());
checkCublasError("cublasSgemm failed");
// cudaThreadSynchronize();
}
void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const {
rightMult(b, 1, target);
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) {
if (scaleThis == 0) {
a.rightMult(b, scaleAB, *this);
return;
}
assert(isContiguous());
assert(a.getNumCols() == b.getNumRows());
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) {
WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer.");
}
cublasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(),
scaleThis, _devData, getLeadingDim());
checkCublasError("cublasSgemm failed");
// cudaThreadSynchronize();
}
void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) {
addProduct(a, b, 1, 1);
}
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) {
assert(isRndInitialized());
assert(isContiguous() && target.isContiguous());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
kUnaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
cutilCheckMsg("kUnaryRandomize: Kernel execution failed");
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) {
assert(isRndInitialized());
assert(isContiguous() && data2.isContiguous() && target.isContiguous());
assert(isSameDims(data2));
assert(isTrans() == data2.isTrans());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
kBinaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
cutilCheckMsg("kBinaryRandomize: Kernel execution failed");
}
/* Function removed by Ian Goodfellow.
We do not need this function in theano / pylearn2 and it uses cudaMalloc directly.
If you need to enable it, modify it to use device_malloc instead.
Otherwise, theano will not be able to keep track of how much memory is used on
the device.
void NVMatrix::initRandom(unsigned long long seed) {
assert(!isRndInitialized());
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
rndDevStates[d] = NULL;
CUDA_CALL(cudaMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(curandState)));
pthread_mutex_unlock(_rndMutex);
kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one
cutilCheckMsg("initRandom: Kernel execution failed");
}
void NVMatrix::initRandom() {
NVMatrix::initRandom(time(0));
}
*/
curandState* NVMatrix::getCurandState() {
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
assert(rndDevStates.count(d) != 0);
curandState* r = rndDevStates[d];
pthread_mutex_unlock(_rndMutex);
return r;
}
int NVMatrix::getDeviceID() {
int d;
cudaGetDevice(&d);
return d;
}
bool NVMatrix::isRndInitialized() {
pthread_mutex_lock(_rndMutex);
bool b = rndDevStates.count(getDeviceID()) != 0;
pthread_mutex_unlock(_rndMutex);
return b;
}
/* Function removed by Ian Goodfellow due to not needing
it and it using cudaFree instead of device_free
void NVMatrix::destroyRandom() {
assert(isRndInitialized());
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
CUDA_CALL(cudaFree(rndDevStates[d]));
rndDevStates.erase(d);
pthread_mutex_unlock(_rndMutex);
} */
void NVMatrix::binarizeProbs() {
binarizeProbs(*this);
}
void NVMatrix::binarizeProbs(NVMatrix& target) {
_unaryRandomize(target, BinarizeUnaryRandomizer());
}
void NVMatrix::randomizeUniform() {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements()));
_unaryRandomize(*this, UniformUnaryRandomizer());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
randomizeGaussian(0, stdev);
}
void NVMatrix::randomizeGaussian(float mean, float stdev) {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(curandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev));
_unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev));
}
/*
* Kind of a hack since we don't actually need the contents of this matrix for it,
* so we don't really need a binary randomizer.
*/
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, GaussianBinaryRandomizer());
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
addGaussianNoise(stdev, *this);
}
void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) {
_unaryRandomize(target, AddGaussianUnaryRandomizer(stdev));
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) {
addGaussianNoise(stdevs, var, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
addGaussianNoise(stdevs, false, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) {
if (var) {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>());
} else {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>());
}
}
void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target);
}
void NVMatrix::biggerThan(NVMatrix& b) {
biggerThan(b, *this);
}
void NVMatrix::equals(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Equals(), b, target);
}
void NVMatrix::equals(NVMatrix& m) {
equals(m, *this);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target);
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow < _numRows);
assert(endRow > startRow && endRow <= _numRows);
assert(startCol >= 0 && startCol < _numCols);
assert(endCol > startCol && endCol <= _numCols);
}
/*
* The only place where stride is supported for now!
* Will ALWAYS return a view of the original data, sometimes non-contiguous.
*/
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans()) {
return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false);
}
return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true);
}
/* this will NEVER return a view */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
int sliceRows = endRow - startRow, sliceCols = endCol - startCol;
if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) {
target.resize(sliceRows, sliceCols);
}
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
/*
* Guaranteed to not change the data if the number of elements doesn't change.
* So you can use this to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols) {
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
// this assertion was removed by Ian Goodfellow because it seems to come too early
// assert(_ownsData);
if (_numElements != numRows * numCols) {
assert(_ownsData); // assert moved here by Ian Goodfellow
if (_numElements > 0) { // free old memory
// This line was modified by Ian Goodfellow to use device_free so theano may track device memory usage accurately
int status = device_free(_devData);
if (status != 0) {
fprintf(stderr, "!!!! memory free error: %X\n", status);
exit(EXIT_FAILURE);
}
}
if (numRows * numCols > 0) { // allocate new memory
cublasStatus status = cublasAlloc(numCols * numRows, sizeof(float), (void**) &_devData);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device memory allocation error\n");
exit(EXIT_FAILURE);
}
} else {
_devData = NULL;
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_stride = getLeadingDim();
}
return reallocated;
}
bool NVMatrix::resize(const NVMatrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
/*
bool NVMatrix::resize(const Matrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
*/
void NVMatrix::reshape(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
_stride = getLeadingDim();
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans);
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol;
NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol);
NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol);
srcSlice->apply(NVMatrixOps::Identity(), *destSlice);
delete srcSlice;
delete destSlice;
}
NVMatrix& NVMatrix::getTranspose() {
return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);;
}
void NVMatrix::transpose(NVMatrix& target) {
flipTrans(target);
target.setTrans(!target.isTrans());
target.reshape(target.getNumCols(), target.getNumRows());
}
void NVMatrix::transpose() {
int tmp = _numCols;
_numCols = _numRows;
_numRows = tmp;
_isTrans = !_isTrans;
}
bool NVMatrix::transpose(bool trans) {
bool oldTrans = _isTrans;
if (oldTrans != trans) {
transpose();
}
return oldTrans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
* the same dimensions, its layout in memory just changes.
*/
// Returns a newly allocated matrix holding this matrix's data in the opposite
// memory ordering (see the comment block above). Caller owns the result.
NVMatrix& NVMatrix::flipTrans() {
NVMatrix* meTrans = new NVMatrix(*this);
flipTrans(*meTrans);
return *meTrans;
}
// Writes this matrix's contents into target with the opposite memory
// ordering. Same dimensions, different layout; uses an Identity apply to
// perform the layout-changing copy.
void NVMatrix::flipTrans(NVMatrix& target) {
assert(&target != this);
target.resize(_numRows, _numCols);
target.setTrans(!isTrans());
apply(NVMatrixOps::Identity(), target);
}
// Elementwise binary operations. Each family has an in-place overload
// (target = *this) delegating to the overload with an explicit target,
// which in turn delegates to applyBinary with the matching functor.
// target = (this - b)^2, elementwise.
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target);
}
// target = scaleA * this + scaleB * b. Special cases: scaleA == 0 reduces to
// a scaled copy of b; scaleA == scaleB == 1 uses the cheaper plain Add.
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
if (scaleA == 0) {
b.scale(scaleB, target);
return;
}
if (scaleA == 1 && scaleB == 1) { // slight optimization
applyBinary(NVMatrixBinaryOps::Add(), b, target);
} else {
applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target);
}
}
// Convenience overloads of add(), all funneling into the 4-argument form.
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
add(b, 1, scaleB, target);
}
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
add(b, 1, target);
}
void NVMatrix::add(NVMatrix& b, float scaleB) {
add(b, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
add(b, scaleA, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b) {
add(b, 1, *this);
}
// Subtraction is addition with scaleB == -1.
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
add(b, -1, target);
}
void NVMatrix::subtract(NVMatrix& b) {
add(b, -1);
}
// Elementwise (Hadamard) product and quotient.
void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Multiply(), b, target);
}
void NVMatrix::eltwiseMult(NVMatrix& b) {
eltwiseMult(b, *this);
}
void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Divide(), b, target);
}
void NVMatrix::eltwiseDivide(NVMatrix& b) {
eltwiseDivide(b, *this);
}
// Tiles this matrix timesY times vertically and timesX times horizontally
// into target (resized to _numRows*timesY x _numCols*timesX, same
// transposedness). The kernel is launched with the in-memory width/height,
// so the argument order flips with the transposedness flag.
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
assert(isContiguous() && target.isContiguous());
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target.setTrans(_isTrans);
if(!isTrans()) {
kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numCols, _numRows, target._numCols, target._numRows);
} else {
kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numRows, _numCols, target._numRows, target._numCols);
}
cutilCheckMsg("Kernel execution failed");
}
// Matrix-vector broadcast operations, all delegating to applyBinaryV with
// the matching functor; in-place overloads pass *this as the target.
// target = this + scaleVec * vec (vec broadcast across rows or columns).
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target);
}
void NVMatrix::addVector(NVMatrix& vec) {
addVector(vec, 1, *this);
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
addVector(vec, scaleVec, *this);
}
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
addVector(vec, 1, target);
}
// target = (this == vec), elementwise equality against a broadcast vector.
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target);
}
void NVMatrix::equalsVector(NVMatrix& vec) {
equalsVector(vec, *this);
}
// target = this * vec (broadcast multiply).
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec) {
eltwiseMultByVector(vec, *this);
}
// target = this / vec (broadcast divide).
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) {
eltwiseDivideByVector(vec, *this);
}
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target);
}
/*
* num threads per block is ignored when summing rows (axis=1) because
* it has to be a power of 2.
*
* TODO: this is a mess, fix it. it works pretty fast but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
// Aggregates along one axis into target using aggregation functor Agg and
// combines the result with target's prior contents via BinaryOp.
// width/height are the in-memory (layout) dimensions; the axis test below
// maps the logical axis onto the layout. Dispatch strategy:
//   - aggregating along the leading (contiguous) dimension -> kDumbAggCols;
//   - aggregating across it with many rows -> kAggShortRows variants
//     specialized by row width;
//   - otherwise an iterative tree reduction with kAggRows, halving the
//     width each pass until one column remains.
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) {
assert(axis == 0 || axis == 1);
assert(isContiguous() && target.isContiguous());
assert(&target != this);
int width = _isTrans ? _numRows : _numCols;
int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
kDumbAggCols<Agg, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK>>>(_devData, target._devData, width, height, agg, op);
cutilCheckMsg("kDumbAggCols: Kernel execution failed");
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
if (height >= 16384) { // linear aggregation
// Many short rows: each thread aggregates a whole row linearly.
// The kernel is specialized on a row-width class for unrolling.
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
// Keep the grid's Y dimension under the hardware limit by trading
// Y blocks for X blocks.
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
if(width <= 16) {
if(width <= 4) {
kAggShortRows<Agg, BinaryOp, 1, 4><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else if(width <= 8) {
kAggShortRows<Agg, BinaryOp, 1, 8><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else if(width <= 12) {
kAggShortRows<Agg, BinaryOp, 1, 12><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else {
kAggShortRows<Agg, BinaryOp, 1, 16><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
}
} else if(width <= 32) {
kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else if(width <= 48){
kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else if(width <= 64){
kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
} else {
kAggShortRows2<Agg, BinaryOp><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
}
} else {
if (width >= 512) {
// Wide rows, few of them: one block aggregates a whole row.
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, std::min(1024, height));
kAggRows_wholerow_nosync<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
// dim3 threads(AWR_NUM_THREADS);
// dim3 blocks(1, std::min(1024, height));
// kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
} else {
// dim3 threads(AWR_NUM_THREADS);
// dim3 blocks(1, std::min(1024, height));
// kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
// Iterative tree reduction: each pass halves the row width
// (kAggRows reduces 2*numThreadsX elements per block), writing
// partials into a temporary until one column remains, which
// lands in target.
NVMatrix *prevSum = this;
while (prevSum->getLeadingDim() > 1) {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = std::min(height, NUM_BLOCKS_MAX);
NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
if(width <= 64) {
kAggRows<Agg, BinaryOp, 32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 128) {
kAggRows<Agg, BinaryOp, 64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 256) {
kAggRows<Agg, BinaryOp, 128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else if(width <= 512) {
kAggRows<Agg, BinaryOp, 256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
} else {
kAggRows<Agg, BinaryOp, 512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData,
width, height, nvSumAccum->getLeadingDim(), agg, op);
}
cutilCheckMsg("agg rows: Kernel execution failed");
cudaThreadSynchronize();
width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway
// Free intermediate partial-sum matrices (never the source).
if (prevSum != this) {
delete prevSum;
}
prevSum = nvSumAccum;
}
}
}
} else {
// width == 1: nothing to aggregate along the row; result is a copy.
copy(target);
}
}
}
// Elementwise scalar operations, each with an in-place overload delegating
// to the explicit-target overload, which applies the matching functor.
// 1 where lower <= x <= upper, else 0.
void NVMatrix::inRangeInc(float lower, float upper) {
inRangeInc(lower, upper, *this);
}
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<false>(lower, upper), target);
}
// 1 where lower < x < upper, else 0 (exclusive bounds).
void NVMatrix::inRangeExc(float lower, float upper) {
inRangeExc(lower, upper, *this);
}
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<true>(lower, upper), target);
}
// 1 where x > scalar, else 0.
void NVMatrix::biggerThanScalar(float scalar) {
biggerThanScalar(scalar, *this);
}
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::BiggerThanScalar(scalar), target);
}
// 1 where x < scalar, else 0.
void NVMatrix::smallerThanScalar(float scalar) {
smallerThanScalar(scalar, *this);
}
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::SmallerThanScalar(scalar), target);
}
// target = scaleThis * x + scalar.
void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) {
apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target);
}
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::AddScalar(scalar), target);
}
void NVMatrix::addScalar(float scalar) {
addScalar(scalar, *this);
}
// Elementwise min/max against a scalar.
void NVMatrix::minWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MinWithScalar(scalar), target);
}
void NVMatrix::minWithScalar(float scalar) {
minWithScalar(scalar, *this);
}
void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MaxWithScalar(scalar), target);
}
void NVMatrix::maxWithScalar(float scalar) {
maxWithScalar(scalar, *this);
}
// Elementwise power.
void NVMatrix::pow(float p, NVMatrix& target) {
apply(NVMatrixOps::Pow(p), target);
}
void NVMatrix::pow(float p) {
pow(p, *this);
}
// Elementwise scale; scaling by 1 into *this is a no-op and is skipped.
void NVMatrix::scale(float _scale) {
scale(_scale, *this);
}
void NVMatrix::scale(float _scale, NVMatrix& target) {
if (_scale != 1 || &target != this) { // optimize away scale by 1
apply(NVMatrixOps::MultByScalar(_scale), target);
}
}
// Allocating variant of _aggregate: aggregates into a fresh matrix owned by
// the caller.
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) {
NVMatrix *sumVec = new NVMatrix();
_aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op);
return *sumVec;
}
// target = max along axis (target's prior contents discarded via Second).
void NVMatrix::max(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
// *this = scaleThis * *this + scaleSum * sum(a, axis); scaleThis == 0 skips
// reading *this and just stores the scaled sum.
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) {
if (scaleThis != 0) {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum));
} else {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum));
}
}
// target = sum along axis.
void NVMatrix::sum(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
/*
void NVMatrix::min(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::max(int axis) {
return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sum(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::min(int axis) {
return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
*/
/*
 * Computes the launch configuration for the total-aggregation kernels:
 * splits n elements into *numCols partial results (roughly n / log2(n)),
 * one thread per partial, with DP_BLOCKSIZE threads per block and enough
 * blocks to cover them.
 */
void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) {
    assert(n > 0); // log() below requires a positive element count
    int logn = int(ceil(log(double(n)) / log(2.)));
    // log2(1) == 0: clamp so DIVUP below cannot divide by zero when n == 1.
    if (logn < 1) {
        logn = 1;
    }
    *numCols = DIVUP(n, logn);
    int numThreads = *numCols;
    *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE));
    *threads = dim3(DP_BLOCKSIZE);
}
/*
float NVMatrix::mean() {
return sum() / getNumElements();
}
float NVMatrix::sum() {
return _totalAgg(NVMatrixAggs::Sum());
}
float NVMatrix::max() {
return _totalAgg(NVMatrixAggs::Max());
}
float NVMatrix::min() {
return _totalAgg(NVMatrixAggs::Min());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg) {
assert(isContiguous());
dim3 blocks, threads;
int numCols;
// Sum most of it on GPU
NVMatrix* src = this;
for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) {
_sum_setParams(src->getNumElements(), &blocks, &threads, &numCols);
target = new NVMatrix(1, blocks.x);
kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg);
cutilCheckMsg("kTotalAgg: Kernel execution failed");
cudaThreadSynchronize(); // not really necessary?
delete (src == this ? NULL : src);
}
Matrix srcCPU(src->getNumRows(), src->getNumCols());
src->copyToHost(srcCPU);
if (src->getNumElements() > 1) { // Sum remainder on CPU
delete (src == this ? NULL : src);
if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) {
return srcCPU.sum();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) {
return srcCPU.max();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) {
return srcCPU.min();
} else {
assert(false);
}
}
return srcCPU(0,0);
}
*/
/*
* Fast dot product only for matrices with same transposedness.
float NVMatrix::dotProduct(NVMatrix& b) {
assert(isContiguous() && b.isContiguous());
assert(isSameDims(b));
assert(isTrans() == b.isTrans()); // see?
dim3 blocks, threads;
int numCols;
_sum_setParams(getNumElements(), &blocks, &threads, &numCols);
NVMatrix target(1, blocks.x);
kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements());
cutilCheckMsg("kDotProduct: Kernel execution failed");
cudaThreadSynchronize();
return target.sum();
}
float NVMatrix::norm2() {
return dotProduct(*this);
}
float NVMatrix::norm() {
return sqrt(norm2());
}
*/
/*
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
cudaThreadSynchronize();
Matrix hm = Matrix(_numRows, _numCols);
copyToHost(hm);
hm.print(startRow, rows, startCol, cols);
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
*/
// Debug helper: prints "name: RxC" (logical rows x columns) to stdout.
void NVMatrix::printShape(const char* name) const {
printf("%s: %dx%d\n", name, _numRows, _numCols);
}
|
cd2b7af11629e211da6e5ed81ce3b9842516a599.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/cusolver_wrappers.h>
#include <test_utils.h>
#include <raft/matrix/matrix.cuh>
#include <solver/sgd.cuh>
namespace ML {
namespace Solver {
using namespace MLCommon;
// Test parameters: comparison tolerance plus the shapes of the two
// hard-coded datasets and the SGD mini-batch size.
template <typename T>
struct SgdInputs {
T tol;          // absolute tolerance for devArrMatch comparisons
int n_row;      // rows of the linear-regression dataset
int n_col;      // cols of the linear-regression dataset
int n_row2;     // rows of the logistic/SVM datasets
int n_col2;     // cols of the logistic/SVM datasets
int batch_size; // SGD mini-batch size
};
// Parameterized fixture: SetUp() fits three small SGD problems on
// hard-coded data (linear regression, logistic regression, hinge-loss SVM)
// and leaves device-side results for the TEST_P bodies to compare against
// the *_ref buffers. TearDown() frees every device allocation.
// NOTE(review): locals such as "T data_h[len]" are variable-length arrays,
// a compiler extension rather than standard C++.
template <typename T>
class SgdTest : public ::testing::TestWithParam<SgdInputs<T>> {
protected:
// Fits y = Xw on an n_row x n_col system twice: first without intercept
// using the ADAPTIVE learning-rate schedule (-> coef), then with an
// intercept using a CONSTANT rate (-> coef2 / intercept2).
void linearRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
raft::allocate(data, len);
raft::allocate(labels, params.n_row);
raft::allocate(coef, params.n_col, true);
raft::allocate(coef2, params.n_col, true);
raft::allocate(coef_ref, params.n_col);
raft::allocate(coef2_ref, params.n_col);
T data_h[len] = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0};
raft::update_device(data, data_h, len, stream);
T labels_h[params.n_row] = {6.0, 8.0, 9.0, 11.0};
raft::update_device(labels, labels_h, params.n_row, stream);
T coef_ref_h[params.n_col] = {2.087, 2.5454557};
raft::update_device(coef_ref, coef_ref_h, params.n_col, stream);
T coef2_ref_h[params.n_col] = {1.000001, 1.9999998};
raft::update_device(coef2_ref, coef2_ref_h, params.n_col, stream);
bool fit_intercept = false;
intercept = T(0);
int epochs = 2000;
T lr = T(0.01);
ML::lr_type lr_type = ML::lr_type::ADAPTIVE;
T power_t = T(0.5);
T alpha = T(0.0001);
T l1_ratio = T(0.15);
bool shuffle = true;
T tol = T(1e-10);
ML::loss_funct loss = ML::loss_funct::SQRD_LOSS;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(handle, data, params.n_row, params.n_col, labels, coef, &intercept,
fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss,
pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream);
// Second fit: same data, now estimating the intercept with a constant lr.
fit_intercept = true;
intercept2 = T(0);
sgdFit(handle, data, params.n_row, params.n_col, labels, coef2, &intercept2,
fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr,
power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change,
stream);
}
// Fits a log-loss classifier on an n_row2 x n_col2 dataset, then stores
// binary predictions for a held-out test set in pred_log. The coefficient
// buffer is method-local and freed here.
void logisticRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
T *coef_class;
raft::allocate(data_logreg, len);
raft::allocate(data_logreg_test, len);
raft::allocate(labels_logreg, params.n_row2);
raft::allocate(coef_class, params.n_col2, true);
raft::allocate(pred_log, params.n_row2);
raft::allocate(pred_log_ref, params.n_row2);
T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15,
2.65, 2.65, 3.25, -0.15, -7.35, -7.35};
raft::update_device(data_logreg, data_h, len, stream);
T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5,
-3.55, -20.5, -1.3, 3.0, -5.0, 15.0};
raft::update_device(data_logreg_test, data_test_h, len, stream);
T labels_logreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0};
raft::update_device(labels_logreg, labels_logreg_h, params.n_row2, stream);
T pred_log_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0};
raft::update_device(pred_log_ref, pred_log_ref_h, params.n_row2, stream);
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(0.0);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::LOG;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(handle, data_logreg, params.n_row2, params.n_col2, labels_logreg,
coef_class, &intercept_class, fit_intercept, params.batch_size,
epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle,
tol, n_iter_no_change, stream);
sgdPredictBinaryClass(handle, data_logreg_test, params.n_row2,
params.n_col2, coef_class, intercept_class, pred_log,
loss, stream);
CUDA_CHECK(hipFree(coef_class));
}
// Same dataset shape with hinge loss + L2 penalty; predictions -> pred_svm.
void svmTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
T *coef_class;
raft::allocate(data_svmreg, len);
raft::allocate(data_svmreg_test, len);
raft::allocate(labels_svmreg, params.n_row2);
raft::allocate(coef_class, params.n_col2, true);
raft::allocate(pred_svm, params.n_row2);
raft::allocate(pred_svm_ref, params.n_row2);
T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15,
2.65, 2.65, 3.25, -0.15, -7.35, -7.35};
raft::update_device(data_svmreg, data_h, len, stream);
T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5,
-3.55, -20.5, -1.3, 3.0, -5.0, 15.0};
raft::update_device(data_svmreg_test, data_test_h, len, stream);
T labels_svmreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0};
raft::update_device(labels_svmreg, labels_svmreg_h, params.n_row2, stream);
T pred_svm_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0};
raft::update_device(pred_svm_ref, pred_svm_ref_h, params.n_row2, stream);
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(1) / T(epochs);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::HINGE;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2;
int n_iter_no_change = 10;
sgdFit(handle, data_svmreg, params.n_row2, params.n_col2, labels_svmreg,
coef_class, &intercept_class, fit_intercept, params.batch_size,
epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle,
tol, n_iter_no_change, stream);
sgdPredictBinaryClass(handle, data_svmreg_test, params.n_row2,
params.n_col2, coef_class, intercept_class, pred_svm,
loss, stream);
CUDA_CHECK(hipFree(coef_class));
}
// Creates the stream shared by all three sub-tests and runs them.
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
linearRegressionTest();
logisticRegressionTest();
svmTest();
}
// Frees every device buffer allocated by the sub-tests, then the stream.
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
CUDA_CHECK(hipFree(coef_ref));
CUDA_CHECK(hipFree(coef2));
CUDA_CHECK(hipFree(coef2_ref));
CUDA_CHECK(hipFree(data_logreg));
CUDA_CHECK(hipFree(data_logreg_test));
CUDA_CHECK(hipFree(labels_logreg));
CUDA_CHECK(hipFree(data_svmreg));
CUDA_CHECK(hipFree(data_svmreg_test));
CUDA_CHECK(hipFree(labels_svmreg));
CUDA_CHECK(hipFree(pred_svm));
CUDA_CHECK(hipFree(pred_svm_ref));
CUDA_CHECK(hipFree(pred_log));
CUDA_CHECK(hipFree(pred_log_ref));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
SgdInputs<T> params;
T *data, *labels, *coef, *coef_ref;   // linear-regression buffers
T *coef2, *coef2_ref;                 // with-intercept fit results
T *data_logreg, *data_logreg_test, *labels_logreg;
T *data_svmreg, *data_svmreg_test, *labels_svmreg;
T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref; // n_row2 elements each
T intercept, intercept2;
hipStream_t stream;
raft::handle_t handle;
};
const std::vector<SgdInputs<float>> inputsf2 = {{0.01f, 4, 2, 4, 3, 2}};
const std::vector<SgdInputs<double>> inputsd2 = {{0.01, 4, 2, 4, 3, 2}};
typedef SgdTest<float> SgdTestF;
// Compares all four fitted results against their references within
// params.tol (absolute-approximate match).
// NOTE(review): pred_log/pred_svm hold params.n_row2 elements but are
// compared over params.n_row; the two happen to be equal (both 4) for the
// instantiated inputs -- confirm before adding new parameter sets.
TEST_P(SgdTestF, Fit) {
ASSERT_TRUE(raft::devArrMatch(coef_ref, coef, params.n_col,
raft::CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(coef2_ref, coef2, params.n_col,
raft::CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(pred_log_ref, pred_log, params.n_row,
raft::CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(pred_svm_ref, pred_svm, params.n_row,
raft::CompareApproxAbs<float>(params.tol)));
}
typedef SgdTest<double> SgdTestD;
// Double-precision variant of the Fit checks; same tolerance semantics.
// NOTE(review): as in the float variant, the prediction comparisons use
// params.n_row although the buffers hold params.n_row2 elements (equal for
// the current inputs).
TEST_P(SgdTestD, Fit) {
ASSERT_TRUE(raft::devArrMatch(coef_ref, coef, params.n_col,
raft::CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(coef2_ref, coef2, params.n_col,
raft::CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(pred_log_ref, pred_log, params.n_row,
raft::CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(raft::devArrMatch(pred_svm_ref, pred_svm, params.n_row,
raft::CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2));
} // namespace Solver
} // end namespace ML
| cd2b7af11629e211da6e5ed81ce3b9842516a599.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/cusolver_wrappers.h>
#include <test_utils.h>
#include <raft/matrix/matrix.cuh>
#include <solver/sgd.cuh>
namespace ML {
namespace Solver {
using namespace MLCommon;
// Test parameters: comparison tolerance plus the shapes of the two
// hard-coded datasets and the SGD mini-batch size.
template <typename T>
struct SgdInputs {
T tol;          // absolute tolerance for devArrMatch comparisons
int n_row;      // rows of the linear-regression dataset
int n_col;      // cols of the linear-regression dataset
int n_row2;     // rows of the logistic/SVM datasets
int n_col2;     // cols of the logistic/SVM datasets
int batch_size; // SGD mini-batch size
};
// Parameterized fixture: SetUp() fits three small SGD problems on
// hard-coded data (linear regression, logistic regression, hinge-loss SVM)
// and leaves device-side results for the TEST_P bodies to compare against
// the *_ref buffers. TearDown() frees every device allocation.
// NOTE(review): locals such as "T data_h[len]" are variable-length arrays,
// a compiler extension rather than standard C++.
template <typename T>
class SgdTest : public ::testing::TestWithParam<SgdInputs<T>> {
 protected:
// Fits y = Xw on an n_row x n_col system twice: first without intercept
// using the ADAPTIVE learning-rate schedule (-> coef), then with an
// intercept using a CONSTANT rate (-> coef2 / intercept2).
  void linearRegressionTest() {
    params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
    int len = params.n_row * params.n_col;
    raft::allocate(data, len);
    raft::allocate(labels, params.n_row);
    raft::allocate(coef, params.n_col, true);
    raft::allocate(coef2, params.n_col, true);
    raft::allocate(coef_ref, params.n_col);
    raft::allocate(coef2_ref, params.n_col);
    T data_h[len] = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0};
    raft::update_device(data, data_h, len, stream);
    T labels_h[params.n_row] = {6.0, 8.0, 9.0, 11.0};
    raft::update_device(labels, labels_h, params.n_row, stream);
    T coef_ref_h[params.n_col] = {2.087, 2.5454557};
    raft::update_device(coef_ref, coef_ref_h, params.n_col, stream);
    T coef2_ref_h[params.n_col] = {1.000001, 1.9999998};
    raft::update_device(coef2_ref, coef2_ref_h, params.n_col, stream);
    bool fit_intercept = false;
    intercept = T(0);
    int epochs = 2000;
    T lr = T(0.01);
    ML::lr_type lr_type = ML::lr_type::ADAPTIVE;
    T power_t = T(0.5);
    T alpha = T(0.0001);
    T l1_ratio = T(0.15);
    bool shuffle = true;
    T tol = T(1e-10);
    ML::loss_funct loss = ML::loss_funct::SQRD_LOSS;
    MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
    int n_iter_no_change = 10;
    sgdFit(handle, data, params.n_row, params.n_col, labels, coef, &intercept,
           fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss,
           pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change, stream);
    // Second fit: same data, now estimating the intercept with a constant lr.
    fit_intercept = true;
    intercept2 = T(0);
    sgdFit(handle, data, params.n_row, params.n_col, labels, coef2, &intercept2,
           fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr,
           power_t, loss, pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change,
           stream);
  }
// Fits a log-loss classifier on an n_row2 x n_col2 dataset, then stores
// binary predictions for a held-out test set in pred_log. The coefficient
// buffer is method-local and freed here.
  void logisticRegressionTest() {
    params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
    int len = params.n_row2 * params.n_col2;
    T *coef_class;
    raft::allocate(data_logreg, len);
    raft::allocate(data_logreg_test, len);
    raft::allocate(labels_logreg, params.n_row2);
    raft::allocate(coef_class, params.n_col2, true);
    raft::allocate(pred_log, params.n_row2);
    raft::allocate(pred_log_ref, params.n_row2);
    T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15,
                     2.65, 2.65, 3.25, -0.15, -7.35, -7.35};
    raft::update_device(data_logreg, data_h, len, stream);
    T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5,
                          -3.55, -20.5, -1.3, 3.0, -5.0, 15.0};
    raft::update_device(data_logreg_test, data_test_h, len, stream);
    T labels_logreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0};
    raft::update_device(labels_logreg, labels_logreg_h, params.n_row2, stream);
    T pred_log_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0};
    raft::update_device(pred_log_ref, pred_log_ref_h, params.n_row2, stream);
    bool fit_intercept = true;
    T intercept_class = T(0);
    int epochs = 1000;
    T lr = T(0.05);
    ML::lr_type lr_type = ML::lr_type::CONSTANT;
    T power_t = T(0.5);
    T alpha = T(0.0);
    T l1_ratio = T(0.0);
    bool shuffle = false;
    T tol = T(0.0);
    ML::loss_funct loss = ML::loss_funct::LOG;
    MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
    int n_iter_no_change = 10;
    sgdFit(handle, data_logreg, params.n_row2, params.n_col2, labels_logreg,
           coef_class, &intercept_class, fit_intercept, params.batch_size,
           epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle,
           tol, n_iter_no_change, stream);
    sgdPredictBinaryClass(handle, data_logreg_test, params.n_row2,
                          params.n_col2, coef_class, intercept_class, pred_log,
                          loss, stream);
    CUDA_CHECK(cudaFree(coef_class));
  }
// Same dataset shape with hinge loss + L2 penalty; predictions -> pred_svm.
  void svmTest() {
    params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
    int len = params.n_row2 * params.n_col2;
    T *coef_class;
    raft::allocate(data_svmreg, len);
    raft::allocate(data_svmreg_test, len);
    raft::allocate(labels_svmreg, params.n_row2);
    raft::allocate(coef_class, params.n_col2, true);
    raft::allocate(pred_svm, params.n_row2);
    raft::allocate(pred_svm_ref, params.n_row2);
    T data_h[len] = {0.1, -2.1, 5.4, 5.4, -1.5, -2.15,
                     2.65, 2.65, 3.25, -0.15, -7.35, -7.35};
    raft::update_device(data_svmreg, data_h, len, stream);
    T data_test_h[len] = {0.3, 1.1, 2.1, -10.1, 0.5, 2.5,
                          -3.55, -20.5, -1.3, 3.0, -5.0, 15.0};
    raft::update_device(data_svmreg_test, data_test_h, len, stream);
    T labels_svmreg_h[params.n_row2] = {0.0, 1.0, 1.0, 0.0};
    raft::update_device(labels_svmreg, labels_svmreg_h, params.n_row2, stream);
    T pred_svm_ref_h[params.n_row2] = {1.0, 0.0, 1.0, 1.0};
    raft::update_device(pred_svm_ref, pred_svm_ref_h, params.n_row2, stream);
    bool fit_intercept = true;
    T intercept_class = T(0);
    int epochs = 1000;
    T lr = T(0.05);
    ML::lr_type lr_type = ML::lr_type::CONSTANT;
    T power_t = T(0.5);
    T alpha = T(1) / T(epochs);
    T l1_ratio = T(0.0);
    bool shuffle = false;
    T tol = T(0.0);
    ML::loss_funct loss = ML::loss_funct::HINGE;
    MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2;
    int n_iter_no_change = 10;
    sgdFit(handle, data_svmreg, params.n_row2, params.n_col2, labels_svmreg,
           coef_class, &intercept_class, fit_intercept, params.batch_size,
           epochs, lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle,
           tol, n_iter_no_change, stream);
    sgdPredictBinaryClass(handle, data_svmreg_test, params.n_row2,
                          params.n_col2, coef_class, intercept_class, pred_svm,
                          loss, stream);
    CUDA_CHECK(cudaFree(coef_class));
  }
// Creates the stream shared by all three sub-tests and runs them.
  void SetUp() override {
    CUDA_CHECK(cudaStreamCreate(&stream));
    handle.set_stream(stream);
    linearRegressionTest();
    logisticRegressionTest();
    svmTest();
  }
// Frees every device buffer allocated by the sub-tests, then the stream.
  void TearDown() override {
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaFree(labels));
    CUDA_CHECK(cudaFree(coef));
    CUDA_CHECK(cudaFree(coef_ref));
    CUDA_CHECK(cudaFree(coef2));
    CUDA_CHECK(cudaFree(coef2_ref));
    CUDA_CHECK(cudaFree(data_logreg));
    CUDA_CHECK(cudaFree(data_logreg_test));
    CUDA_CHECK(cudaFree(labels_logreg));
    CUDA_CHECK(cudaFree(data_svmreg));
    CUDA_CHECK(cudaFree(data_svmreg_test));
    CUDA_CHECK(cudaFree(labels_svmreg));
    CUDA_CHECK(cudaFree(pred_svm));
    CUDA_CHECK(cudaFree(pred_svm_ref));
    CUDA_CHECK(cudaFree(pred_log));
    CUDA_CHECK(cudaFree(pred_log_ref));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  SgdInputs<T> params;
  T *data, *labels, *coef, *coef_ref;   // linear-regression buffers
  T *coef2, *coef2_ref;                 // with-intercept fit results
  T *data_logreg, *data_logreg_test, *labels_logreg;
  T *data_svmreg, *data_svmreg_test, *labels_svmreg;
  T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref; // n_row2 elements each
  T intercept, intercept2;
  cudaStream_t stream;
  raft::handle_t handle;
};
const std::vector<SgdInputs<float>> inputsf2 = {{0.01f, 4, 2, 4, 3, 2}};
const std::vector<SgdInputs<double>> inputsd2 = {{0.01, 4, 2, 4, 3, 2}};
typedef SgdTest<float> SgdTestF;
// Compares all four fitted results against their references within
// params.tol (absolute-approximate match).
// NOTE(review): pred_log/pred_svm hold params.n_row2 elements but are
// compared over params.n_row; the two happen to be equal (both 4) for the
// instantiated inputs -- confirm before adding new parameter sets.
TEST_P(SgdTestF, Fit) {
  ASSERT_TRUE(raft::devArrMatch(coef_ref, coef, params.n_col,
                                raft::CompareApproxAbs<float>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(coef2_ref, coef2, params.n_col,
                                raft::CompareApproxAbs<float>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(pred_log_ref, pred_log, params.n_row,
                                raft::CompareApproxAbs<float>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(pred_svm_ref, pred_svm, params.n_row,
                                raft::CompareApproxAbs<float>(params.tol)));
}
typedef SgdTest<double> SgdTestD;
// Double-precision variant of the Fit checks; same tolerance semantics.
// NOTE(review): as in the float variant, the prediction comparisons use
// params.n_row although the buffers hold params.n_row2 elements (equal for
// the current inputs).
TEST_P(SgdTestD, Fit) {
  ASSERT_TRUE(raft::devArrMatch(coef_ref, coef, params.n_col,
                                raft::CompareApproxAbs<double>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(coef2_ref, coef2, params.n_col,
                                raft::CompareApproxAbs<double>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(pred_log_ref, pred_log, params.n_row,
                                raft::CompareApproxAbs<double>(params.tol)));
  ASSERT_TRUE(raft::devArrMatch(pred_svm_ref, pred_svm, params.n_row,
                                raft::CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2));
} // namespace Solver
} // end namespace ML
|
dde61e67b7a81efc41910b16180cef1834792a0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/math_functions.hpp"
namespace caffe9 {
// RMSProp parameter update, one element per CUDA_KERNEL_LOOP iteration:
//   h <- rms_decay * h + (1 - rms_decay) * g^2   (running avg of squared grad)
//   g <- local_rate * g / (sqrt(h) + delta)
// Fix: accumulate in Dtype instead of float so the Dtype=double
// instantiation (see the explicit instantiations below) does not lose
// precision through an intermediate single-precision truncation.
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
    Dtype rms_decay, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    const Dtype gi = g[i];
    const Dtype hi = h[i] = rms_decay * h[i] + (1 - rms_decay) * gi * gi;
    g[i] = local_rate * gi / (sqrt(hi) + delta);
  }
}
// Launches RMSPropUpdate over N elements with Caffe's standard 1-D launch
// configuration, then checks for launch/execution errors.
// Fix: the automatic hipify conversion garbled the launch -- it spliced
// "hipLaunchKernelGGL((" into the NOLINT comment and left the dim3/argument
// list dangling after a bare "RMSPropUpdate<Dtype>", which does not compile.
// Restored as a proper hipLaunchKernelGGL call.
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
    Dtype delta, Dtype local_rate) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(HIP_KERNEL_NAME(RMSPropUpdate<Dtype>),
      dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, g, h, rms_decay, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe9
| dde61e67b7a81efc41910b16180cef1834792a0e.cu | #include "caffe/util/math_functions.hpp"
namespace caffe9 {
// RMSProp update, one element per loop index i in [0, N):
//   h[i] <- rms_decay * h[i] + (1 - rms_decay) * g[i]^2
//   g[i] <- local_rate * g[i] / (sqrt(h[i]) + delta)
// g is the gradient buffer (updated in place), h the running mean of
// squared gradients.
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
Dtype rms_decay, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
// NOTE(review): intermediates are float even for Dtype == double —
// presumably intentional upstream; confirm the precision is acceptable.
float gi = g[i];
float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
g[i] = local_rate * g[i] / (sqrt(hi) + delta);
}
}
// Host wrapper: launches RMSPropUpdate<Dtype> over all N elements using
// Caffe's standard grid/block sizing macros, then checks the launch with
// CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
Dtype delta, Dtype local_rate) {
RMSPropUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, rms_decay, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe9
|
a1eaf01bdabf9246a43c79c6141cdeaeb8ae8642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <stdint.h>
#include <cassert>
#include <iostream>
#include <stdio.h>
#include "cudacommon.h"
#include "clock.h"
using namespace std;
#define XOR(a, b, mask1, mask2) ((a ^ b) & mask1 & mask2)
// One block per database template, one thread per rotation of the probe
// template (blockDim.x <= MAX_ROTS). Each thread computes the fractional
// Hamming distance between its rotation and the block's database template;
// thread 0 then writes the minimum over all rotations to
// distances[templateIdx]. Requires templateWidth*templateHeight to be a
// multiple of 4 so the byte buffers can be walked as 32-bit words.
__global__ void doGPUMatchKernel(const uint8_t* rotatedTemplates, const uint8_t* rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* distances)
{
    __shared__ float hammingDistances[MAX_ROTS];

    unsigned templateIdx = blockIdx.x;
    unsigned rotationIdx = threadIdx.x;

    // Bounds guard. Valid indices are 0 .. numberOfTemplates-1; the original
    // '>' comparison let blockIdx.x == numberOfTemplates slip through.
    // templateIdx is uniform within a block, so this early return can never
    // split the block across the __syncthreads() barriers below.
    if (templateIdx >= database.numberOfTemplates) {
        return;
    }

    size_t templateSize = database.templateWidth * database.templateHeight;
    size_t templateWords = templateSize / 4; // 4 == sizeof(uint32_t)

    // Reinterpret the byte buffers as 32-bit words so XOR/popcount handle
    // 32 template bits per iteration.
    uint32_t* rotatedTemplate = (uint32_t*)(rotatedTemplates + rotationIdx*templateSize);
    uint32_t* rotatedMask = (uint32_t*)(rotatedMasks + rotationIdx*templateSize);
    uint32_t* otherTemplate = (uint32_t*)(database.d_templates + templateIdx*templateSize);
    uint32_t* otherMask = (uint32_t*)(database.d_masks + templateIdx*templateSize);

    size_t nonZeroBits = 0, totalBits = 0;
    uint32_t word1, mask1;
    // Database word/mask for the current position: loaded once by thread 0
    // and broadcast to the whole block through shared memory.
    __shared__ uint32_t word2, mask2;
    for (size_t i = 0; i < templateWords; i++) {
        word1 = rotatedTemplate[i];
        mask1 = rotatedMask[i];
        if (threadIdx.x == 0) {
            word2 = otherTemplate[i];
            mask2 = otherMask[i];
        }
        __syncthreads(); // publish word2/mask2 before anyone reads them

        // __popc(x) returns the number of set bits in 32-bit integer x.
        uint32_t x = XOR(word1, word2, mask1, mask2);
        nonZeroBits += __popc(x);
        totalBits += __popc(mask1 & mask2);

        // Barrier before thread 0 overwrites word2/mask2 in the next
        // iteration: without it that store races against the reads above
        // (blocks can span multiple warps, so lockstep is not guaranteed).
        __syncthreads();
    }
    // NOTE(review): if the two masks never overlap, totalBits == 0 and this
    // produces NaN — presumably masks always share coverage; confirm.
    hammingDistances[rotationIdx] = float(nonZeroBits) / float(totalBits);
    __syncthreads();

    // Thread 0 reduces the per-rotation distances to their minimum.
    if (threadIdx.x == 0) {
        float minHD = 1.0;
        for (int i = 0; i < blockDim.x; i++) {
            minHD = min(minHD, hammingDistances[i]);
        }
        distances[templateIdx] = minHD;
    }
}
// A-contrario variant: the template is split column-wise into blockDim.y
// parts; thread (rotationIdx, part) accumulates the Hamming distance of its
// part under its rotation. For each part, the threadIdx.x == 0 thread writes
// the minimum over rotations to distances[nParts*templateIdx + part].
// Assumes database.templateWidth is a multiple of 4 (see the retained
// assert) so rows can be walked as 32-bit words.
__global__ void doGPUAContrarioMatchKernel(const uint8_t* rotatedTemplates, const uint8_t* rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* distances)
{
    __shared__ float hammingDistances[MAX_PARTS][MAX_ROTS];

    unsigned templateIdx = blockIdx.x;
    unsigned part = threadIdx.y;
    unsigned nParts = blockDim.y;
    unsigned rotationIdx = threadIdx.x;
    //assert((database.templateWidth % 4) == 0);

    size_t templateSize = database.templateWidth * database.templateHeight;
    uint32_t* rotatedTemplate = (uint32_t*)(rotatedTemplates + rotationIdx*templateSize);
    uint32_t* rotatedMask = (uint32_t*)(rotatedMasks + rotationIdx*templateSize);
    uint32_t* otherTemplate = (uint32_t*)(database.d_templates + templateIdx*templateSize);
    uint32_t* otherMask = (uint32_t*)(database.d_masks + templateIdx*templateSize);

    unsigned widthRows = database.templateWidth / 4;                // row width in 32-bit words
    unsigned partWidthWords = ceil(float(widthRows)/float(nParts)); // part width in 32-bit words

    size_t nonZeroBits = 0, totalBits = 0;
    uint32_t word1, mask1;
    // Per-part database word/mask, loaded once by the x == 0 thread of each
    // part and broadcast through shared memory.
    // NOTE(review): indexed by `part` but sized MAX_ROTS — safe only if
    // MAX_PARTS <= MAX_ROTS; confirm the constants.
    __shared__ uint32_t words2[MAX_ROTS], masks2[MAX_ROTS];
    uint32_t word2, mask2;
    unsigned w0row = floor((float(widthRows)/float(nParts))*float(part)); // first word of this part in each row
    unsigned idx;
    for (unsigned row = 0; row < database.templateHeight; row++) {
        for (unsigned col = 0; col < partWidthWords; col++) {
            idx = row*widthRows + w0row + col;
            word1 = rotatedTemplate[idx];
            mask1 = rotatedMask[idx];
            if (threadIdx.x == 0) {
                words2[part] = otherTemplate[idx];
                masks2[part] = otherMask[idx];
            }
            __syncthreads(); // publish this part's word/mask to all rotations
            word2 = words2[part];
            mask2 = masks2[part];

            uint32_t x = XOR(word1, word2, mask1, mask2);
            nonZeroBits += __popc(x);
            totalBits += __popc(mask1 & mask2);

            // Barrier before the x == 0 threads overwrite words2/masks2 for
            // the next word; without it the store races against the reads
            // above. Both loop bounds are uniform across the block, so every
            // thread reaches this barrier.
            __syncthreads();
        }
    }
    hammingDistances[part][threadIdx.x] = float(nonZeroBits) / float(totalBits);
    __syncthreads();

    // One thread per part reduces over the rotation dimension.
    if (threadIdx.x == 0) {
        float minHD = 1.0;
        for (int i = 0; i < blockDim.x; i++) {
            minHD = min(minHD, hammingDistances[part][i]);
        }
        distances[nParts*templateIdx+part] = minHD;
    }
}
// Wrapper functions to invoke from C++
// Host-side launch shim so C++ translation units can invoke the match
// kernel: forwards all arguments with the caller-supplied grid/block
// configuration, default stream, no dynamic shared memory.
void doGPUMatchKernelWrapper(dim3 blockSize, dim3 gridSize, const uint8_t* d_rotatedTemplates, const uint8_t* d_rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* d_distances)
{
    hipLaunchKernelGGL(doGPUMatchKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                       d_rotatedTemplates, d_rotatedMasks, nRotatedTemplates,
                       database, d_distances);
}
// Host-side launch shim for the a-contrario match kernel: same forwarding
// pattern as doGPUMatchKernelWrapper (default stream, no dynamic shared
// memory).
void doGPUAContrarioMatchKernelWrapper(dim3 blockSize, dim3 gridSize, const uint8_t* d_rotatedTemplates, const uint8_t* d_rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* d_distances)
{
    hipLaunchKernelGGL(doGPUAContrarioMatchKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                       d_rotatedTemplates, d_rotatedMasks, nRotatedTemplates,
                       database, d_distances);
}
| a1eaf01bdabf9246a43c79c6141cdeaeb8ae8642.cu | #include <vector>
#include <stdint.h>
#include <cassert>
#include <iostream>
#include <stdio.h>
#include "cudacommon.h"
#include "clock.h"
using namespace std;
#define XOR(a, b, mask1, mask2) ((a ^ b) & mask1 & mask2)
// One block per database template, one thread per rotation of the probe
// template (blockDim.x <= MAX_ROTS). Each thread computes the fractional
// Hamming distance of its rotation against the block's database template;
// thread 0 writes the minimum over rotations to distances[templateIdx].
// Requires templateWidth*templateHeight to be a multiple of 4 so the byte
// buffers can be traversed as 32-bit words.
__global__ void doGPUMatchKernel(const uint8_t* rotatedTemplates, const uint8_t* rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* distances)
{
    __shared__ float hammingDistances[MAX_ROTS];

    unsigned templateIdx = blockIdx.x;
    unsigned rotationIdx = threadIdx.x;

    // Bounds guard: valid indices are 0 .. numberOfTemplates-1 (the original
    // '>' let blockIdx.x == numberOfTemplates through). templateIdx is
    // uniform per block, so the early return never splits the block across
    // the barriers below.
    if (templateIdx >= database.numberOfTemplates) {
        return;
    }

    size_t templateSize = database.templateWidth * database.templateHeight;
    size_t templateWords = templateSize / 4; // 4 == sizeof(uint32_t)

    // View the byte buffers as words so XOR/popcount cover 32 bits at once.
    uint32_t* rotatedTemplate = (uint32_t*)(rotatedTemplates + rotationIdx*templateSize);
    uint32_t* rotatedMask = (uint32_t*)(rotatedMasks + rotationIdx*templateSize);
    uint32_t* otherTemplate = (uint32_t*)(database.d_templates + templateIdx*templateSize);
    uint32_t* otherMask = (uint32_t*)(database.d_masks + templateIdx*templateSize);

    size_t nonZeroBits = 0, totalBits = 0;
    uint32_t word1, mask1;
    // Current database word/mask, loaded by thread 0 and broadcast through
    // shared memory.
    __shared__ uint32_t word2, mask2;
    for (size_t i = 0; i < templateWords; i++) {
        word1 = rotatedTemplate[i];
        mask1 = rotatedMask[i];
        if (threadIdx.x == 0) {
            word2 = otherTemplate[i];
            mask2 = otherMask[i];
        }
        __syncthreads(); // publish word2/mask2 before anyone reads them

        // __popc(x) counts the set bits of the 32-bit integer x.
        uint32_t x = XOR(word1, word2, mask1, mask2);
        nonZeroBits += __popc(x);
        totalBits += __popc(mask1 & mask2);

        // Barrier before thread 0 overwrites word2/mask2 next iteration;
        // without it the new store races against these reads (the block may
        // span several warps).
        __syncthreads();
    }
    // NOTE(review): totalBits == 0 (disjoint masks) yields NaN — presumably
    // masks always overlap; confirm.
    hammingDistances[rotationIdx] = float(nonZeroBits) / float(totalBits);
    __syncthreads();

    // Thread 0 reduces the per-rotation distances to their minimum.
    if (threadIdx.x == 0) {
        float minHD = 1.0;
        for (int i = 0; i < blockDim.x; i++) {
            minHD = min(minHD, hammingDistances[i]);
        }
        distances[templateIdx] = minHD;
    }
}
// A-contrario variant: the template is split column-wise into blockDim.y
// parts; thread (rotationIdx, part) accumulates the Hamming distance of its
// part under its rotation, and the threadIdx.x == 0 thread of each part
// writes the minimum over rotations to distances[nParts*templateIdx + part].
// Assumes database.templateWidth is a multiple of 4 (see the retained
// assert) so rows can be traversed as 32-bit words.
__global__ void doGPUAContrarioMatchKernel(const uint8_t* rotatedTemplates, const uint8_t* rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* distances)
{
    __shared__ float hammingDistances[MAX_PARTS][MAX_ROTS];

    unsigned templateIdx = blockIdx.x;
    unsigned part = threadIdx.y;
    unsigned nParts = blockDim.y;
    unsigned rotationIdx = threadIdx.x;
    //assert((database.templateWidth % 4) == 0);

    size_t templateSize = database.templateWidth * database.templateHeight;
    uint32_t* rotatedTemplate = (uint32_t*)(rotatedTemplates + rotationIdx*templateSize);
    uint32_t* rotatedMask = (uint32_t*)(rotatedMasks + rotationIdx*templateSize);
    uint32_t* otherTemplate = (uint32_t*)(database.d_templates + templateIdx*templateSize);
    uint32_t* otherMask = (uint32_t*)(database.d_masks + templateIdx*templateSize);

    unsigned widthRows = database.templateWidth / 4;                // row width in 32-bit words
    unsigned partWidthWords = ceil(float(widthRows)/float(nParts)); // part width in 32-bit words

    size_t nonZeroBits = 0, totalBits = 0;
    uint32_t word1, mask1;
    // Per-part database word/mask, loaded by the x == 0 thread of each part
    // and broadcast through shared memory.
    // NOTE(review): indexed by `part` but sized MAX_ROTS — only safe if
    // MAX_PARTS <= MAX_ROTS; confirm the constants.
    __shared__ uint32_t words2[MAX_ROTS], masks2[MAX_ROTS];
    uint32_t word2, mask2;
    unsigned w0row = floor((float(widthRows)/float(nParts))*float(part)); // first word of this part in each row
    unsigned idx;
    for (unsigned row = 0; row < database.templateHeight; row++) {
        for (unsigned col = 0; col < partWidthWords; col++) {
            idx = row*widthRows + w0row + col;
            word1 = rotatedTemplate[idx];
            mask1 = rotatedMask[idx];
            if (threadIdx.x == 0) {
                words2[part] = otherTemplate[idx];
                masks2[part] = otherMask[idx];
            }
            __syncthreads(); // publish this part's word/mask to all rotations
            word2 = words2[part];
            mask2 = masks2[part];

            uint32_t x = XOR(word1, word2, mask1, mask2);
            nonZeroBits += __popc(x);
            totalBits += __popc(mask1 & mask2);

            // Barrier before the x == 0 threads overwrite words2/masks2 for
            // the next word; without it that store races against the reads
            // above. Loop bounds are uniform across the block, so every
            // thread reaches this barrier.
            __syncthreads();
        }
    }
    hammingDistances[part][threadIdx.x] = float(nonZeroBits) / float(totalBits);
    __syncthreads();

    // One thread per part reduces over the rotation dimension.
    if (threadIdx.x == 0) {
        float minHD = 1.0;
        for (int i = 0; i < blockDim.x; i++) {
            minHD = min(minHD, hammingDistances[part][i]);
        }
        distances[nParts*templateIdx+part] = minHD;
    }
}
// Wrapper functions to invoke from C++
// Host-side launch shim so plain C++ translation units can invoke the match
// kernel: forwards all arguments using the caller-supplied grid/block
// configuration on the default stream.
void doGPUMatchKernelWrapper(dim3 blockSize, dim3 gridSize, const uint8_t* d_rotatedTemplates, const uint8_t* d_rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* d_distances)
{
    doGPUMatchKernel<<<gridSize, blockSize>>>(d_rotatedTemplates, d_rotatedMasks,
                                              nRotatedTemplates, database,
                                              d_distances);
}
// Host-side launch shim for the a-contrario match kernel; same forwarding
// pattern as doGPUMatchKernelWrapper (default stream, no dynamic shared
// memory).
void doGPUAContrarioMatchKernelWrapper(dim3 blockSize, dim3 gridSize, const uint8_t* d_rotatedTemplates, const uint8_t* d_rotatedMasks, size_t nRotatedTemplates, const GPUDatabase database, float* d_distances)
{
    doGPUAContrarioMatchKernel<<<gridSize, blockSize>>>(d_rotatedTemplates,
                                                        d_rotatedMasks,
                                                        nRotatedTemplates,
                                                        database, d_distances);
}
|
963d44d1f795669f6bffba7a5c44db58689641dc.hip | // !!! This is a file automatically generated by hipify!!!
// ECE 406 Lab 5, Fall 2015
#include <stdio.h>
// CUDA stuff:
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// OpenCV stuff (note: C++ not C):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
int M; // number of rows in image
int N; // number of columns in image
// These come from CLI arguments:
int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE*BOX_SIZE
bool show_images; // whether we should pop up the I/O images or not
// Horizontally flip the M x N grayscale image GPU_i into GPU_o, one thread
// per pixel. The launch maps the grid's x dimension to rows and y to
// columns (see launch_helper).
__global__ void lab5_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // row of image
    int j = blockIdx.y * blockDim.y + threadIdx.y; // column of image
    // Bounds guard: threads past the image edge must not write.
    if (i >= M || j >= N)
        return;
    int idx = i*N + j;  // source pixel in the 1D array
    // Mirrored column. The original used N - j, which is off by one: for
    // j == 0 it produced idx2 == i*N + N, i.e. the first pixel of the NEXT
    // row (and out of bounds on the last row).
    int j2 = N - 1 - j;
    int idx2 = i*N + j2;
    GPU_o[idx2] = GPU_i[idx];
}
// Display image until a key is pressed in the window:
// Pop up a window titled `title` showing `image` and block until any key is
// pressed. No-op when image display was disabled on the command line.
void show_image(Mat image, string title) {
  if (!show_images)
    return;
  namedWindow(title, WINDOW_AUTOSIZE); // create the display window
  imshow(title, image);                // render the image into it
  waitKey(0);                          // block until a keypress
}
// Entry point. Usage: <input image> <output image> <box size> <show images>.
// Loads a grayscale image with OpenCV, runs the GPU kernel through
// launch_helper(), prints the four timing figures, then displays and saves
// the result.
int main(int argc, char *argv[])
{
float GPURuntimes[4]; // run times of the GPU code
hipError_t cudaStatus;
int *CPU_OutputArray; // where the GPU should copy the output back to
if( argc != 5) {
printf("Usage: %s <input image> <output image> <box size> <show images>\n", argv[0]);
printf(" where 'show images' is 0 or 1\n");
exit(EXIT_FAILURE);
}
BOX_SIZE = atoi(argv[3]);
show_images = atoi(argv[4]);
// Load image:
Mat image; // see http://docs.opencv.org/modules/core/doc/basic_structures.html#mat
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
// we could load it as CV_LOAD_IMAGE_COLOR, but we don't want to worry about that extra dimension
if(! image.data ) {
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.rows, image.cols, image.dims);
// Set up global variables based on image size:
M = image.rows;
N = image.cols;
// Display the input image:
show_image(image, "input image");
// Create CPU memory to store the output:
// NOTE(review): allocated as M*N ints although launch_helper copies back
// only M*N uchars — oversized but harmless; confirm intent.
CPU_OutputArray = (int*)malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL) {
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
// Run it:
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
printf("-----------------------------------------------------------------\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
// Display the output image:
// NOTE(review): builds an 8-bit single-channel Mat view directly over the
// int-typed buffer; the first M*N bytes are the pixel data — verify.
Mat result = Mat(M, N, CV_8UC1, CPU_OutputArray);
show_image(result, "output image");
// and save it to disk:
string output_filename = argv[2];
if (!imwrite(output_filename, result)) {
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
output_filename.c_str(), result.rows, result.cols, result.dims);
free(CPU_OutputArray);
exit(EXIT_SUCCESS);
}
// Helper function for launching a CUDA kernel (including memcpy, timing, etc.):
// Allocates device buffers, copies the input image up, launches lab5_kernel
// with BOX_SIZE x BOX_SIZE threads per block, copies the result back into
// CPU_OutputArray and fills Runtimes[0..3] with {total, H2D, kernel, D2H}
// times in ms (measured with HIP events). Returns the first error status
// encountered; device buffers and events are released on all paths.
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes)
{
hipEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image (i.e. input or output) in bytes
uchar *GPU_idata;
uchar *GPU_odata;
// Note that we could store GPU_i and GPU_o as 2D arrays instead of 1D...
// it would make indexing simpler, but could complicate memcpy.
dim3 threadsPerBlock;
dim3 numBlocks;
// Choose which GPU to run on; change this on a multi-GPU system.
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0); // use the first GPU (not necessarily the fastest)
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
hipEventCreate(&time1);
hipEventCreate(&time2);
hipEventCreate(&time3);
hipEventCreate(&time4);
hipEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
// NOTE(review): sized in uchars while the caller's CPU_OutputArray is
// sized in ints — the copy below fits, but the types disagree.
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
// NOTE(review): integer division truncates — when M or N is not a
// multiple of BOX_SIZE the rightmost/bottom pixels get no thread.
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( lab5_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N);
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipEventDestroy(time1);
hipEventDestroy(time2);
hipEventDestroy(time3);
hipEventDestroy(time4);
return cudaStatus;
}
| 963d44d1f795669f6bffba7a5c44db58689641dc.cu | // ECE 406 Lab 5, Fall 2015
#include <stdio.h>
// CUDA stuff:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// OpenCV stuff (note: C++ not C):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
int M; // number of rows in image
int N; // number of columns in image
// These come from CLI arguments:
int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE*BOX_SIZE
bool show_images; // whether we should pop up the I/O images or not
// Horizontally flip the M x N grayscale image GPU_i into GPU_o, one thread
// per pixel. The launch maps the grid's x dimension to rows and y to
// columns (see launch_helper).
__global__ void lab5_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // row of image
    int j = blockIdx.y * blockDim.y + threadIdx.y; // column of image
    // Bounds guard: threads past the image edge must not write.
    if (i >= M || j >= N)
        return;
    int idx = i*N + j;  // source pixel in the 1D array
    // Mirrored column. The original used N - j, which is off by one: for
    // j == 0 it produced idx2 == i*N + N, i.e. the first pixel of the NEXT
    // row (and out of bounds on the last row).
    int j2 = N - 1 - j;
    int idx2 = i*N + j2;
    GPU_o[idx2] = GPU_i[idx];
}
// Display image until a key is pressed in the window:
// Pop up a window titled `title` showing `image` and block until any key is
// pressed. No-op when image display was disabled on the command line.
void show_image(Mat image, string title) {
  if (!show_images)
    return;
  namedWindow(title, WINDOW_AUTOSIZE); // create the display window
  imshow(title, image);                // render the image into it
  waitKey(0);                          // block until a keypress
}
// Entry point. Usage: <input image> <output image> <box size> <show images>.
// Loads a grayscale image with OpenCV, runs the GPU kernel through
// launch_helper(), prints the four timing figures, then displays and saves
// the result.
int main(int argc, char *argv[])
{
float GPURuntimes[4]; // run times of the GPU code
cudaError_t cudaStatus;
int *CPU_OutputArray; // where the GPU should copy the output back to
if( argc != 5) {
printf("Usage: %s <input image> <output image> <box size> <show images>\n", argv[0]);
printf(" where 'show images' is 0 or 1\n");
exit(EXIT_FAILURE);
}
BOX_SIZE = atoi(argv[3]);
show_images = atoi(argv[4]);
// Load image:
Mat image; // see http://docs.opencv.org/modules/core/doc/basic_structures.html#mat
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
// we could load it as CV_LOAD_IMAGE_COLOR, but we don't want to worry about that extra dimension
if(! image.data ) {
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.rows, image.cols, image.dims);
// Set up global variables based on image size:
M = image.rows;
N = image.cols;
// Display the input image:
show_image(image, "input image");
// Create CPU memory to store the output:
// NOTE(review): allocated as M*N ints although launch_helper copies back
// only M*N uchars — oversized but harmless; confirm intent.
CPU_OutputArray = (int*)malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL) {
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
// Run it:
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \n Total=%5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
printf("-----------------------------------------------------------------\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
// Display the output image:
// NOTE(review): builds an 8-bit single-channel Mat view directly over the
// int-typed buffer; the first M*N bytes are the pixel data — verify.
Mat result = Mat(M, N, CV_8UC1, CPU_OutputArray);
show_image(result, "output image");
// and save it to disk:
string output_filename = argv[2];
if (!imwrite(output_filename, result)) {
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
output_filename.c_str(), result.rows, result.cols, result.dims);
free(CPU_OutputArray);
exit(EXIT_SUCCESS);
}
// Helper function for launching a CUDA kernel (including memcpy, timing, etc.):
// Allocates device buffers, copies the input image up, launches lab5_kernel
// with BOX_SIZE x BOX_SIZE threads per block, copies the result back into
// CPU_OutputArray and fills Runtimes[0..3] with {total, H2D, kernel, D2H}
// times in ms (measured with CUDA events). Returns the first error status
// encountered; device buffers and events are released on all paths.
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes)
{
cudaEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image (i.e. input or output) in bytes
uchar *GPU_idata;
uchar *GPU_odata;
// Note that we could store GPU_i and GPU_o as 2D arrays instead of 1D...
// it would make indexing simpler, but could complicate memcpy.
dim3 threadsPerBlock;
dim3 numBlocks;
// Choose which GPU to run on; change this on a multi-GPU system.
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0); // use the first GPU (not necessarily the fastest)
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
// NOTE(review): sized in uchars while the caller's CPU_OutputArray is
// sized in ints — the copy below fits, but the types disagree.
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
// NOTE(review): integer division truncates — when M or N is not a
// multiple of BOX_SIZE the rightmost/bottom pixels get no thread.
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
lab5_kernel<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N);
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
cudaFree(GPU_odata);
cudaFree(GPU_idata);
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
return cudaStatus;
}
|
201460cd0686237d88a7e584412611246de3510f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 University of Basel, Medical Image Analysis Center
*
* Author: Benedikt Bitterli (benedikt.bitterli@unibas.ch)
* Christoph Jud (christoph.jud@unibas.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GpuEvaluator.h"
struct GpuMSEParams
{
ScalarType *diffs;
};
#if SpaceDimensions == 3
// MSE metric, value pass (3D): one thread per (sub)sampled fixed-image
// voxel. Each thread transforms its fixed-image point by the optional
// displacement field plus a kernel-weighted sum over nearby control points,
// samples the moving image at the transformed point, and stores for the
// derivative pass: the intensity difference, the moving-image gradient and
// an inside-domain flag (all indexed by the subsampled-grid position).
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateValueMSE(GpuParams params, GpuMSEParams mse_params)
{
Vec3i size = params.subsampledSize;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int z = blockDim.z*blockIdx.z + threadIdx.z;
// Bounds guard: the grid may overshoot the sampled volume.
if (x >= size[0] || y >= size[1] || z >= size[2])
return;
Vec3i samplePos = Vec3i(x, y, z);
// When subsampling, jitter the regular grid by the precomputed per-sample
// shift to avoid aliasing against image structure.
if (DoSubsample)
samplePos = samplePos*params.subsample + params.gridShift[x + y*size[0] + z*size[0]*size[1]];
Vec3f fixedImagePoint = params.fixedImage.toGlobal(Vec3f(samplePos));
Vec3f centerRegionPoint = fixedImagePoint;
if (params.useDisplacementField)
centerRegionPoint += params.displacementField.atGlobal(fixedImagePoint);
// Kernel support radius; the three template flags select between a
// spatially varying scalar sigma, a full covariance, or a constant kernel.
ScalarType support;
ScalarType sigmaP;
Mat3f covP;
if (UseWeightImage) {
sigmaP = params.kernel.getSigmaAtPoint(fixedImagePoint);
support = params.kernel.getRegionSupport(sigmaP);
} else if (UseWeightTensor) {
covP = params.kernel.getCovarianceAtPoint(fixedImagePoint);
support = params.kernel.getRegionSupport(covP);
} else {
support = params.kernel.getRegionSupport();
}
// Clamp the iterated control-point window to the control-point image.
Vec3f indexPoint = params.cpImage.toLocal(fixedImagePoint);
Vec3i imgLower = ::min(::max(Vec3i(indexPoint - support/params.cpImage.scale() + 1.0), Vec3i(0)), params.cpImage.size() - 1);
Vec3i imgUpper = ::min(::max(Vec3i(indexPoint + support/params.cpImage.scale() ), Vec3i(0)), params.cpImage.size() - 1);
// Accumulate the kernel-weighted control-point displacements.
for (int zi = imgLower[2]; zi <= imgUpper[2]; ++zi) {
for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
Vec3f point_i = params.cpImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)));
ScalarType k;
if (UseWeightImage)
k = params.kernel.evaluate(fixedImagePoint, point_i, sigmaP);
else if (UseWeightTensor)
k = params.kernel.evaluate(fixedImagePoint, point_i, covP);
else
k = params.kernel.evaluate(fixedImagePoint, point_i);
centerRegionPoint += params.cpwImage.at(Vec3i(xi, yi, zi))*k*params.cpImage.at(Vec3i(xi, yi, zi));
}
}
}
// Sample the moving image at the transformed point; points mapped outside
// its domain contribute zero diff/gradient and are flagged as outside.
ScalarType diff = 0.0;
Vec3f movingImageGradient(0.0);
int inside = 0;
if (params.movingImage.insideGlobal(centerRegionPoint)) {
ScalarType movingImageValue;
params.movingImage.derivsGlobal(centerRegionPoint, movingImageValue, movingImageGradient);
diff = movingImageValue - params.fixedImage.at(samplePos);
inside = 1;
}
mse_params.diffs [x + y*size[0] + z*size[0]*size[1]] = diff;
params.gradients [x + y*size[0] + z*size[0]*size[1]] = movingImageGradient;
params.pixelsCounted[x + y*size[0] + z*size[0]*size[1]] = inside;
}
// MSE metric, derivative pass (3D): one thread per control point. Each
// thread gathers, over all (sub)sampled fixed-image positions within its
// kernel support, the products 2 * diff * w_i * k * gradient computed from
// the value pass, and writes the resulting 3-vector into the derivative
// buffer (one plane per spatial dimension).
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateDerivativeMSE(GpuParams params, GpuMSEParams mse_params)
{
Vec3i size = params.paramSize;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int z = blockDim.z*blockIdx.z + threadIdx.z;
// Bounds guard: the grid may overshoot the control-point volume.
if (x >= size[0] || y >= size[1] || z >= size[2])
return;
Vec3f globalPoint = params.cpImage.toGlobal(Vec3f(Vec3i(x, y, z)));
ScalarType w_i = params.cpwImage.at(Vec3i(x, y, z)); // per-control-point weight
// Kernel support radius; the template flags select between a spatially
// varying scalar sigma, a full covariance, or a constant kernel.
ScalarType support;
ScalarType sigmaP;
Mat3f covP;
if (UseWeightImage) {
sigmaP = params.kernel.getSigmaAtPoint(globalPoint);
support = params.kernel.getRegionSupport(sigmaP);
} else if (UseWeightTensor) {
covP = params.kernel.getCovarianceAtPoint(globalPoint);
support = params.kernel.getRegionSupport(covP);
} else {
support = params.kernel.getRegionSupport();
}
// Clamp the iterated sample window; in the subsampled case the window is
// expressed on the coarser sampling grid.
Vec3f indexPoint = params.fixedImage.toLocal(globalPoint);
Vec3i imgLower, imgUpper;
if (DoSubsample) {
imgLower = ::min(::max(Vec3i((indexPoint - support/params.fixedImage.scale())/float(params.subsample) ), Vec3i(0)), params.subsampledSize - 1);
imgUpper = ::min(::max(Vec3i((indexPoint + support/params.fixedImage.scale())/float(params.subsample) + 1.0), Vec3i(0)), params.subsampledSize - 1);
} else {
imgLower = ::min(::max(Vec3i(indexPoint - support/params.fixedImage.scale() ), Vec3i(0)), params.fixedImage.size() - 1);
imgUpper = ::min(::max(Vec3i(indexPoint + support/params.fixedImage.scale() + 1.0), Vec3i(0)), params.fixedImage.size() - 1);
}
Vec3f derivative(0.0);
for (int zi = imgLower[2]; zi <= imgUpper[2]; ++zi) {
for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
int idx;
if (DoSubsample)
idx = xi + params.subsampledSize[0]*(yi + params.subsampledSize[1]*zi);
else
idx = params.fixedImage.toIndex(Vec3i(xi, yi, zi));
// diff and gradient were stored by the value pass at this sample.
ScalarType diff = mse_params.diffs[idx];
Vec3f gradient = params.gradients[idx];
// Reconstruct the sample's world position (including the jitter the
// value pass applied when subsampling).
Vec3f point_i;
if (DoSubsample)
point_i = params.fixedImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)*params.subsample + params.gridShift[idx]));
else
point_i = params.fixedImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)));
ScalarType k;
if (UseWeightImage)
k = params.kernel.evaluate(globalPoint, point_i, sigmaP);
else if (UseWeightTensor)
k = params.kernel.evaluate(globalPoint, point_i, covP);
else
k = params.kernel.evaluate(globalPoint, point_i);
derivative += static_cast<ScalarType>(2.0)*diff*w_i*k*gradient;
}
}
}
// Scatter the 3 components into dimension-major planes of the output.
const int dimensionStride = params.cpImage.size().product();
int pixelIndex = params.cpImage.toIndex(Vec3i(x, y, z));
for (unsigned d = 0; d < SpaceDimensions; d++){
params.derivatives[pixelIndex + d*dimensionStride] = derivative[d];
}
}
#else //////////// 2D ///////////
// One thread per (sub)sampled 2D fixed-image pixel.  Warps the pixel through
// the transform (optional displacement field plus the kernel-weighted sum of
// control-point coefficients), samples the moving image at the warped point,
// and stores the intensity difference, moving-image gradient and an
// inside-the-moving-image flag at the pixel's linear index.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateValueMSE(GpuParams params, GpuMSEParams mse_params)
{
Vec2i size = params.subsampledSize;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= size[0] || y >= size[1])
return;
Vec2i samplePos = Vec2i(x, y);
// With subsampling, map the thread onto the full-resolution grid plus a
// per-sample jitter offset.
if (DoSubsample)
samplePos = samplePos*params.subsample + params.gridShift[x + y*size[0]];
Vec2f fixedImagePoint = params.fixedImage.toGlobal(Vec2f(samplePos));
Vec2f movingImagePoint = fixedImagePoint;
if (params.useDisplacementField)
movingImagePoint += params.displacementField.atGlobal(fixedImagePoint);
// Kernel support radius depends on the weighting mode (scalar sigma image,
// covariance tensor, or globally constant).
ScalarType support;
ScalarType sigmaP;
Mat2f covP;
if (UseWeightImage) {
sigmaP = params.kernel.getSigmaAtPoint(fixedImagePoint);
support = params.kernel.getRegionSupport(sigmaP);
} else if (UseWeightTensor) {
covP = params.kernel.getCovarianceAtPoint(fixedImagePoint);
support = params.kernel.getRegionSupport(covP);
} else {
support = params.kernel.getRegionSupport();
}
// Control-point window covering the support region, clamped to the grid
// (adding 1.0 before the Vec2i truncation acts as a ceil on the lower bound).
Vec2f indexPoint = params.cpImage.toLocal(fixedImagePoint);
Vec2i imgLower = ::min(::max(Vec2i(indexPoint - support/params.cpImage.scale() + 1.0), Vec2i(0)), params.cpImage.size() - 1);
Vec2i imgUpper = ::min(::max(Vec2i(indexPoint + support/params.cpImage.scale() ), Vec2i(0)), params.cpImage.size() - 1);
for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
Vec2f point_i = params.cpImage.toGlobal(Vec2f(xi, yi));
ScalarType k;
if (UseWeightImage)
k = params.kernel.evaluate(fixedImagePoint, point_i, sigmaP);
else if (UseWeightTensor)
k = params.kernel.evaluate(fixedImagePoint, point_i, covP);
else
k = params.kernel.evaluate(fixedImagePoint, point_i);
// Accumulate the kernel-weighted control-point displacement.
movingImagePoint += params.cpwImage.at(Vec2i(xi, yi))*k*params.cpImage.at(Vec2i(xi, yi));
}
}
// Points warped outside the moving image contribute zero diff/gradient and
// are flagged as not counted.
ScalarType diff = 0.0;
Vec2f movingImageGradient(0.0);
int inside = 0;
if (params.movingImage.insideGlobal(movingImagePoint)) {
ScalarType movingImageValue;
params.movingImage.derivsGlobal(movingImagePoint, movingImageValue, movingImageGradient);
diff = movingImageValue - params.fixedImage.at(samplePos);
inside = 1;
}
mse_params.diffs [x + y*size[0]] = diff;
params.gradients [x + y*size[0]] = movingImageGradient;
params.pixelsCounted[x + y*size[0]] = inside;
}
// One thread per 2D control point (x, y).  Accumulates the MSE derivative with
// respect to that control point using the precomputed per-sample diffs and
// moving-image gradients:
//   derivative = sum over affected samples of 2 * diff * w_i * k * gradient
// and writes one component per space dimension, strided by the total number of
// control points.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateDerivativeMSE(GpuParams params, GpuMSEParams mse_params)
{
Vec2i size = params.paramSize;
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= size[0] || y >= size[1])
return;
Vec2f globalPoint = params.cpImage.toGlobal(Vec2f(static_cast<ScalarType>(x), static_cast<ScalarType>(y)));
ScalarType w_i = params.cpwImage.at(Vec2i(x, y));
// Kernel support radius depends on the weighting mode.
ScalarType support;
ScalarType sigmaP;
Mat2f covP;
if (UseWeightImage) {
sigmaP = params.kernel.getSigmaAtPoint(globalPoint);
support = params.kernel.getRegionSupport(sigmaP);
} else if (UseWeightTensor) {
covP = params.kernel.getCovarianceAtPoint(globalPoint);
support = params.kernel.getRegionSupport(covP);
} else {
support = params.kernel.getRegionSupport();
}
// Image-sample window affected by this control point.
// NOTE(review): the +1.0 rounding is applied to imgLower here and omitted
// from imgUpper, which is the opposite of the 3D evaluateDerivativeMSE
// (there: lower truncated, upper +1.0) -- confirm which convention is
// intended.
Vec2f indexPoint = params.fixedImage.toLocal(globalPoint);
Vec2i imgLower, imgUpper;
if (DoSubsample) {
imgLower = ::min(::max(Vec2i((indexPoint - support/params.fixedImage.scale())/float(params.subsample) + 1.0), Vec2i(0)), params.subsampledSize - 1);
imgUpper = ::min(::max(Vec2i((indexPoint + support/params.fixedImage.scale())/float(params.subsample) ), Vec2i(0)), params.subsampledSize - 1);
} else {
imgLower = ::min(::max(Vec2i(indexPoint - support/params.fixedImage.scale() + 1.0), Vec2i(0)), params.fixedImage.size() - 1);
imgUpper = ::min(::max(Vec2i(indexPoint + support/params.fixedImage.scale() ), Vec2i(0)), params.fixedImage.size() - 1);
}
Vec2f derivative(0.0);
for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
int idx;
if (DoSubsample)
idx = xi + params.subsampledSize[0]*yi;
else
idx = params.fixedImage.toIndex(Vec2i(xi, yi));
ScalarType diff = mse_params.diffs[idx];
Vec2f gradient = params.gradients[idx];
// Global position of the sample (including jitter when subsampling).
Vec2f point_i;
if (DoSubsample)
point_i = params.fixedImage.toGlobal(Vec2f(Vec2i(xi, yi)*params.subsample + params.gridShift[idx]));
else
point_i = params.fixedImage.toGlobal(Vec2f(Vec2i(xi, yi)));
ScalarType k;
if (UseWeightImage)
k = params.kernel.evaluate(globalPoint, point_i, sigmaP);
else if (UseWeightTensor)
k = params.kernel.evaluate(globalPoint, point_i, covP);
else
k = params.kernel.evaluate(globalPoint, point_i);
derivative += static_cast<ScalarType>(2.0)*diff*w_i*k*gradient;
}
}
// Derivatives are stored dimension-major: component d of control point p at
// p + d * (#control points).
const int dimensionStride = params.cpImage.size().product();
int pixelIndex = params.cpImage.toIndex(Vec2i(x, y));
for (unsigned d = 0; d < SpaceDimensions; d++){
params.derivatives[pixelIndex + d*dimensionStride] = derivative[d];
}
}
#endif
// Resolves the runtime weighting mode (sigma image / covariance tensor /
// constant) to the matching compile-time instantiation of evaluateValueMSE
// and launches it with the given grid/block configuration.
template<bool DoSubsample>
void resolveValueMSE(dim3 gridDim, dim3 blockDim, GpuParams params, GpuMSEParams mse_params)
{
    if (params.kernel.useWeightImage()) {
        hipLaunchKernelGGL((evaluateValueMSE<DoSubsample, true, false>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
        return;
    }
    if (params.kernel.useWeightTensor()) {
        hipLaunchKernelGGL((evaluateValueMSE<DoSubsample, false, true>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
        return;
    }
    hipLaunchKernelGGL((evaluateValueMSE<DoSubsample, false, false>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
}
// Resolves the runtime weighting mode (sigma image / covariance tensor /
// constant) to the matching compile-time instantiation of
// evaluateDerivativeMSE and launches it with the given grid/block
// configuration.
template<bool DoSubsample>
void resolveDerivativeMSE(dim3 gridDim, dim3 blockDim, GpuParams params, GpuMSEParams mse_params)
{
    if (params.kernel.useWeightImage()) {
        hipLaunchKernelGGL((evaluateDerivativeMSE<DoSubsample, true, false>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
        return;
    }
    if (params.kernel.useWeightTensor()) {
        hipLaunchKernelGGL((evaluateDerivativeMSE<DoSubsample, false, true>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
        return;
    }
    hipLaunchKernelGGL((evaluateDerivativeMSE<DoSubsample, false, false>), dim3(gridDim), dim3(blockDim), 0, 0, params, mse_params);
}
| 201460cd0686237d88a7e584412611246de3510f.cu | /*
* Copyright 2016 University of Basel, Medical Image Analysis Center
*
* Author: Benedikt Bitterli (benedikt.bitterli@unibas.ch)
* Christoph Jud (christoph.jud@unibas.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GpuEvaluator.h"
// Extra device-side state for the MSE metric: per-sample intensity
// differences (moving minus fixed), one entry per (sub)sampled pixel,
// produced by evaluateValueMSE and consumed by evaluateDerivativeMSE.
struct GpuMSEParams
{
ScalarType *diffs;
};
#if SpaceDimensions == 3
// One thread per (sub)sampled 3D fixed-image voxel.  Warps the voxel through
// the transform (optional displacement field plus the kernel-weighted sum of
// control-point coefficients), samples the moving image at the warped point,
// and stores the intensity difference, moving-image gradient and an
// inside-the-moving-image flag at the voxel's linear index.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateValueMSE(GpuParams params, GpuMSEParams mse_params)
{
    Vec3i size = params.subsampledSize;
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int z = blockDim.z*blockIdx.z + threadIdx.z;

    if (x >= size[0] || y >= size[1] || z >= size[2])
        return;

    Vec3i samplePos = Vec3i(x, y, z);
    // With subsampling, map the thread onto the full-resolution grid plus a
    // per-sample jitter offset.
    if (DoSubsample)
        samplePos = samplePos*params.subsample + params.gridShift[x + y*size[0] + z*size[0]*size[1]];

    Vec3f fixedImagePoint = params.fixedImage.toGlobal(Vec3f(samplePos));
    Vec3f centerRegionPoint = fixedImagePoint;
    if (params.useDisplacementField)
        centerRegionPoint += params.displacementField.atGlobal(fixedImagePoint);

    // Kernel support radius depends on the weighting mode (scalar sigma
    // image, covariance tensor, or globally constant).
    ScalarType support;
    ScalarType sigmaP;
    Mat3f covP;
    if (UseWeightImage) {
        sigmaP  = params.kernel.getSigmaAtPoint(fixedImagePoint);
        support = params.kernel.getRegionSupport(sigmaP);
    } else if (UseWeightTensor) {
        covP    = params.kernel.getCovarianceAtPoint(fixedImagePoint);
        support = params.kernel.getRegionSupport(covP);
    } else {
        support = params.kernel.getRegionSupport();
    }

    // Control-point window covering the support region, clamped to the grid
    // (adding 1.0 before the Vec3i truncation acts as a ceil on the lower
    // bound).
    Vec3f indexPoint = params.cpImage.toLocal(fixedImagePoint);
    Vec3i imgLower = std::min(std::max(Vec3i(indexPoint - support/params.cpImage.scale() + 1.0), Vec3i(0)), params.cpImage.size() - 1);
    Vec3i imgUpper = std::min(std::max(Vec3i(indexPoint + support/params.cpImage.scale()      ), Vec3i(0)), params.cpImage.size() - 1);

    for (int zi = imgLower[2]; zi <= imgUpper[2]; ++zi) {
        for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
            for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
                Vec3f point_i = params.cpImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)));
                ScalarType k;
                if (UseWeightImage)
                    k = params.kernel.evaluate(fixedImagePoint, point_i, sigmaP);
                else if (UseWeightTensor)
                    k = params.kernel.evaluate(fixedImagePoint, point_i, covP);
                else
                    k = params.kernel.evaluate(fixedImagePoint, point_i);
                // Accumulate the kernel-weighted control-point displacement.
                centerRegionPoint += params.cpwImage.at(Vec3i(xi, yi, zi))*k*params.cpImage.at(Vec3i(xi, yi, zi));
            }
        }
    }

    // Points warped outside the moving image contribute zero diff/gradient
    // and are flagged as not counted.
    ScalarType diff = 0.0;
    Vec3f movingImageGradient(0.0);
    int inside = 0;
    if (params.movingImage.insideGlobal(centerRegionPoint)) {
        ScalarType movingImageValue;
        params.movingImage.derivsGlobal(centerRegionPoint, movingImageValue, movingImageGradient);
        diff = movingImageValue - params.fixedImage.at(samplePos);
        inside = 1;
    }

    mse_params.diffs    [x + y*size[0] + z*size[0]*size[1]] = diff;
    params.gradients    [x + y*size[0] + z*size[0]*size[1]] = movingImageGradient;
    params.pixelsCounted[x + y*size[0] + z*size[0]*size[1]] = inside;
}
// One thread per 3D control point (x, y, z).  Accumulates the MSE derivative
// with respect to that control point using the precomputed per-sample diffs
// and moving-image gradients:
//   derivative = sum over affected samples of 2 * diff * w_i * k * gradient
// and writes one component per space dimension, strided by the total number
// of control points.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateDerivativeMSE(GpuParams params, GpuMSEParams mse_params)
{
    Vec3i size = params.paramSize;
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int z = blockDim.z*blockIdx.z + threadIdx.z;

    if (x >= size[0] || y >= size[1] || z >= size[2])
        return;

    Vec3f globalPoint = params.cpImage.toGlobal(Vec3f(Vec3i(x, y, z)));
    ScalarType w_i = params.cpwImage.at(Vec3i(x, y, z));

    // Kernel support radius depends on the weighting mode.
    ScalarType support;
    ScalarType sigmaP;
    Mat3f covP;
    if (UseWeightImage) {
        sigmaP  = params.kernel.getSigmaAtPoint(globalPoint);
        support = params.kernel.getRegionSupport(sigmaP);
    } else if (UseWeightTensor) {
        covP    = params.kernel.getCovarianceAtPoint(globalPoint);
        support = params.kernel.getRegionSupport(covP);
    } else {
        support = params.kernel.getRegionSupport();
    }

    // Image-sample window affected by this control point (lower truncated,
    // upper widened by +1.0 before truncation).
    Vec3f indexPoint = params.fixedImage.toLocal(globalPoint);
    Vec3i imgLower, imgUpper;
    if (DoSubsample) {
        imgLower = std::min(std::max(Vec3i((indexPoint - support/params.fixedImage.scale())/float(params.subsample)      ), Vec3i(0)), params.subsampledSize - 1);
        imgUpper = std::min(std::max(Vec3i((indexPoint + support/params.fixedImage.scale())/float(params.subsample) + 1.0), Vec3i(0)), params.subsampledSize - 1);
    } else {
        imgLower = std::min(std::max(Vec3i(indexPoint - support/params.fixedImage.scale()      ), Vec3i(0)), params.fixedImage.size() - 1);
        imgUpper = std::min(std::max(Vec3i(indexPoint + support/params.fixedImage.scale() + 1.0), Vec3i(0)), params.fixedImage.size() - 1);
    }

    Vec3f derivative(0.0);
    for (int zi = imgLower[2]; zi <= imgUpper[2]; ++zi) {
        for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
            for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
                int idx;
                if (DoSubsample)
                    idx = xi + params.subsampledSize[0]*(yi + params.subsampledSize[1]*zi);
                else
                    idx = params.fixedImage.toIndex(Vec3i(xi, yi, zi));
                ScalarType diff = mse_params.diffs[idx];
                Vec3f gradient = params.gradients[idx];
                // Global position of the sample (including jitter when
                // subsampling).
                Vec3f point_i;
                if (DoSubsample)
                    point_i = params.fixedImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)*params.subsample + params.gridShift[idx]));
                else
                    point_i = params.fixedImage.toGlobal(Vec3f(Vec3i(xi, yi, zi)));
                ScalarType k;
                if (UseWeightImage)
                    k = params.kernel.evaluate(globalPoint, point_i, sigmaP);
                else if (UseWeightTensor)
                    k = params.kernel.evaluate(globalPoint, point_i, covP);
                else
                    k = params.kernel.evaluate(globalPoint, point_i);
                derivative += static_cast<ScalarType>(2.0)*diff*w_i*k*gradient;
            }
        }
    }

    // Derivatives are stored dimension-major: component d of control point p
    // at p + d * (#control points).
    const int dimensionStride = params.cpImage.size().product();
    int pixelIndex = params.cpImage.toIndex(Vec3i(x, y, z));
    for (unsigned d = 0; d < SpaceDimensions; d++){
        params.derivatives[pixelIndex + d*dimensionStride] = derivative[d];
    }
}
#else //////////// 2D ///////////
// One thread per (sub)sampled 2D fixed-image pixel.  Warps the pixel through
// the transform (optional displacement field plus the kernel-weighted sum of
// control-point coefficients), samples the moving image at the warped point,
// and stores the intensity difference, moving-image gradient and an
// inside-the-moving-image flag at the pixel's linear index.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateValueMSE(GpuParams params, GpuMSEParams mse_params)
{
    Vec2i size = params.subsampledSize;
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;

    if (x >= size[0] || y >= size[1])
        return;

    Vec2i samplePos = Vec2i(x, y);
    // With subsampling, map the thread onto the full-resolution grid plus a
    // per-sample jitter offset.
    if (DoSubsample)
        samplePos = samplePos*params.subsample + params.gridShift[x + y*size[0]];

    Vec2f fixedImagePoint = params.fixedImage.toGlobal(Vec2f(samplePos));
    Vec2f movingImagePoint = fixedImagePoint;
    if (params.useDisplacementField)
        movingImagePoint += params.displacementField.atGlobal(fixedImagePoint);

    // Kernel support radius depends on the weighting mode (scalar sigma
    // image, covariance tensor, or globally constant).
    ScalarType support;
    ScalarType sigmaP;
    Mat2f covP;
    if (UseWeightImage) {
        sigmaP  = params.kernel.getSigmaAtPoint(fixedImagePoint);
        support = params.kernel.getRegionSupport(sigmaP);
    } else if (UseWeightTensor) {
        covP    = params.kernel.getCovarianceAtPoint(fixedImagePoint);
        support = params.kernel.getRegionSupport(covP);
    } else {
        support = params.kernel.getRegionSupport();
    }

    // Control-point window covering the support region, clamped to the grid
    // (adding 1.0 before the Vec2i truncation acts as a ceil on the lower
    // bound).
    Vec2f indexPoint = params.cpImage.toLocal(fixedImagePoint);
    Vec2i imgLower = std::min(std::max(Vec2i(indexPoint - support/params.cpImage.scale() + 1.0), Vec2i(0)), params.cpImage.size() - 1);
    Vec2i imgUpper = std::min(std::max(Vec2i(indexPoint + support/params.cpImage.scale()      ), Vec2i(0)), params.cpImage.size() - 1);

    for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
        for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
            Vec2f point_i = params.cpImage.toGlobal(Vec2f(xi, yi));
            ScalarType k;
            if (UseWeightImage)
                k = params.kernel.evaluate(fixedImagePoint, point_i, sigmaP);
            else if (UseWeightTensor)
                k = params.kernel.evaluate(fixedImagePoint, point_i, covP);
            else
                k = params.kernel.evaluate(fixedImagePoint, point_i);
            // Accumulate the kernel-weighted control-point displacement.
            movingImagePoint += params.cpwImage.at(Vec2i(xi, yi))*k*params.cpImage.at(Vec2i(xi, yi));
        }
    }

    // Points warped outside the moving image contribute zero diff/gradient
    // and are flagged as not counted.
    ScalarType diff = 0.0;
    Vec2f movingImageGradient(0.0);
    int inside = 0;
    if (params.movingImage.insideGlobal(movingImagePoint)) {
        ScalarType movingImageValue;
        params.movingImage.derivsGlobal(movingImagePoint, movingImageValue, movingImageGradient);
        diff = movingImageValue - params.fixedImage.at(samplePos);
        inside = 1;
    }

    mse_params.diffs    [x + y*size[0]] = diff;
    params.gradients    [x + y*size[0]] = movingImageGradient;
    params.pixelsCounted[x + y*size[0]] = inside;
}
// One thread per 2D control point (x, y).  Accumulates the MSE derivative
// with respect to that control point using the precomputed per-sample diffs
// and moving-image gradients:
//   derivative = sum over affected samples of 2 * diff * w_i * k * gradient
// and writes one component per space dimension, strided by the total number
// of control points.
template<bool DoSubsample, bool UseWeightImage, bool UseWeightTensor>
__global__ void evaluateDerivativeMSE(GpuParams params, GpuMSEParams mse_params)
{
    Vec2i size = params.paramSize;
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;

    if (x >= size[0] || y >= size[1])
        return;

    Vec2f globalPoint = params.cpImage.toGlobal(Vec2f(static_cast<ScalarType>(x), static_cast<ScalarType>(y)));
    ScalarType w_i = params.cpwImage.at(Vec2i(x, y));

    // Kernel support radius depends on the weighting mode.
    ScalarType support;
    ScalarType sigmaP;
    Mat2f covP;
    if (UseWeightImage) {
        sigmaP  = params.kernel.getSigmaAtPoint(globalPoint);
        support = params.kernel.getRegionSupport(sigmaP);
    } else if (UseWeightTensor) {
        covP    = params.kernel.getCovarianceAtPoint(globalPoint);
        support = params.kernel.getRegionSupport(covP);
    } else {
        support = params.kernel.getRegionSupport();
    }

    // Image-sample window affected by this control point.
    // NOTE(review): the +1.0 rounding is applied to imgLower here and omitted
    // from imgUpper, which is the opposite of the 3D evaluateDerivativeMSE
    // (there: lower truncated, upper +1.0) -- confirm which convention is
    // intended.
    Vec2f indexPoint = params.fixedImage.toLocal(globalPoint);
    Vec2i imgLower, imgUpper;
    if (DoSubsample) {
        imgLower = std::min(std::max(Vec2i((indexPoint - support/params.fixedImage.scale())/float(params.subsample) + 1.0), Vec2i(0)), params.subsampledSize - 1);
        imgUpper = std::min(std::max(Vec2i((indexPoint + support/params.fixedImage.scale())/float(params.subsample)      ), Vec2i(0)), params.subsampledSize - 1);
    } else {
        imgLower = std::min(std::max(Vec2i(indexPoint - support/params.fixedImage.scale() + 1.0), Vec2i(0)), params.fixedImage.size() - 1);
        imgUpper = std::min(std::max(Vec2i(indexPoint + support/params.fixedImage.scale()      ), Vec2i(0)), params.fixedImage.size() - 1);
    }

    Vec2f derivative(0.0);
    for (int yi = imgLower[1]; yi <= imgUpper[1]; ++yi) {
        for (int xi = imgLower[0]; xi <= imgUpper[0]; ++xi) {
            int idx;
            if (DoSubsample)
                idx = xi + params.subsampledSize[0]*yi;
            else
                idx = params.fixedImage.toIndex(Vec2i(xi, yi));
            ScalarType diff = mse_params.diffs[idx];
            Vec2f gradient = params.gradients[idx];
            // Global position of the sample (including jitter when
            // subsampling).
            Vec2f point_i;
            if (DoSubsample)
                point_i = params.fixedImage.toGlobal(Vec2f(Vec2i(xi, yi)*params.subsample + params.gridShift[idx]));
            else
                point_i = params.fixedImage.toGlobal(Vec2f(Vec2i(xi, yi)));
            ScalarType k;
            if (UseWeightImage)
                k = params.kernel.evaluate(globalPoint, point_i, sigmaP);
            else if (UseWeightTensor)
                k = params.kernel.evaluate(globalPoint, point_i, covP);
            else
                k = params.kernel.evaluate(globalPoint, point_i);
            derivative += static_cast<ScalarType>(2.0)*diff*w_i*k*gradient;
        }
    }

    // Derivatives are stored dimension-major: component d of control point p
    // at p + d * (#control points).
    const int dimensionStride = params.cpImage.size().product();
    int pixelIndex = params.cpImage.toIndex(Vec2i(x, y));
    for (unsigned d = 0; d < SpaceDimensions; d++){
        params.derivatives[pixelIndex + d*dimensionStride] = derivative[d];
    }
}
#endif
// Resolves the runtime weighting mode (sigma image / covariance tensor /
// constant) to the matching compile-time instantiation of evaluateValueMSE
// and launches it with the given grid/block configuration.
template<bool DoSubsample>
void resolveValueMSE(dim3 gridDim, dim3 blockDim, GpuParams params, GpuMSEParams mse_params)
{
    if (params.kernel.useWeightImage()) {
        evaluateValueMSE<DoSubsample, true, false><<<gridDim, blockDim>>>(params, mse_params);
        return;
    }
    if (params.kernel.useWeightTensor()) {
        evaluateValueMSE<DoSubsample, false, true><<<gridDim, blockDim>>>(params, mse_params);
        return;
    }
    evaluateValueMSE<DoSubsample, false, false><<<gridDim, blockDim>>>(params, mse_params);
}
// Resolves the runtime weighting mode (sigma image / covariance tensor /
// constant) to the matching compile-time instantiation of
// evaluateDerivativeMSE and launches it with the given grid/block
// configuration.
template<bool DoSubsample>
void resolveDerivativeMSE(dim3 gridDim, dim3 blockDim, GpuParams params, GpuMSEParams mse_params)
{
    if (params.kernel.useWeightImage()) {
        evaluateDerivativeMSE<DoSubsample, true, false><<<gridDim, blockDim>>>(params, mse_params);
        return;
    }
    if (params.kernel.useWeightTensor()) {
        evaluateDerivativeMSE<DoSubsample, false, true><<<gridDim, blockDim>>>(params, mse_params);
        return;
    }
    evaluateDerivativeMSE<DoSubsample, false, false><<<gridDim, blockDim>>>(params, mse_params);
}
|
b9d450832d20971d26ba1c2215705553e07e7cc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Restricts val to the closed interval [start, end], using the same
// min-then-max composition as before.
__host__ __device__ float Clamp(float val, float start, float end) {
    float upperBounded = min(val, end);
    return max(upperBounded, start);
}
#define Tilewidth 32
// CPU reference implementation of the 7-point stencil: for every interior
// voxel, out = clamp(sum of the six axis neighbours - 6*centre, 0, 255).
// Boundary voxels are never written (the loops cover the interior only).
void stencil_cpu(float *_out, float *_in, int width, int height,
int depth) {
// (i,height) is used for z-axis
// (j,width) is used for y-axis
// (k,depth) is used for x-axis
#define out(i, j, k) _out[((i)*width + (j)) * depth + (k)]
#define in(i, j, k) _in[((i)*width + (j)) * depth + (k)]
float res;
for (int i = 1; i < height - 1; ++i) {
for (int j = 1; j < width - 1; ++j) {
for (int k = 1; k < depth - 1; ++k) {
res = in(i, j, k + 1) + in(i, j, k - 1) + in(i, j + 1, k) +
in(i, j - 1, k) + in(i + 1, j, k) + in(i - 1, j, k) -
6 * in(i, j, k);
out(i, j, k) = Clamp(res, 0, 255);
}
}
}
#undef out
#undef in
}
// GPU 7-point stencil: for each interior voxel, sum of the six axis
// neighbours minus 6x the centre, clamped to [0, 255].  The grid is 2D over
// (x=width, y=depth); each thread sweeps along the height axis keeping
// bottom/current/top values in registers and publishing the current plane
// through shared memory; missing in-plane neighbours at tile edges are
// fetched from global memory, and 0 is substituted outside the volume.
// NOTE(review): every __syncthreads() below is inside the
// if((idx<width)&&(idz<depth)) branch; when a block straddles the volume
// boundary, out-of-range threads skip the barrier, which is undefined
// behaviour for divergent barriers -- consider hoisting the bounds check.
__global__ void stencil(float *output, float *input, int width, int height,
int depth) {
//@@ INSERT CODE HERE
#define output(i, j, k) output[((i)*width + (j)) * depth + (k)]
#define input(i, j, k) input[((i)*width + (j)) * depth + (k)]
//i is z axis(height),j is y axis(width), k is x axis (depth)
__shared__ float shared_array[Tilewidth][Tilewidth];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idz = threadIdx.y + blockIdx.y * blockDim.y;
if((idx<width)&&(idz<depth)){
float bottom = input(0,idx,idz);
float current = input(1,idx,idz);
float top = input(2,idx,idz);
shared_array[threadIdx.y][threadIdx.x] = current;
float result = 0;
float x0,x2,y0,y2;
__syncthreads();
for(int k = 1; k < height-1 ; k++){
/* if(idx==0 || idx== depth-1 || idy== width-1 || idy== 0){
shared_array[threadIdx.y][threadIdx.x] = input(k,idy,idx);
__syncthreads();
}*/
/* if((idx ==0 && idz ==0)||(idx==width-1 && idz == 0)||(idx==0 && idz==depth-1)||(idx==width-1 && idz ==depth -1)){
// shared_array[threadIdx.y][threadIdx.x] = input(k,idx,idz);
}
else if ((idx > width -1)||(idz>depth -1 )){
}
else{*/
// In-plane neighbours: shared memory when inside the tile, global memory
// at tile edges, 0 outside the volume.
x0 = (threadIdx.x>0)?shared_array[threadIdx.y][threadIdx.x-1]:(idx==0)?0:input(k,idx-1,idz);
x2 = (threadIdx.x<blockDim.x-1)?shared_array[threadIdx.y][threadIdx.x+1]:(idx==width-1)?0:input(k,idx+1,idz);
y0 = (threadIdx.y>0)?shared_array[threadIdx.y-1][threadIdx.x]:(idz==0)?0:input(k,idx,idz-1);
y2 = (threadIdx.y<blockDim.y-1)?shared_array[threadIdx.y+1][threadIdx.x]:(idz==depth-1)?0:input(k,idx,idz+1);
result = top + bottom + x0 + x2 + y0 + y2 - 6 * current;
__syncthreads();
// Advance the register pipeline one plane up the height axis.
bottom = current;
current = top;
shared_array[threadIdx.y][threadIdx.x] = current;
if((k+1)<height-1)
top = input(k+2,idx,idz);
__syncthreads();
// Only interior voxels are written back.
if((idx>0 && idx < (width -1))&&(idz>0 && idz<(depth -1)))
output(k,idx,idz) = Clamp(result,0,255);
// }
}
#undef output
#undef input
}
}
// Launches the stencil kernel with 32x32 thread blocks over the
// (width, depth) plane; the kernel itself iterates over the height axis.
// NOTE(review): the grid is sized from (width-1) and (depth-1), so the last
// boundary thread column/row may not be launched; the kernel fetches missing
// neighbours from global memory, but confirm full boundary coverage.
static void launch_stencil(float *deviceOutputData, float *deviceInputData, int width,
int height, int depth) {
//@@ INSERT CODE HERE
dim3 DimGrid(ceil((width-1)/32.0),ceil((depth-1)/32.0),1);
dim3 DimBlock(32,32,1);
hipLaunchKernelGGL(( stencil), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceOutputData,deviceInputData,width,height,depth);
}
// Driver: reads the input volume, runs the GPU stencil, and submits the
// result for checking.  All HIP API calls are now routed through wbCheck
// (the macro was defined but previously unused), and the Compute timer is
// closed only after the asynchronous kernel has actually finished.
int main(int argc, char *argv[]) {
  wbArg_t arg;
  int width;
  int height;
  int depth;
  char *inputFile;
  wbImage_t input;
  wbImage_t output;
  float *hostInputData;
  float *hostOutputData;
  float *deviceInputData;
  float *deviceOutputData;

  arg = wbArg_read(argc, argv);

  inputFile = wbArg_getInputFile(arg, 0);

  input = wbImport(inputFile);

  height = wbImage_getHeight(input);
  width = wbImage_getWidth(input);
  depth = wbImage_getChannels(input);

  output = wbImage_new(width, height, depth);

  hostInputData = wbImage_getData(input);
  hostOutputData = wbImage_getData(output);

  // size_t arithmetic avoids int overflow for large volumes.
  const size_t volumeBytes = (size_t)width * height * depth * sizeof(float);

  wbTime_start(GPU, "Doing GPU memory allocation");
  wbCheck(hipMalloc((void **)&deviceInputData, volumeBytes));
  wbCheck(hipMalloc((void **)&deviceOutputData, volumeBytes));
  wbTime_stop(GPU, "Doing GPU memory allocation");

  wbTime_start(Copy, "Copying data to the GPU");
  wbCheck(hipMemcpy(deviceInputData, hostInputData, volumeBytes,
                    hipMemcpyHostToDevice));
  wbTime_stop(Copy, "Copying data to the GPU");

  wbTime_start(Compute, "Doing the computation on the GPU");
  launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
  // Kernel launches are asynchronous: surface launch errors and wait for
  // completion so the Compute timer measures the actual kernel run time.
  wbCheck(hipGetLastError());
  wbCheck(hipDeviceSynchronize());
  wbTime_stop(Compute, "Doing the computation on the GPU");

  wbTime_start(Copy, "Copying data from the GPU");
  wbCheck(hipMemcpy(hostOutputData, deviceOutputData, volumeBytes,
                    hipMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying data from the GPU");

  wbSolution(arg, output);

  wbCheck(hipFree(deviceInputData));
  wbCheck(hipFree(deviceOutputData));

  wbImage_delete(output);
  wbImage_delete(input);

  return 0;
}
| b9d450832d20971d26ba1c2215705553e07e7cc8.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Restricts val to the closed interval [start, end], using the same
// min-then-max composition as before.
__host__ __device__ float Clamp(float val, float start, float end) {
    float upperBounded = min(val, end);
    return max(upperBounded, start);
}
#define Tilewidth 32
// CPU reference implementation of the 7-point stencil: for every interior
// voxel, out = clamp(sum of the six axis neighbours - 6*centre, 0, 255).
// Boundary voxels are never written (the loops cover the interior only).
void stencil_cpu(float *_out, float *_in, int width, int height,
int depth) {
// (i,height) is used for z-axis
// (j,width) is used for y-axis
// (k,depth) is used for x-axis
#define out(i, j, k) _out[((i)*width + (j)) * depth + (k)]
#define in(i, j, k) _in[((i)*width + (j)) * depth + (k)]
float res;
for (int i = 1; i < height - 1; ++i) {
for (int j = 1; j < width - 1; ++j) {
for (int k = 1; k < depth - 1; ++k) {
res = in(i, j, k + 1) + in(i, j, k - 1) + in(i, j + 1, k) +
in(i, j - 1, k) + in(i + 1, j, k) + in(i - 1, j, k) -
6 * in(i, j, k);
out(i, j, k) = Clamp(res, 0, 255);
}
}
}
#undef out
#undef in
}
// GPU 7-point stencil: for each interior voxel, sum of the six axis
// neighbours minus 6x the centre, clamped to [0, 255].  The grid is 2D over
// (x=width, y=depth); each thread sweeps along the height axis keeping
// bottom/current/top values in registers and publishing the current plane
// through shared memory; missing in-plane neighbours at tile edges are
// fetched from global memory, and 0 is substituted outside the volume.
// NOTE(review): every __syncthreads() below is inside the
// if((idx<width)&&(idz<depth)) branch; when a block straddles the volume
// boundary, out-of-range threads skip the barrier, which is undefined
// behaviour for divergent barriers -- consider hoisting the bounds check.
__global__ void stencil(float *output, float *input, int width, int height,
int depth) {
//@@ INSERT CODE HERE
#define output(i, j, k) output[((i)*width + (j)) * depth + (k)]
#define input(i, j, k) input[((i)*width + (j)) * depth + (k)]
//i is z axis(height),j is y axis(width), k is x axis (depth)
__shared__ float shared_array[Tilewidth][Tilewidth];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idz = threadIdx.y + blockIdx.y * blockDim.y;
if((idx<width)&&(idz<depth)){
float bottom = input(0,idx,idz);
float current = input(1,idx,idz);
float top = input(2,idx,idz);
shared_array[threadIdx.y][threadIdx.x] = current;
float result = 0;
float x0,x2,y0,y2;
__syncthreads();
for(int k = 1; k < height-1 ; k++){
/* if(idx==0 || idx== depth-1 || idy== width-1 || idy== 0){
shared_array[threadIdx.y][threadIdx.x] = input(k,idy,idx);
__syncthreads();
}*/
/* if((idx ==0 && idz ==0)||(idx==width-1 && idz == 0)||(idx==0 && idz==depth-1)||(idx==width-1 && idz ==depth -1)){
// shared_array[threadIdx.y][threadIdx.x] = input(k,idx,idz);
}
else if ((idx > width -1)||(idz>depth -1 )){
}
else{*/
// In-plane neighbours: shared memory when inside the tile, global memory
// at tile edges, 0 outside the volume.
x0 = (threadIdx.x>0)?shared_array[threadIdx.y][threadIdx.x-1]:(idx==0)?0:input(k,idx-1,idz);
x2 = (threadIdx.x<blockDim.x-1)?shared_array[threadIdx.y][threadIdx.x+1]:(idx==width-1)?0:input(k,idx+1,idz);
y0 = (threadIdx.y>0)?shared_array[threadIdx.y-1][threadIdx.x]:(idz==0)?0:input(k,idx,idz-1);
y2 = (threadIdx.y<blockDim.y-1)?shared_array[threadIdx.y+1][threadIdx.x]:(idz==depth-1)?0:input(k,idx,idz+1);
result = top + bottom + x0 + x2 + y0 + y2 - 6 * current;
__syncthreads();
// Advance the register pipeline one plane up the height axis.
bottom = current;
current = top;
shared_array[threadIdx.y][threadIdx.x] = current;
if((k+1)<height-1)
top = input(k+2,idx,idz);
__syncthreads();
// Only interior voxels are written back.
if((idx>0 && idx < (width -1))&&(idz>0 && idz<(depth -1)))
output(k,idx,idz) = Clamp(result,0,255);
// }
}
#undef output
#undef input
}
}
// Launches the stencil kernel with 32x32 thread blocks over the
// (width, depth) plane; the kernel itself iterates over the height axis.
// NOTE(review): the grid is sized from (width-1) and (depth-1), so the last
// boundary thread column/row may not be launched; the kernel fetches missing
// neighbours from global memory, but confirm full boundary coverage.
static void launch_stencil(float *deviceOutputData, float *deviceInputData, int width,
int height, int depth) {
//@@ INSERT CODE HERE
dim3 DimGrid(ceil((width-1)/32.0),ceil((depth-1)/32.0),1);
dim3 DimBlock(32,32,1);
stencil<<<DimGrid,DimBlock>>>(deviceOutputData,deviceInputData,width,height,depth);
}
// Driver: reads the input volume, runs the GPU stencil, and submits the
// result for checking.  All CUDA API calls are now routed through wbCheck
// (the macro was defined but previously unused), and the Compute timer is
// closed only after the asynchronous kernel has actually finished.
int main(int argc, char *argv[]) {
  wbArg_t arg;
  int width;
  int height;
  int depth;
  char *inputFile;
  wbImage_t input;
  wbImage_t output;
  float *hostInputData;
  float *hostOutputData;
  float *deviceInputData;
  float *deviceOutputData;

  arg = wbArg_read(argc, argv);

  inputFile = wbArg_getInputFile(arg, 0);

  input = wbImport(inputFile);

  height = wbImage_getHeight(input);
  width = wbImage_getWidth(input);
  depth = wbImage_getChannels(input);

  output = wbImage_new(width, height, depth);

  hostInputData = wbImage_getData(input);
  hostOutputData = wbImage_getData(output);

  // size_t arithmetic avoids int overflow for large volumes.
  const size_t volumeBytes = (size_t)width * height * depth * sizeof(float);

  wbTime_start(GPU, "Doing GPU memory allocation");
  wbCheck(cudaMalloc((void **)&deviceInputData, volumeBytes));
  wbCheck(cudaMalloc((void **)&deviceOutputData, volumeBytes));
  wbTime_stop(GPU, "Doing GPU memory allocation");

  wbTime_start(Copy, "Copying data to the GPU");
  wbCheck(cudaMemcpy(deviceInputData, hostInputData, volumeBytes,
                     cudaMemcpyHostToDevice));
  wbTime_stop(Copy, "Copying data to the GPU");

  wbTime_start(Compute, "Doing the computation on the GPU");
  launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
  // Kernel launches are asynchronous: surface launch errors and wait for
  // completion so the Compute timer measures the actual kernel run time.
  wbCheck(cudaGetLastError());
  wbCheck(cudaDeviceSynchronize());
  wbTime_stop(Compute, "Doing the computation on the GPU");

  wbTime_start(Copy, "Copying data from the GPU");
  wbCheck(cudaMemcpy(hostOutputData, deviceOutputData, volumeBytes,
                     cudaMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying data from the GPU");

  wbSolution(arg, output);

  wbCheck(cudaFree(deviceInputData));
  wbCheck(cudaFree(deviceOutputData));

  wbImage_delete(output);
  wbImage_delete(input);

  return 0;
}
|
3bc86d05c7610c79372a18990787c8ed9f781bf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zlobpcg_residuals.cu, normal z -> s, Tue Aug 30 09:38:43 2016
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
// copied from snrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
// Tree reduction over up to 2*n elements: i is this thread's index
// (typically threadIdx.x), x the buffer being reduced in place.
// NOTE(review): x must be visible to all threads in the block (the barriers
// below assume that) -- callers are expected to pass a __shared__ array.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
// end sum_reduce
// One thread per matrix row: for each of the num_vecs column vectors (stored
// contiguously with leading dimension num_rows), update the residual
//   R(:,j) -= evals[j] * X(:,j).
// The res argument is not used by this kernel.
__global__ void
magma_slobpcg_res_kernel(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magmaFloat_ptr evals,
    float * X,
    float * R,
    magmaFloat_ptr res)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows)
        return;
    for (int vec = 0; vec < num_vecs; ++vec) {
        const int idx = row + vec * num_rows;
        R[idx] += MAGMA_S_MAKE(-evals[vec], 0.0) * X[idx];
    }
}
/*
magmablas_snrm2_kernel(
int m,
float * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloat_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
This routine computes for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++) {
magma_saxpy(m, MAGMA_S_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloat_ptr
block of eigenvector approximations
@param[in]
R magmaFloat_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_slobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloat_ptr X,
magmaFloat_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
// ceil(num_rows / block_size) blocks so every row gets a thread.
dim3 grid( magma_ceildiv( num_rows, block_size ) );
// Launch asynchronously on the queue's stream; completion is the caller's
// responsibility via the queue.
hipLaunchKernelGGL(( magma_slobpcg_res_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
| 3bc86d05c7610c79372a18990787c8ed9f781bf7.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zlobpcg_residuals.cu, normal z -> s, Tue Aug 30 09:38:43 2016
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_s
// copied from snrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, magmaFloat_ptr x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_slobpcg_res_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evals,
float * X,
float * R,
magmaFloat_ptr res)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if ( row < num_rows) {
for( int i=0; i < num_vecs; i++ ) {
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_S_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_snrm2_kernel(
int m,
float * da,
int ldda,
float * dxnorm )
{
const int i = threadIdx.x;
magmaFloat_ptr dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_S_REAL( dx[j] );
float im = MAGMA_S_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
This routine computes for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++) {
magma_saxpy(m, MAGMA_S_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
evalues magmaFloat_ptr
array of eigenvalues/approximations
@param[in]
X magmaFloat_ptr
block of eigenvector approximations
@param[in]
R magmaFloat_ptr
block of residuals
@param[in]
res magmaFloat_ptr
array of residuals
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_slobpcg_res(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloat_ptr evalues,
magmaFloat_ptr X,
magmaFloat_ptr R,
magmaFloat_ptr res,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 threads( block_size );
dim3 grid( magma_ceildiv( num_rows, block_size ) );
magma_slobpcg_res_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
|
363603323472fe5ce11e19a5f3c29e7529bf3cc2.hip | // !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <helper_cuda.h>
/* Matrix size */
#define N (2)
#define nvec 4
/* FFT2 */
int main(int argc, char **argv)
{
hipblasStatus_t status;
float *X_re;
float *FX2_re;
float *X_re_FX2_re;
// float *h_C_ref;
float *X_im;
float *FX2_im;
float *X_re_FX2_im;
float *X_im_FX2_re;
float *X_im_FX2_im;
float *FX_re;
float *FX_im;
float *d_X_re = 0;
float *d_X_im = 0;
float *d_FX2_re = 0;
float *d_FX2_im = 0;
float *d_X_re_FX2_re = 0;
float *d_X_re_FX2_im = 0;
float *d_X_im_FX2_re = 0;
float *d_X_im_FX2_im = 0;
float *d_FX_re;
float *d_FX_im;
float alpha = 1.0f;
float beta = 0.0f;
int n2 = N * nvec;
int i;
hipblasHandle_t handle;
int dev = findCudaDevice(argc, (const char **) argv);
if (dev == -1)
{
return EXIT_FAILURE;
}
/* Initialize CUBLAS */
printf("simpleCUBLAS test running..\n");
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate host memory for the matrices */
X_re = (float *)malloc(n2 * sizeof(X_re[0]));
if (X_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re)\n");
return EXIT_FAILURE;
}
X_im = (float *)malloc(n2 * sizeof(X_im[0]));
if (X_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im)\n");
return EXIT_FAILURE;
}
FX2_re = (float *)malloc(4 * sizeof(FX2_re[0]));
if (FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX2_re)\n");
return EXIT_FAILURE;
}
FX2_im = (float *)malloc(4 * sizeof(FX2_im[0]));
if (FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX2_im)\n");
return EXIT_FAILURE;
}
X_re_FX2_re = (float *)malloc(n2 * sizeof(X_re_FX2_re[0]));
if (X_re_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re_FX2_re)\n");
return EXIT_FAILURE;
}
X_re_FX2_im = (float *)malloc(n2 * sizeof(X_re_FX2_im[0]));
if (X_re_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re_FX2_im)\n");
return EXIT_FAILURE;
}
X_im_FX2_re = (float *)malloc(n2 * sizeof(X_im_FX2_re[0]));
if (X_im_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im_FX2_re)\n");
return EXIT_FAILURE;
}
X_im_FX2_im = (float *)malloc(n2 * sizeof(X_im_FX2_im[0]));
if (X_im_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im_FX2_im)\n");
return EXIT_FAILURE;
}
FX_re = (float *)malloc(n2 * sizeof(FX_re[0]));
if (FX_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX_re)\n");
return EXIT_FAILURE;
}
FX_im = (float *)malloc(n2 * sizeof(FX_im[0]));
if (FX_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX_im)\n");
return EXIT_FAILURE;
}
/* Fill the matrices with test data */
printf("\n X_re = [");
for (i = 0; i < n2; i++)
{
X_re[i] = rand() / (float)RAND_MAX;
printf("%f, ", X_re[i]);
}
printf("]; \n X-im = [");
for (i=0; i<n2; i++)
{
X_im[i] = rand() / (float)RAND_MAX;
printf("%f, ", X_im[i]);
X_re_FX2_re[i] = 0;
}
printf("];");
FX2_re[0] = 1;
FX2_re[1] = 1;
FX2_re[2] = 1;
FX2_re[3] = -1;
FX2_im[0] = 0;
FX2_im[1] = 0;
FX2_im[2] = 0;
FX2_im[3] = 0;
/* Allocate device memory for the matrices */
if (hipMalloc((void **)&d_X_re, n2 * sizeof(d_X_re[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Are)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_X_im, n2 * sizeof(d_X_im[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Aim)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_FX2_re, 4 * sizeof(d_FX2_re[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Bre)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_FX2_im, 4 * sizeof(d_FX2_im[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Bim)\n");
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_X_re_FX2_re, n2 * sizeof(d_X_re_FX2_re[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Crere)\n" );
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_X_re_FX2_im, n2 * sizeof(d_X_re_FX2_im[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Creim)\n" );
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_X_im_FX2_re, n2 * sizeof(d_X_im_FX2_re[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Cimre)\n" );
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_X_im_FX2_im, n2 * sizeof(d_X_im_FX2_im[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Cimim)\n" );
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_FX_im, n2 * sizeof(d_FX_im[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate FXim)\n") ;
return EXIT_FAILURE;
}
if (hipMalloc((void **)&d_FX_re, n2 * sizeof(d_FX_re[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate FXre)\n") ;
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
status = hipblasSetVector(n2, sizeof(X_re[0]), X_re, 1, d_X_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(X_im[0]), X_im, 1, d_X_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write Aim)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(4, sizeof(FX2_re[0]), FX2_re, 1, d_FX2_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(4, sizeof(FX2_im[0]), FX2_im, 1, d_FX2_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(X_re_FX2_re[0]), X_re_FX2_re, 1, d_X_re_ FX2_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(X_re_FX2_im[0]), X_re_FX2_im, 1, d_X_re_ FX2_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(X_im_FX2_re[0]), X_im_FX2_re, 1, d_X_im_ FX2_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(X_im_FX2_im[0]), X_im_FX2_im, 1, d_X_im_ FX2_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(FX_im[0]), FX_im, 1, d_FX_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write FX im)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n2, sizeof(FX_re[0]), FX_re, 1, d_FX_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write FX re)\n");
return EXIT_FAILURE;
}
/* Performs multiply operation using cublas */
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_re, N, d_X_re, N, &beta, d_X_re_FX2_re, N);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_im, N, d_X_re, N, &beta, d_X_re_FX2_im, N);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_re, N, d_X_im, N, &beta, d_X_im_FX2_re, N);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_im, N, d_X_im, N, &beta, d_X_im_FX2_im, N);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Copy into Result Matrix */
status = hipblasScopy(handle, n2,d_X_re_FX2_re, 1,d_FX_re,1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr,"!!! Cublas Kernal Exececution Error copy .\n");
return EXIT_FAILURE;
}
status = hipblasScopy(handle, n2,d_X_re_FX2_im, 1,d_FX_im,1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr,"!!! Cublas Kernal Exececution Error copy .\n");
return EXIT_FAILURE;
}
/* ac-bd */
alpha = -1.0f;
status = hipblasSaxpy(handle, n2, &alpha, d_X_im_FX2_im, 1, d_FX_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ac - bd).\n");
return EXIT_FAILURE;
}
alpha = 1.0f;
status = hipblasSaxpy(handle, n2, &alpha, d_X_im_FX2_re, 1, d_FX_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ad + bc).\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
X_re_FX2_re = (float *)malloc(n2 * sizeof(X_re_FX2_re[0]));
if (X_re_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_re_FX2_im = (float *)malloc(n2 * sizeof(X_re_FX2_im[0]));
if (X_re_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_im_FX2_re = (float *)malloc(n2 * sizeof(X_im_FX2_re[0]));
if (X_im_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_im_FX2_im = (float *)malloc(n2 * sizeof(X_im_FX2_im[0]));
if (X_im_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = hipblasGetVector(n2, sizeof(X_re_FX2_re[0]), d_X_re_FX2_re, 1, X_re_ FX2_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = hipblasGetVector(n2, sizeof(X_re_FX2_im[0]), d_X_re_FX2_im, 1, X_re_ FX2_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = hipblasGetVector(n2, sizeof(X_im_FX2_re[0]), d_X_im_FX2_re, 1, X_im_ FX2_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = hipblasGetVector(n2, sizeof(X_im_FX2_im[0]), d_X_im_FX2_im, 1, X_im_ FX2_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = hipblasGetVector(n2, sizeof(FX_re[0]), d_FX_re, 1, FX_re, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = hipblasGetVector(n2, sizeof(FX_im[0]), d_FX_im, 1, FX_im, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
printf("\nResult: \n");
printf(" F_re = [");
for(int k=0;k<n2;k++)
{
printf(" %f,", FX_re[k]);
}
printf("]; \n F_im = [");
for(int k=0;k<n2;k++)
{
printf("%f,", FX_im[k]);
}
printf("];\n");
/* Memory clean up */
free(X_re);
free(FX2_re);
free(X_re_FX2_re);
// free(h_C_ref);
free(X_im);
free(FX2_im);
free(X_re_FX2_im);
free(X_im_FX2_re);
free(X_im_FX2_im);
free(FX_re);
free(FX_im);
if (hipFree(d_X_re) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_FX2_re) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (hipFree(d_X_re_FX2_re) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_X_im) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_FX2_im) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (hipFree(d_X_re_FX2_im) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_X_im_FX2_re) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_X_im_FX2_im) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_FX_re) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_FX_im) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
}
| 363603323472fe5ce11e19a5f3c29e7529bf3cc2.cu |
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <helper_cuda.h>
/* Matrix size */
#define N (2)
#define nvec 4
/* FFT2 */
int main(int argc, char **argv)
{
cublasStatus_t status;
float *X_re;
float *FX2_re;
float *X_re_FX2_re;
// float *h_C_ref;
float *X_im;
float *FX2_im;
float *X_re_FX2_im;
float *X_im_FX2_re;
float *X_im_FX2_im;
float *FX_re;
float *FX_im;
float *d_X_re = 0;
float *d_X_im = 0;
float *d_FX2_re = 0;
float *d_FX2_im = 0;
float *d_X_re_FX2_re = 0;
float *d_X_re_FX2_im = 0;
float *d_X_im_FX2_re = 0;
float *d_X_im_FX2_im = 0;
float *d_FX_re;
float *d_FX_im;
float alpha = 1.0f;
float beta = 0.0f;
int n2 = N * nvec;
int i;
cublasHandle_t handle;
int dev = findCudaDevice(argc, (const char **) argv);
if (dev == -1)
{
return EXIT_FAILURE;
}
/* Initialize CUBLAS */
printf("simpleCUBLAS test running..\n");
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate host memory for the matrices */
X_re = (float *)malloc(n2 * sizeof(X_re[0]));
if (X_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re)\n");
return EXIT_FAILURE;
}
X_im = (float *)malloc(n2 * sizeof(X_im[0]));
if (X_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im)\n");
return EXIT_FAILURE;
}
FX2_re = (float *)malloc(4 * sizeof(FX2_re[0]));
if (FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX2_re)\n");
return EXIT_FAILURE;
}
FX2_im = (float *)malloc(4 * sizeof(FX2_im[0]));
if (FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX2_im)\n");
return EXIT_FAILURE;
}
X_re_FX2_re = (float *)malloc(n2 * sizeof(X_re_FX2_re[0]));
if (X_re_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re_FX2_re)\n");
return EXIT_FAILURE;
}
X_re_FX2_im = (float *)malloc(n2 * sizeof(X_re_FX2_im[0]));
if (X_re_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_re_FX2_im)\n");
return EXIT_FAILURE;
}
X_im_FX2_re = (float *)malloc(n2 * sizeof(X_im_FX2_re[0]));
if (X_im_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im_FX2_re)\n");
return EXIT_FAILURE;
}
X_im_FX2_im = (float *)malloc(n2 * sizeof(X_im_FX2_im[0]));
if (X_im_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (X_im_FX2_im)\n");
return EXIT_FAILURE;
}
FX_re = (float *)malloc(n2 * sizeof(FX_re[0]));
if (FX_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX_re)\n");
return EXIT_FAILURE;
}
FX_im = (float *)malloc(n2 * sizeof(FX_im[0]));
if (FX_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (FX_im)\n");
return EXIT_FAILURE;
}
/* Fill the matrices with test data */
printf("\n X_re = [");
for (i = 0; i < n2; i++)
{
X_re[i] = rand() / (float)RAND_MAX;
printf("%f, ", X_re[i]);
}
printf("]; \n X-im = [");
for (i=0; i<n2; i++)
{
X_im[i] = rand() / (float)RAND_MAX;
printf("%f, ", X_im[i]);
X_re_FX2_re[i] = 0;
}
printf("];");
FX2_re[0] = 1;
FX2_re[1] = 1;
FX2_re[2] = 1;
FX2_re[3] = -1;
FX2_im[0] = 0;
FX2_im[1] = 0;
FX2_im[2] = 0;
FX2_im[3] = 0;
/* Allocate device memory for the matrices */
if (cudaMalloc((void **)&d_X_re, n2 * sizeof(d_X_re[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Are)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_X_im, n2 * sizeof(d_X_im[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Aim)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_FX2_re, 4 * sizeof(d_FX2_re[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Bre)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_FX2_im, 4 * sizeof(d_FX2_im[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Bim)\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_X_re_FX2_re, n2 * sizeof(d_X_re_FX2_re[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Crere)\n" );
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_X_re_FX2_im, n2 * sizeof(d_X_re_FX2_im[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Creim)\n" );
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_X_im_FX2_re, n2 * sizeof(d_X_im_FX2_re[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Cimre)\n" );
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_X_im_FX2_im, n2 * sizeof(d_X_im_FX2_im[0])) != cu daSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate Cimim)\n" );
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_FX_im, n2 * sizeof(d_FX_im[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate FXim)\n") ;
return EXIT_FAILURE;
}
if (cudaMalloc((void **)&d_FX_re, n2 * sizeof(d_FX_re[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate FXre)\n") ;
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
status = cublasSetVector(n2, sizeof(X_re[0]), X_re, 1, d_X_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(X_im[0]), X_im, 1, d_X_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write Aim)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(4, sizeof(FX2_re[0]), FX2_re, 1, d_FX2_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(4, sizeof(FX2_im[0]), FX2_im, 1, d_FX2_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(X_re_FX2_re[0]), X_re_FX2_re, 1, d_X_re_ FX2_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(X_re_FX2_im[0]), X_re_FX2_im, 1, d_X_re_ FX2_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(X_im_FX2_re[0]), X_im_FX2_re, 1, d_X_im_ FX2_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(X_im_FX2_im[0]), X_im_FX2_im, 1, d_X_im_ FX2_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(FX_im[0]), FX_im, 1, d_FX_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write FX im)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n2, sizeof(FX_re[0]), FX_re, 1, d_FX_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write FX re)\n");
return EXIT_FAILURE;
}
/* Performs multiply operation using cublas */
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_re, N, d_X_re, N, &beta, d_X_re_FX2_re, N);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_im, N, d_X_re, N, &beta, d_X_re_FX2_im, N);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_re, N, d_X_im, N, &beta, d_X_im_FX2_re, N);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, nvec,N, &alpha, d_ FX2_im, N, d_X_im, N, &beta, d_X_im_FX2_im, N);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Copy into Result Matrix */
status = cublasScopy(handle, n2,d_X_re_FX2_re, 1,d_FX_re,1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr,"!!! Cublas Kernal Exececution Error copy .\n");
return EXIT_FAILURE;
}
status = cublasScopy(handle, n2,d_X_re_FX2_im, 1,d_FX_im,1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr,"!!! Cublas Kernal Exececution Error copy .\n");
return EXIT_FAILURE;
}
/* ac-bd */
alpha = -1.0f;
status = cublasSaxpy(handle, n2, &alpha, d_X_im_FX2_im, 1, d_FX_re, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ac - bd).\n");
return EXIT_FAILURE;
}
alpha = 1.0f;
status = cublasSaxpy(handle, n2, &alpha, d_X_im_FX2_re, 1, d_FX_im, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS kernel execution error (ad + bc).\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
X_re_FX2_re = (float *)malloc(n2 * sizeof(X_re_FX2_re[0]));
if (X_re_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_re_FX2_im = (float *)malloc(n2 * sizeof(X_re_FX2_im[0]));
if (X_re_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_im_FX2_re = (float *)malloc(n2 * sizeof(X_im_FX2_re[0]));
if (X_im_FX2_re == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
X_im_FX2_im = (float *)malloc(n2 * sizeof(X_im_FX2_im[0]));
if (X_im_FX2_im == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = cublasGetVector(n2, sizeof(X_re_FX2_re[0]), d_X_re_FX2_re, 1, X_re_ FX2_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = cublasGetVector(n2, sizeof(X_re_FX2_im[0]), d_X_re_FX2_im, 1, X_re_ FX2_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = cublasGetVector(n2, sizeof(X_im_FX2_re[0]), d_X_im_FX2_re, 1, X_im_ FX2_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = cublasGetVector(n2, sizeof(X_im_FX2_im[0]), d_X_im_FX2_im, 1, X_im_ FX2_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = cublasGetVector(n2, sizeof(FX_re[0]), d_FX_re, 1, FX_re, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
status = cublasGetVector(n2, sizeof(FX_im[0]), d_FX_im, 1, FX_im, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
printf("\nResult: \n");
printf(" F_re = [");
for(int k=0;k<n2;k++)
{
printf(" %f,", FX_re[k]);
}
printf("]; \n F_im = [");
for(int k=0;k<n2;k++)
{
printf("%f,", FX_im[k]);
}
printf("];\n");
/* Memory clean up */
free(X_re);
free(FX2_re);
free(X_re_FX2_re);
// free(h_C_ref);
free(X_im);
free(FX2_im);
free(X_re_FX2_im);
free(X_im_FX2_re);
free(X_im_FX2_im);
free(FX_re);
free(FX_im);
if (cudaFree(d_X_re) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_FX2_re) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_X_re_FX2_re) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_X_im) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_FX2_im) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_X_re_FX2_im) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_X_im_FX2_re) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_X_im_FX2_im) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_FX_re) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_FX_im) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
}
|
c8c81c6cf4b5618e3471289ba94fa19c0d25773c.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "TH.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_SpatialBatchNormalization_init(L);
cunn_SpatialConvolutionLocal_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxUnpooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialUpSamplingNearest_init(L);
return 1;
}
| c8c81c6cf4b5618e3471289ba94fa19c0d25773c.cu | #include "luaT.h"
#include "TH.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_SpatialBatchNormalization_init(L);
cunn_SpatialConvolutionLocal_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxUnpooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialUpSamplingNearest_init(L);
return 1;
}
|
f67c722e10bf981ace1648a0d07e97d918a29644.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kfusion.h"
#include "perfstats.h"
#include <iostream>
using namespace std;
PerfStats Stats;
__global__ void setSphere( Volume volume, const float3 center, const float radius, const float val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z) {
const float d = length(volume.pos(pos) - center);
if(d < radius)
volume.set(pos, make_float2(val, 0.0f));
}
}
__global__ void setBox( Volume volume, const float3 min_corner, const float3 max_corner, const float val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z) {
const float3 p = volume.pos(pos);
if(min_corner.x < p.x && min_corner.y < p.y && min_corner.z < p.z &&
p.x < max_corner.x && p.y < max_corner.y && p.z < max_corner.z )
volume.set(pos, make_float2(val, 0.0f));
}
}
void initVolumeWrap( Volume volume, const float val ){
dim3 block(32,16);
hipLaunchKernelGGL(( initVolume), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, make_float2(val, 0.0f));
}
void setBoxWrap(Volume volume, const float3 min_corner, const float3 max_corner, const float val ){
dim3 block(32,16);
hipLaunchKernelGGL(( setBox), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, min_corner, max_corner, val);
}
void setSphereWrap(Volume volume, const float3 center, const float radius, const float val ){
dim3 block(32,16);
hipLaunchKernelGGL(( setSphere), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, center, radius, val);
}
__global__ void renderNormals( Image<uchar3> out, const Image<float3> in ){
float3 n = in.el();
if(n.x == -2)
out.el() = make_uchar3(0,0,0);
else {
n = normalize(n);
out.el() = make_uchar3(n.x*128 + 128, n.y*128+128, n.z*128+128);
}
}
void renderNormalMap( Image<uchar3> out, const Image<float3> & normal ){
dim3 block(20,20);
hipLaunchKernelGGL(( renderNormals), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, normal );
}
__global__ void renderLightKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const float3 light, const float3 ambient ){
if(normal.el().x == -2)
out.el() = make_uchar4(0,0,0,0);
else {
const float3 diff = normalize(light - vertex.el());
const float dir = fmaxf(dot(normal.el(), diff), 0.f);
const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255;
out.el() = make_uchar4(col.x, col.y, col.z, 255);
}
}
void renderLight( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const float3 light, const float3 ambient ){
dim3 block(32,16);
hipLaunchKernelGGL(( renderLightKernel), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, vertex, normal, light, ambient );
}
__global__ void renderTextureKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const Image<uchar3> texture, const Matrix4 texproj, const float3 light){
if(normal.el().x == -2)
out.el() = make_uchar4(0,0,0,0);
else {
const float3 proj = texproj * vertex.el();
const float2 projPixel = make_float2( proj.x / proj.z + 0.5f, proj.y / proj.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > texture.size.x-1 || projPixel.y < 0 || projPixel.y > texture.size.y-1 ){
const float3 diff = normalize(light - vertex.el());
const float dir = fmaxf(dot(normal.el(), diff), 0.f) * 255;
out.el() = make_uchar4(dir,dir,dir,255);
} else {
const uchar3 texcol = texture[make_uint2(projPixel.x, projPixel.y)];
out.el() = make_uchar4(texcol.x, texcol.y, texcol.z, 255);
}
}
}
void renderTexture( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const Image<uchar3> & texture, const Matrix4 & texproj, const float3 light){
dim3 block(32,16);
hipLaunchKernelGGL(( renderTextureKernel), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, vertex, normal, texture, texproj, light);
}
__global__ void renderDepth( Image<uchar3> out, const Image<float> depth, const float nearPlane, const float farPlane){
const float d = (clamp(depth.el(), nearPlane, farPlane) - nearPlane) / (farPlane - nearPlane);
out.el() = make_uchar3(d * 255, d * 255, d * 255);
}
void renderDepthMap( Image<uchar3> out, const Image<float> & depth, const float nearPlane, const float farPlane ){
dim3 block(32,16);
hipLaunchKernelGGL(( renderDepth), dim3(divup(depth.size, block)), dim3(block), 0, 0, out, depth, nearPlane, farPlane );
}
__global__ void renderTrack( Image<uchar4> out, const Image<TrackData> data ){
const uint2 pos = thr2pos2();
switch(data[pos].result){
case 1: out[pos] = make_uchar4(128, 128, 128,0); // ok
break;
case -1: out[pos] = make_uchar4(0, 0, 0,0); // no input
break;
case -2: out[pos] = make_uchar4(255,0,0,0); // not in image
break;
case -3: out[pos] = make_uchar4(0,255,0,0); // no correspondence
break;
case -4: out[pos] = make_uchar4(0,0,255,0); // to far away
break;
case -5: out[pos] = make_uchar4(255,255,0,0); // wrong normal
break;
}
}
void renderTrackResult( Image<uchar4> out, const Image<TrackData> & data ){
dim3 block(32,16);
hipLaunchKernelGGL(( renderTrack), dim3(divup(out.size, block)), dim3(block), 0, 0, out, data );
}
__global__ void raycastLight( Image<uchar4> render, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep, const float3 light, const float3 ambient){
const uint2 pos = thr2pos2();
float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep);
if(hit.w > 0){
const float3 test = make_float3(hit);
const float3 surfNorm = volume.grad(test);
if(length(surfNorm) > 0){
const float3 diff = normalize(light - test);
const float dir = fmaxf(dot(normalize(surfNorm), diff), 0.f);
const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255;
render.el() = make_uchar4(col.x, col.y, col.z,0);
} else {
render.el() = make_uchar4(0,0,0,0);
}
} else {
render.el() = make_uchar4(0,0,0,0);
}
}
void renderVolumeLight( Image<uchar4> out, const Volume & volume, const Matrix4 view, const float nearPlane, const float farPlane, const float largestep, const float3 light, const float3 ambient ){
dim3 block(16,16);
hipLaunchKernelGGL(( raycastLight), dim3(divup(out.size, block)), dim3(block), 0, 0, out, volume, view, nearPlane, farPlane, volume.dim.x/volume.size.x, largestep, light, ambient );
}
__global__ void raycastInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep);
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
depth[pos] = hit.w;
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = -2;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(0);
depth[pos] = 0;
}
}
void renderInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
dim3 block(16,16);
hipLaunchKernelGGL(( raycastInput), dim3(divup(pos3D.size, block)), dim3(block), 0, 0, pos3D, normal, depth, volume, view, nearPlane, farPlane, step, largestep);
}
| f67c722e10bf981ace1648a0d07e97d918a29644.cu | #include "kfusion.h"
#include "perfstats.h"
#include <iostream>
using namespace std;
PerfStats Stats;
__global__ void setSphere( Volume volume, const float3 center, const float radius, const float val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z) {
const float d = length(volume.pos(pos) - center);
if(d < radius)
volume.set(pos, make_float2(val, 0.0f));
}
}
__global__ void setBox( Volume volume, const float3 min_corner, const float3 max_corner, const float val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z) {
const float3 p = volume.pos(pos);
if(min_corner.x < p.x && min_corner.y < p.y && min_corner.z < p.z &&
p.x < max_corner.x && p.y < max_corner.y && p.z < max_corner.z )
volume.set(pos, make_float2(val, 0.0f));
}
}
void initVolumeWrap( Volume volume, const float val ){
dim3 block(32,16);
initVolume<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, make_float2(val, 0.0f));
}
void setBoxWrap(Volume volume, const float3 min_corner, const float3 max_corner, const float val ){
dim3 block(32,16);
setBox<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, min_corner, max_corner, val);
}
void setSphereWrap(Volume volume, const float3 center, const float radius, const float val ){
dim3 block(32,16);
setSphere<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, center, radius, val);
}
__global__ void renderNormals( Image<uchar3> out, const Image<float3> in ){
float3 n = in.el();
if(n.x == -2)
out.el() = make_uchar3(0,0,0);
else {
n = normalize(n);
out.el() = make_uchar3(n.x*128 + 128, n.y*128+128, n.z*128+128);
}
}
void renderNormalMap( Image<uchar3> out, const Image<float3> & normal ){
dim3 block(20,20);
renderNormals<<<divup(normal.size, block), block>>>( out, normal );
}
__global__ void renderLightKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const float3 light, const float3 ambient ){
if(normal.el().x == -2)
out.el() = make_uchar4(0,0,0,0);
else {
const float3 diff = normalize(light - vertex.el());
const float dir = fmaxf(dot(normal.el(), diff), 0.f);
const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255;
out.el() = make_uchar4(col.x, col.y, col.z, 255);
}
}
void renderLight( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const float3 light, const float3 ambient ){
dim3 block(32,16);
renderLightKernel<<<divup(normal.size, block), block>>>( out, vertex, normal, light, ambient );
}
__global__ void renderTextureKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const Image<uchar3> texture, const Matrix4 texproj, const float3 light){
if(normal.el().x == -2)
out.el() = make_uchar4(0,0,0,0);
else {
const float3 proj = texproj * vertex.el();
const float2 projPixel = make_float2( proj.x / proj.z + 0.5f, proj.y / proj.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > texture.size.x-1 || projPixel.y < 0 || projPixel.y > texture.size.y-1 ){
const float3 diff = normalize(light - vertex.el());
const float dir = fmaxf(dot(normal.el(), diff), 0.f) * 255;
out.el() = make_uchar4(dir,dir,dir,255);
} else {
const uchar3 texcol = texture[make_uint2(projPixel.x, projPixel.y)];
out.el() = make_uchar4(texcol.x, texcol.y, texcol.z, 255);
}
}
}
void renderTexture( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const Image<uchar3> & texture, const Matrix4 & texproj, const float3 light){
dim3 block(32,16);
renderTextureKernel<<<divup(normal.size, block), block>>>( out, vertex, normal, texture, texproj, light);
}
__global__ void renderDepth( Image<uchar3> out, const Image<float> depth, const float nearPlane, const float farPlane){
const float d = (clamp(depth.el(), nearPlane, farPlane) - nearPlane) / (farPlane - nearPlane);
out.el() = make_uchar3(d * 255, d * 255, d * 255);
}
void renderDepthMap( Image<uchar3> out, const Image<float> & depth, const float nearPlane, const float farPlane ){
dim3 block(32,16);
renderDepth<<<divup(depth.size, block), block>>>( out, depth, nearPlane, farPlane );
}
__global__ void renderTrack( Image<uchar4> out, const Image<TrackData> data ){
const uint2 pos = thr2pos2();
switch(data[pos].result){
case 1: out[pos] = make_uchar4(128, 128, 128,0); // ok
break;
case -1: out[pos] = make_uchar4(0, 0, 0,0); // no input
break;
case -2: out[pos] = make_uchar4(255,0,0,0); // not in image
break;
case -3: out[pos] = make_uchar4(0,255,0,0); // no correspondence
break;
case -4: out[pos] = make_uchar4(0,0,255,0); // to far away
break;
case -5: out[pos] = make_uchar4(255,255,0,0); // wrong normal
break;
}
}
void renderTrackResult( Image<uchar4> out, const Image<TrackData> & data ){
dim3 block(32,16);
renderTrack<<<divup(out.size, block), block>>>( out, data );
}
__global__ void raycastLight( Image<uchar4> render, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep, const float3 light, const float3 ambient){
const uint2 pos = thr2pos2();
float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep);
if(hit.w > 0){
const float3 test = make_float3(hit);
const float3 surfNorm = volume.grad(test);
if(length(surfNorm) > 0){
const float3 diff = normalize(light - test);
const float dir = fmaxf(dot(normalize(surfNorm), diff), 0.f);
const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255;
render.el() = make_uchar4(col.x, col.y, col.z,0);
} else {
render.el() = make_uchar4(0,0,0,0);
}
} else {
render.el() = make_uchar4(0,0,0,0);
}
}
void renderVolumeLight( Image<uchar4> out, const Volume & volume, const Matrix4 view, const float nearPlane, const float farPlane, const float largestep, const float3 light, const float3 ambient ){
dim3 block(16,16);
raycastLight<<<divup(out.size, block), block>>>( out, volume, view, nearPlane, farPlane, volume.dim.x/volume.size.x, largestep, light, ambient );
}
__global__ void raycastInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep);
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
depth[pos] = hit.w;
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = -2;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(0);
depth[pos] = 0;
}
}
void renderInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
dim3 block(16,16);
raycastInput<<<divup(pos3D.size, block), block>>>(pos3D, normal, depth, volume, view, nearPlane, farPlane, step, largestep);
}
|
7a469bcfb5ce19bd5be4c148d1be50d97cfe37cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define BLOCKSIZE 256
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS]; /* values at time t */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 0; j < tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
__global__
void do_math(float *values, int npoints, int niters)
{
int myidx = blockIdx.x * BLOCKSIZE + threadIdx.x;
if (myidx < npoints-1)
{
float dtime, c, dx, tau, sqtau;
float ov, va, nv;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
va = values[myidx];
ov = va;
for (int i = 0; i < niters; i++)
{
nv = (2.0 * va) - ov + (sqtau * (-2.0)*va); // Can't combine, the answer will be different.
ov = va;
va = nv;
}
values[myidx] = va;
}
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
void update()
{
float *vd;
int tt_size = tpoints * sizeof(float);
hipMalloc((void**)&vd, tt_size);
hipMemcpy(vd, values, tt_size, hipMemcpyHostToDevice);
// Determine GridSize and BlockSize
int gridSize = (tpoints-1) / BLOCKSIZE + 1;
//dim3 dimGrid(gridSize, 1);
//dim3 dimBlock(BLOCKSIZE, 1);
/* Update values for each time step */
hipLaunchKernelGGL(( do_math), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, vd, tpoints, nsteps);
/* Update points along line for this time step */
/* Update old values with new values */
hipMemcpy(values, vd, tt_size, hipMemcpyDeviceToHost);
hipFree(vd);
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if ((i+1)%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
update();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
| 7a469bcfb5ce19bd5be4c148d1be50d97cfe37cf.cu | /**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define BLOCKSIZE 256
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS]; /* values at time t */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 0; j < tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
__global__
void do_math(float *values, int npoints, int niters)
{
int myidx = blockIdx.x * BLOCKSIZE + threadIdx.x;
if (myidx < npoints-1)
{
float dtime, c, dx, tau, sqtau;
float ov, va, nv;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
va = values[myidx];
ov = va;
for (int i = 0; i < niters; i++)
{
nv = (2.0 * va) - ov + (sqtau * (-2.0)*va); // Can't combine, the answer will be different.
ov = va;
va = nv;
}
values[myidx] = va;
}
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
void update()
{
float *vd;
int tt_size = tpoints * sizeof(float);
cudaMalloc((void**)&vd, tt_size);
cudaMemcpy(vd, values, tt_size, cudaMemcpyHostToDevice);
// Determine GridSize and BlockSize
int gridSize = (tpoints-1) / BLOCKSIZE + 1;
//dim3 dimGrid(gridSize, 1);
//dim3 dimBlock(BLOCKSIZE, 1);
/* Update values for each time step */
do_math<<<gridSize, BLOCKSIZE>>>(vd, tpoints, nsteps);
/* Update points along line for this time step */
/* Update old values with new values */
cudaMemcpy(values, vd, tt_size, cudaMemcpyDeviceToHost);
cudaFree(vd);
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if ((i+1)%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
update();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
bc1bec2bf425272333307de802eca7727a75f1a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUDA_matrix_operators.h"
std::vector<float> CUDA_mult_MAT(const std::vector<float> &data_1 , const uint64_t data_1_rows, const uint64_t data_1_columns,
const std::vector<float> &data_2 , const uint64_t data_2_rows, const uint64_t data_2_columns){
hipblasHandle_t handle;
cublasErrchk(hipblasCreate(&handle));
// std::cout << "data_1_rows: " << data_1_rows << " data_1_columns: " << data_1_columns << "\n";
// std::cout << "data_2_rows: " << data_2_rows << " data_2_columns: " << data_2_columns << "\n";
std::vector<float> result(data_1_rows * data_2_columns);
/*----------------------------------------------------------------------------------------------*/
float* GPU_data_1 = NULL;
gpuErrchk(hipMalloc((void**)&GPU_data_1 , data_1.size()*sizeof(float)));
gpuErrchk(hipMemcpy(GPU_data_1, data_1.data(), data_1.size()*sizeof(float), hipMemcpyHostToDevice));
float* GPU_data_2 = NULL;
gpuErrchk(hipMalloc((void**)&GPU_data_2 ,data_2.size()*sizeof(float)));
gpuErrchk(hipMemcpy(GPU_data_2, data_2.data(), data_2.size()*sizeof(float), hipMemcpyHostToDevice));
float* GPU_result = NULL;
gpuErrchk(hipMalloc((void**)&GPU_result , result.size()*sizeof(float)));
/*----------------------------------------------------------------------------------------------*/
//hipblasSgemm(handle , operation , operation , m , n , k , alpha , A , lda , B , ldb , beta , C , ldc
//(m X n) * (n X k) -> (m X k)
//C = (alpha*A) * B + (beta*C)
const float alpha = 1.f; //Needs to be defined as a variable as it can be either a host or a device pointer (type float* in argument)
const float beta = 0.f;
cublasErrchk(
hipblasSgemm(handle , HIPBLAS_OP_N , HIPBLAS_OP_N,
data_2_columns , data_1_rows ,data_1_columns,
&alpha , GPU_data_2 , data_2_columns,
GPU_data_1 , data_1_columns,
&beta , GPU_result , data_2_columns)
);
gpuErrchk(hipMemcpy(result.data() , GPU_result , result.size() * sizeof(float) , hipMemcpyDeviceToHost));
gpuErrchk(hipFree(GPU_data_1));
gpuErrchk(hipFree(GPU_data_2));
gpuErrchk(hipFree(GPU_result));
cublasErrchk(hipblasDestroy(handle));
return result;
}
| bc1bec2bf425272333307de802eca7727a75f1a5.cu | #include "CUDA_matrix_operators.h"
std::vector<float> CUDA_mult_MAT(const std::vector<float> &data_1 , const uint64_t data_1_rows, const uint64_t data_1_columns,
const std::vector<float> &data_2 , const uint64_t data_2_rows, const uint64_t data_2_columns){
cublasHandle_t handle;
cublasErrchk(cublasCreate(&handle));
// std::cout << "data_1_rows: " << data_1_rows << " data_1_columns: " << data_1_columns << "\n";
// std::cout << "data_2_rows: " << data_2_rows << " data_2_columns: " << data_2_columns << "\n";
std::vector<float> result(data_1_rows * data_2_columns);
/*----------------------------------------------------------------------------------------------*/
float* GPU_data_1 = NULL;
gpuErrchk(cudaMalloc((void**)&GPU_data_1 , data_1.size()*sizeof(float)));
gpuErrchk(cudaMemcpy(GPU_data_1, data_1.data(), data_1.size()*sizeof(float), cudaMemcpyHostToDevice));
float* GPU_data_2 = NULL;
gpuErrchk(cudaMalloc((void**)&GPU_data_2 ,data_2.size()*sizeof(float)));
gpuErrchk(cudaMemcpy(GPU_data_2, data_2.data(), data_2.size()*sizeof(float), cudaMemcpyHostToDevice));
float* GPU_result = NULL;
gpuErrchk(cudaMalloc((void**)&GPU_result , result.size()*sizeof(float)));
/*----------------------------------------------------------------------------------------------*/
//cublasSgemm(handle , operation , operation , m , n , k , alpha , A , lda , B , ldb , beta , C , ldc
//(m X n) * (n X k) -> (m X k)
//C = (alpha*A) * B + (beta*C)
const float alpha = 1.f; //Needs to be defined as a variable as it can be either a host or a device pointer (type float* in argument)
const float beta = 0.f;
cublasErrchk(
cublasSgemm(handle , CUBLAS_OP_N , CUBLAS_OP_N,
data_2_columns , data_1_rows ,data_1_columns,
&alpha , GPU_data_2 , data_2_columns,
GPU_data_1 , data_1_columns,
&beta , GPU_result , data_2_columns)
);
gpuErrchk(cudaMemcpy(result.data() , GPU_result , result.size() * sizeof(float) , cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(GPU_data_1));
gpuErrchk(cudaFree(GPU_data_2));
gpuErrchk(cudaFree(GPU_result));
cublasErrchk(cublasDestroy_v2(handle));
return result;
}
|
d662892d032f684509c59685aa6d2e8237f0aff5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
extern const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
extern int cublas_init() {
hipblasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
extern int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
for (int i = 0; i < NUM_RND_STREAMS; i++) {
fscanf (pFile, "%u", &host_mults[i]);
}
fclose (pFile);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
//hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
extern void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    /* Write source into rows [start, end) of target. */
    const int height = target->size[0];
    const int width = target->size[1];
    if (start >= end || end > height ||
        (end - start) != source->size[0] || source->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    dim3 grid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( kSetRowSlice), dim3(grid),dim3(block), 0, 0, source->data_device, target->data_device, start, end, width, height);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int copy_transpose(cudamat* source, cudamat* target) {
    /* Write the transpose of source into target (separate buffers).
     * Fix: synchronize (under SYNC_THREADS) before checking for kernel
     * errors, matching every other kernel-launching wrapper in this file;
     * without the sync, asynchronous execution failures from kTranspose
     * could go unreported here. */
    unsigned int height = source->size[0];
    unsigned int width = source->size[1];
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // setup execution parameters: one COPY_BLOCK_SIZE^2 tile per block (ceil division)
    unsigned int grid_x = height / COPY_BLOCK_SIZE;
    if (height % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = width / COPY_BLOCK_SIZE;
    if (width % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
extern int free_device_memory(cudamat* mat) {
    /* Release mat's device buffer, but only if this struct owns it. */
    if (!mat->owns_data || !mat->on_device)
        return 0;
    cublasStatus stat = hipblasFree(mat->data_device);
    mat->on_device = 0;
    return (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) ? CUBLAS_ERROR : 0;
}
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
    /* Reinterpret mat as m x n; the total element count must not change. */
    if (m * n != mat->size[0] * mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
    /* Make target a non-owning device view of columns [first_col, last_col). */
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_col >= last_col || last_col > source->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_col * num_rows;  /* column-major offset */
    target->on_device = 1;
    target->on_host = 0;
    target->size[0] = num_rows;
    target->size[1] = last_col - first_col;
    target->is_trans = 0;
    target->owns_data = 0;
    return 0;
}
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
    /* Make target a non-owning view of elements [first_ind, last_ind) of a
     * row or column vector. NOTE: when last_ind is out of range, target has
     * already been partially initialized before the error is returned — this
     * matches the original behavior. */
    if (source->size[0] > 1 && source->size[1] > 1)
        return ERROR_GENERIC;  /* source must be a vector */
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_ind >= last_ind)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_ind * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;
    if (source->size[0] > 1) {
        /* column vector */
        if (last_ind > source->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = last_ind - first_ind;
        target->size[1] = 1;
    } else {
        /* row vector */
        if (last_ind > source->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = 1;
        target->size[1] = last_ind - first_ind;
    }
    return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
    /* Wrap an existing m x n host array as a host-resident, owning cudamat. */
    mat->size[0] = m;
    mat->size[1] = n;
    mat->data_host = data;
    mat->on_host = 1;
    mat->on_device = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
extern int init_empty(cudamat* mat, int m, int n) {
    /* Create an m x n matrix that lives only on the device (contents uninitialized). */
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_host = 0;
    mat->on_device = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
    return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
    /* Overwrite mat with uniform pseudo-random values using rnd_state. */
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
    /* Overwrite mat with Gaussian pseudo-random values using rnd_state. */
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat with the column vector vec added to every column. */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[1] != 1 || mat->size[0] != vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    /* target = mat + mult * (column vector vec broadcast across columns). */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[1] != 1 || mat->size[0] != vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat with the row vector vec added to every row. */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[0] != 1 || mat->size[1] != vec->size[1] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat scaled elementwise by the column vector vec (broadcast). */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[1] != 1 || mat->size[0] != vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat scaled elementwise by the row vector vec (broadcast). */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[0] != 1 || mat->size[1] != vec->size[1] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int divide_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat divided elementwise by the column vector vec (broadcast). */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[1] != 1 || mat->size[0] != vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kDivByColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int divide_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    /* target = mat divided elementwise by the row vector vec (broadcast). */
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (vec->size[0] != 1 || mat->size[1] != vec->size[1] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kDivByRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target[i] = (mat1[i] < mat2[i]) elementwise.
     * Fix: also require target to be on the device — greater_than, equals,
     * minimum and maximum all check it, and launching the kernel with an
     * unallocated target pointer is undefined behavior. */
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
    /* target[i] = (mat[i] < val) elementwise. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target[i] = (mat1[i] > mat2[i]) elementwise. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
    /* target[i] = (mat[i] > val) elementwise. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target[i] = (mat1[i] == mat2[i]) elementwise. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipLaunchKernelGGL(( kEquals), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
    /* target[i] = (mat[i] == val) elementwise. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kEqualsScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int minimum(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target[i] = min(mat1[i], mat2[i]) elementwise.
     * Fix: guard hipDeviceSynchronize() with SYNC_THREADS like every other
     * wrapper in this file; the unconditional sync defeated the file-wide
     * asynchronous-execution toggle. */
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMinimum), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int minimum_scalar(cudamat* mat, float val, cudamat* target) {
    /* target[i] = min(mat[i], val) elementwise.
     * Fix: guard hipDeviceSynchronize() with SYNC_THREADS, consistent with
     * the rest of the file. */
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMinimumScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target[i] = max(mat1[i], mat2[i]) elementwise.
     * Fix: guard hipDeviceSynchronize() with SYNC_THREADS, consistent with
     * the rest of the file. */
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMaximum), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int maximum_scalar(cudamat* mat, float val, cudamat* target) {
    /* target[i] = max(mat[i], val) elementwise.
     * Fix: guard hipDeviceSynchronize() with SYNC_THREADS, consistent with
     * the rest of the file. */
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kMaximumScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int min_by_axis(cudamat* mat, cudamat* target, int axis) {
    /* Reduce mat by minimum along axis 0 (per column) or otherwise per row. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kMinColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    } else {
        if (target->size[1] != 1 || target->size[0] != mat->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kMinRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    }
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
    /* Reduce mat by maximum along axis 0 (per column) or otherwise per row. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    } else {
        if (target->size[1] != 1 || target->size[0] != mat->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kMaxRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    }
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int argmin_by_axis(cudamat* mat, cudamat* target, int axis) {
    /* Index of the minimum along axis 0 (per column) or otherwise per row. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kArgMinColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    } else {
        if (target->size[1] != 1 || target->size[0] != mat->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kArgMinRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    }
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
    /* Index of the maximum along axis 0 (per column) or otherwise per row. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kArgMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    } else {
        if (target->size[1] != 1 || target->size[0] != mat->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        hipLaunchKernelGGL(( kArgMaxRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
    }
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int sign(cudamat* mat, cudamat* target) {
    /* target[i] = sign of mat[i] (via the kSign kernel). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
    /* Apply the sigmoid kernel elementwise: target = kApplySigmoid(mat). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
    /* Apply the tanh kernel elementwise: target = kApplyTanh(mat). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_soft_threshold(cudamat* mat, float alpha, cudamat* target) {
    /* Apply the soft-threshold kernel elementwise with parameter alpha.
     * Fix: guard hipDeviceSynchronize() with SYNC_THREADS like every other
     * wrapper in this file; the unconditional sync defeated the file-wide
     * asynchronous-execution toggle. */
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    hipLaunchKernelGGL(( kApplySoftThreshold), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
    /* target[i] = |mat[i]| elementwise. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
    /* Apply the log(1+exp) kernel elementwise (softplus-style transform). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_log(cudamat* mat, cudamat* target) {
    /* target[i] = log(mat[i]) elementwise (via kLog). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
    /* target[i] = exp(mat[i]) elementwise (via kExp). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_gamma(cudamat* mat, cudamat* target) {
    /* Apply the kGamma kernel elementwise to mat, writing into target. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kGamma), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_lgamma(cudamat* mat, cudamat* target) {
    /* Apply the kLogGamma kernel elementwise to mat, writing into target. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kLogGamma), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
    /* target[i] = sqrt(mat[i]) elementwise (via kSqrt). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    /* target[i] = mat[i] raised to the scalar exponent `pow` (via kPow). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, pow, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
    /* target[i] = mat[i] raised to the per-element exponent pow[i]. */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int reciprocal(cudamat* mat, cudamat* target) {
    /* target[i] = reciprocal of mat[i] elementwise (via kReciprocal). */
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const unsigned int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Matrix multiply: target = alpha * op(mat1) * op(mat2) + beta * target,
 * where op(.) is the (possible) transpose encoded by each matrix's is_trans
 * flag via get_transpose_char. hipBLAS/cuBLAS is column-major, so the
 * "leading" dimension helpers account for transposedness when validating
 * shapes. The raw size[0] of each matrix is passed as its storage leading
 * dimension (lda/ldb/ldc). */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    /* rows(op(mat1)) must match rows(target), cols(op(mat2)) must match
     * cols(target), and the inner dimensions must agree. */
    if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
        get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
        get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    int m = get_leading_dimension(mat1),
        k = get_leading_dimension(mat2),
        n = get_nonleading_dimension(mat2);
    hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
                m, n, k,
                alpha, mat1->data_device, mat1->size[0],
                mat2->data_device, mat2->size[0],
                beta, target->data_device, target->size[0]);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return 0;
}
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
    /* Dot product of two same-shape matrices treated as flat vectors.
     * Errors are reported through *err_code; the float return value is only
     * meaningful when *err_code == 0.
     * Fix: the not-on-device path previously returned the error constant
     * cast to float WITHOUT setting *err_code, so callers inspecting
     * *err_code read a stale value. It now reports through *err_code like
     * the other error paths. */
    int len = mat1->size[0]*mat1->size[1];
    float res;
    if (!mat1->on_device || !mat2->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }
    if (mat1->is_trans != mat2->is_trans) {
        *err_code = ERROR_TRANSPOSEDNESS;
        return 0;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
        *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
        return 0;
    }
    res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    /* In-place axpy: mat1 += alpha * mat2 (same shape and transposedness). */
    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target = mat1 + mat2 elementwise. The in-place case (target == mat1)
     * is routed through hipblasSaxpy; otherwise the kAdd kernel is used. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    if (mat1 == target) {
        hipblasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else {
        hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
        if (SYNC_THREADS)
            hipDeviceSynchronize();
        if (checkCUDAError())
            return CUDA_ERROR;
    }
    return 0;
}
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target = mat1 - mat2 elementwise. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target = mat1 / mat2 elementwise. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* target = mat1 * mat2 elementwise (Hadamard product). */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int len = mat1->size[0] * mat1->size[1];
    hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
    /* Fill every element of mat with the scalar alpha. */
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int len = mat->size[0] * mat->size[1];
    hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, len);
    if (SYNC_THREADS)
        hipDeviceSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Scale by a scalar: target = alpha * mat. When target aliases mat the
 * in-place BLAS scal routine is used instead of the custom kernel. */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
hipblasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
/* Divide by a scalar: target = mat / alpha.
 * NOTE(review): alpha == 0 is not rejected here — behavior then depends on
 * kDivideScalar; confirm against the kernel. */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Add a scalar: target = mat + alpha. */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Euclidean (L2) norm of all elements of mat, via BLAS nrm2.
 * On failure returns -1. and stores the error code in *err_code;
 * on success *err_code is set to 0. */
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = hipblasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Manhattan (L1) norm of all elements of mat, via BLAS asum.
 * Same error protocol as euclid_norm: -1. and *err_code on failure. */
extern float manhattan_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = hipblasSasum(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Gather the rows of source listed in indices (a 1 x nRetRows matrix of row
 * numbers) into target. One warp-sized block per 32 selected rows.
 * Returns 0 on success, ERROR_NOT_ON_DEVICE or CUDA_ERROR on failure. */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
// Fix: validate device residency before dereferencing data_device, like
// every other wrapper in this file does.
if (!source->on_device || !target->on_device || !indices->on_device)
return ERROR_NOT_ON_DEVICE;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Scatter the rows of source into target at the row numbers listed in
 * indices (a 1 x nSetRows matrix). Inverse of selectRows.
 * Returns 0 on success, ERROR_NOT_ON_DEVICE or CUDA_ERROR on failure. */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
// Fix: validate device residency before dereferencing data_device, like
// every other wrapper in this file does.
if (!target->on_device || !source->on_device || !indices->on_device)
return ERROR_NOT_ON_DEVICE;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Elementwise select: target[i] = condition[i] ? if_mat[i] : else_mat[i]
 * (exact predicate semantics are defined by the kWhere kernel).
 * All four matrices must be device-resident and share the same shape. */
extern int where(cudamat* condition_mat, cudamat* if_mat, cudamat* else_mat, cudamat* target) {
unsigned int len = condition_mat->size[0] * condition_mat->size[1];
// Fix: the original only checked condition_mat and target, then passed the
// (possibly invalid) device pointers of if_mat/else_mat to the kernel.
if (!condition_mat->on_device || !if_mat->on_device || !else_mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kWhere), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, condition_mat->data_device,
if_mat->data_device, else_mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
}
#include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
/* True iff the most recent CUBLAS call reported a failure status. */
inline bool check_cublas_error() {
return cublasGetError() != CUBLAS_STATUS_SUCCESS;
}
/* True iff a CUDA error is pending; prints its description when one is.
 * Note cudaGetLastError() also clears the pending error. */
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
bool failed = (err != cudaSuccess);
if (failed)
printf("%s\n", cudaGetErrorString( err));
return failed;
}
/* Human-readable description of the most recent CUDA error (and clears it). */
extern const char* get_last_cuda_error() {
return cudaGetErrorString( cudaGetLastError());
}
/* Initialize the CUBLAS library; returns CUBLAS_ERROR on failure, 0 otherwise. */
extern int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* Shut down CUBLAS and tear down the CUDA context of this host thread. */
extern int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
/* Bind this host thread to the CUDA device deviceId.
 * Returns 0 on success, CUDA_ERROR on failure. */
extern int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Seed the device-side RNG streams. Reads NUM_RND_STREAMS unsigned
 * multipliers from the text file at cudamatpath, uploads them, and launches
 * kSeedRandom with the given seed.
 * Returns 0 on success, ERROR_GENERIC when the multiplier file cannot be
 * opened or parsed, CUDA_ERROR on a device failure. */
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
// Fix: the original never checked fopen/fscanf, so a missing or short file
// silently seeded the RNG with uninitialized memory.
if (!pFile) {
free(host_mults);
return ERROR_GENERIC;
}
for (int i = 0; i < NUM_RND_STREAMS; i++) {
if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
fclose (pFile);
free(host_mults);
return ERROR_GENERIC;
}
}
fclose (pFile);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
free(host_mults);  // fix: the original leaked this host buffer
cudaThreadSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
/* Storage leading dimension: rows normally, columns when flagged transposed. */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
/* The non-leading dimension (complement of get_leading_dimension). */
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
/* Set the logical-transpose flag; no data is moved. */
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
/* CUBLAS-style transpose character for mat: 't' or 'n'. */
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
/* Block the host until all previously queued device work has completed. */
extern void cuda_sync_threads() {
cudaThreadSynchronize();
}
/* Allocate device storage for mat (size[0]*size[1] floats) via cublasAlloc
 * and mark it device-resident. Returns 0 or CUBLAS_ERROR. */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
/* Download mat's device data into its host buffer (data_host).
 * Fails with ERROR_NOT_ON_DEVICE if the matrix has no device copy. */
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
/* Upload mat's host data to the device, allocating device storage first
 * if the matrix is not yet device-resident. */
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
/* Device-to-device copy of mat1 into mat2 (same shape) via BLAS scopy.
 * NOTE(review): unlike most wrappers here, on_device is not checked first —
 * callers must guarantee both matrices are device-resident. */
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* Copy rows [start, end) of source into target (target has end-start rows,
 * same column count). NOTE(review): `end > height` compares unsigned vs
 * signed int; fine for sane sizes but worth confirming for huge matrices. */
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Write source (end-start rows) into rows [start, end) of target.
 * Inverse of get_row_slice. */
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Write the transpose of source into target (target's shape must be
 * source's shape reversed). One COPY_BLOCK_SIZE x COPY_BLOCK_SIZE tile per
 * block, grid rounded up to cover partial tiles. */
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
// Fix: synchronize (under SYNC_THREADS) before checking for errors, as every
// other kernel wrapper in this file does; without it checkCUDAError() can
// miss asynchronous execution failures from the launch above.
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Release mat's device buffer if it both owns its data and has one.
 * Views (owns_data == 0) are left untouched. */
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
/* Change the logical shape of mat to m x n; the element count must match.
 * Pure metadata update — no data is moved. */
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
/* Make target a zero-copy column-slice view of source covering columns
 * [first_col, last_col). The view does not own its data (owns_data = 0), so
 * freeing it will not release source's buffer. */
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
/* Make target a zero-copy view of elements [first_ind, last_ind) of a
 * vector-shaped source (one of its dimensions must be 1). The view keeps
 * the source's orientation (column vs row vector). */
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
/* Wrap an existing host buffer (m x n floats, owned by the caller's `data`
 * pointer) in a cudamat; no device allocation happens here. */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
/* Create an m x n device-only matrix: fills in the metadata and allocates
 * device storage. Contents are uninitialized. */
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
/* Overwrite mat (device-resident) with uniform random samples drawn by the
 * seeded device RNG streams (see init_random). */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* Overwrite mat (device-resident) with Gaussian random samples. */
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
/* Add a column vector to every column of mat: target = mat + vec (broadcast
 * over columns). vec must be h x 1 where mat is h x w. */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
/* Add a scaled column vector to every column: target = mat + mult * vec
 * (broadcast over columns). vec must be h x 1. */
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Add a row vector to every row of mat: target = mat + vec (broadcast over
 * rows). vec must be 1 x w where mat is h x w. */
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Multiply each column of mat elementwise by a column vector (h x 1). */
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Multiply each row of mat elementwise by a row vector (1 x w). */
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Divide each column of mat elementwise by a column vector (h x 1). */
extern int divide_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Divide each row of mat elementwise by a row vector (1 x w). */
extern int divide_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise comparison: target[i] = (mat1[i] < mat2[i]), as computed by
 * the kLessThan kernel. All operands must be same-shape device matrices. */
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
// Fix: also require target to be on the device — the sibling wrappers
// (greater_than, equals, minimum, maximum) all check it, and the kernel
// writes through target->data_device.
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise comparison against a scalar: target[i] = (mat[i] < val). */
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise comparison: target[i] = (mat1[i] > mat2[i]). */
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise comparison against a scalar: target[i] = (mat[i] > val). */
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise equality: target[i] = (mat1[i] == mat2[i]).
 * NOTE(review): exact float equality — sensible for indicator matrices,
 * not for results of arithmetic. */
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEquals<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise equality against a scalar: target[i] = (mat[i] == val). */
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEqualsScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise minimum: target[i] = min(mat1[i], mat2[i]). */
extern int minimum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinimum<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
// Fix: guard the synchronize with SYNC_THREADS, as every other wrapper in
// this file does; the original synchronized unconditionally.
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise minimum against a scalar: target[i] = min(mat[i], val). */
extern int minimum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinimumScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
// Fix: guard with SYNC_THREADS for consistency (was unconditional).
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise maximum: target[i] = max(mat1[i], mat2[i]). */
extern int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaximum<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
// Fix: guard with SYNC_THREADS for consistency (was unconditional).
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise maximum against a scalar: target[i] = max(mat[i], val). */
extern int maximum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaximumScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
// Fix: guard with SYNC_THREADS for consistency (was unconditional).
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Columnwise (axis 0) or rowwise (axis != 0) minimum reduction of mat into
 * target (a 1 x w or h x 1 matrix respectively). One 32-thread block per
 * reduced column/row. */
extern int min_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Columnwise/rowwise maximum reduction; same conventions as min_by_axis. */
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaxRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Columnwise (axis 0) or rowwise (axis != 0) argmin of mat into target
 * (indices stored as floats in a 1 x w or h x 1 matrix). */
extern int argmin_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMinColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMinRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Columnwise/rowwise argmax; same conventions as argmin_by_axis. */
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMaxRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise sign function of mat into target (as computed by kSign). */
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise logistic sigmoid of mat into target. */
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise tanh of mat into target. */
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise soft-threshold (shrinkage) of mat with parameter alpha into
 * target, as computed by kApplySoftThreshold. */
extern int apply_soft_threshold(cudamat* mat, float alpha, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySoftThreshold<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
// Fix: guard the synchronize with SYNC_THREADS, as every other apply_*
// wrapper in this file does; the original synchronized unconditionally.
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Unary elementwise wrappers: validate device residency and shape, launch
 * the kernel, optionally synchronize.  cudaThreadSynchronize() was replaced
 * with cudaDeviceSynchronize(): the former is deprecated (CUDA 4.0+) and the
 * two calls are equivalent. */

/* target[i] = log(1 + exp(mat[i])) */
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target[i] = log(mat[i]) */
extern int apply_log(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLog<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target[i] = exp(mat[i]) */
extern int apply_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExp<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* Elementwise gamma function (launches kGamma). */
extern int apply_gamma(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGamma<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* Elementwise log-gamma (launches kLogGamma). */
extern int apply_lgamma(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLogGamma<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target[i] = sqrt(mat[i]) */
extern int apply_sqrt(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSqrt<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = mat[i] ** pow (scalar exponent).
 * cudaThreadSynchronize() replaced by the equivalent, non-deprecated
 * cudaDeviceSynchronize() in these wrappers. */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kPow<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, pow, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target[i] = mat[i] ** pow[i] (elementwise exponents; pow must match mat). */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kPowMatrix<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, pow->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target[i] = 1 / mat[i] */
extern int reciprocal(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kReciprocal<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
cudaThreadSynchronize();
return 0;
}
/* Dot product of two matrices viewed as flat vectors (cublasSdot).
 * On success *err_code is 0 and the dot product is returned. */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
    int len = mat1->size[0]*mat1->size[1];
    float res;
    if (!mat1->on_device || !mat2->on_device) {
        /* BUG FIX: this path previously returned without setting *err_code,
         * leaving the caller with a stale error value.  The (odd) float
         * return value is preserved for backward compatibility. */
        *err_code = ERROR_NOT_ON_DEVICE;
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        *err_code = ERROR_TRANSPOSEDNESS;
        return 0;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
        *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
        return 0;
    }
    res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
/* In-place SAXPY: mat1 += alpha * mat2 (cuBLAS). The operands must share
 * transposedness and shape and must reside on the device. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int n = mat1->size[0] * mat1->size[1];
    cublasSaxpy(n, alpha, mat2->data_device, 1, mat1->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
/* target = mat1 + mat2, elementwise.  When the operation is in place
 * (mat1 == target) cuBLAS SAXPY is used; otherwise the kAdd kernel.
 * cudaThreadSynchronize() replaced by equivalent cudaDeviceSynchronize(). */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat1 == target) {
        cublasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else {
        kAdd<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
        if (SYNC_THREADS)
            cudaDeviceSynchronize();
        if (checkCUDAError())
            return CUDA_ERROR;
    }
    return 0;
}
/* Binary elementwise wrappers: all three operands must be on the device,
 * share transposedness (mat1 vs mat2) and shape.
 * cudaThreadSynchronize() replaced by equivalent cudaDeviceSynchronize(). */

/* target = mat1 - mat2 */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSubtract<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target = mat1 / mat2 */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivide<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* Elementwise multiplication of 2 matrices: target = mat1 * mat2 */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMult<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Scalar wrappers.  cudaThreadSynchronize() replaced by the equivalent,
 * non-deprecated cudaDeviceSynchronize(). */

/* mat[i] = alpha for every element. */
extern int assign_scalar(cudamat* mat, float alpha) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kAssignScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target = alpha * mat; uses cuBLAS SSCAL when operating in place. */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat == target) {
        cublasSscal(len, alpha, mat->data_device, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else {
        kMultScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
        if (SYNC_THREADS)
            cudaDeviceSynchronize();
        if (checkCUDAError())
            return CUDA_ERROR;
    }
    return 0;
}

/* target = mat / alpha */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivideScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}

/* target = mat + alpha */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* L2 norm of the whole matrix (cublasSnrm2).  On failure *err_code is set
 * and -1 is returned; on success *err_code is 0. */
extern float euclid_norm(cudamat* mat, int* err_code) {
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return -1.;
    }
    const int n = mat->size[0] * mat->size[1];
    const float norm = cublasSnrm2(n, mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    }
    *err_code = 0;
    return norm;
}

/* L1 norm of the whole matrix (cublasSasum); same error protocol. */
extern float manhattan_norm(cudamat* mat, int* err_code) {
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return -1.;
    }
    const int n = mat->size[0] * mat->size[1];
    const float norm = cublasSasum(n, mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    }
    *err_code = 0;
    return norm;
}
/* Gathers the rows of `source` listed in `indices` into `target`.
 * One 32-thread block handles 32 selected rows.
 * Added the on_device checks the sibling wrappers perform, and replaced the
 * deprecated cudaThreadSynchronize() with cudaDeviceSynchronize(). */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
    if (!source->on_device || !target->on_device || !indices->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int nRetRows = indices->size[1];
    if (nRetRows==0) return 0;
    dim3 gridDim((nRetRows+31)/32);
    dim3 blockDim(32);
    kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}

/* Scatter counterpart: writes rows of `source` into the rows of `target`
 * listed in `indices`. */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
    if (!source->on_device || !target->on_device || !indices->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int nSetRows = indices->size[1];
    if (nSetRows==0)
        return 0;
    dim3 gridDim((nSetRows+31)/32);
    dim3 blockDim(32);
    kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* target[i] = condition[i] ? if_mat[i] : else_mat[i], elementwise.
 * Added the previously missing on_device checks for if_mat/else_mat and
 * replaced deprecated cudaThreadSynchronize() with cudaDeviceSynchronize(). */
extern int where(cudamat* condition_mat, cudamat* if_mat, cudamat* else_mat, cudamat* target) {
    unsigned int len = condition_mat->size[0] * condition_mat->size[1];
    if (!condition_mat->on_device || !if_mat->on_device || !else_mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kWhere<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(condition_mat->data_device,
        if_mat->data_device, else_mat->data_device, target->data_device, len);
    if (SYNC_THREADS)
        cudaDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
}
|
1e5e4e5b2ef8371e449c81aa33baa71573ef49f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//mult, add and FMA Kernels work for FP32, FP64, and Int
// without register limiting
#include <stdio.h>
//#include "arithmeticTests.h"
//------------ EXPERIMENT VOLATILE KERNEL FOR BASEPOWER 2 ---------
// Chains of dependent adds; b/c are volatile so the compiler cannot fold
// the loop away.  A tail guard on n was added: the original ignored n and
// would write out of bounds if the grid ever overshot the buffer.
template <typename T>
__global__
void addKernel1Volatile(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    volatile T b = 1;
    volatile T c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

// Same dependence chain with multiplies and non-volatile accumulators.
// (The original spelling of the name is kept: host wrappers reference it.)
template <typename T>
__global__
void multKernel1_nonVolitile(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}
//------------ BASEPOW1: SET SHARED MEMORY KERNEL ---------
// Variants that declare a dynamically sized shared array `s`.  `s` is never
// accessed; presumably the launch-time shared-memory request only exists to
// consume shared-memory capacity (TODO confirm with the test harness).
// A tail guard on n was added to each kernel (n was previously unused).
template <typename T>
__global__
void addKernel1_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void multKernel_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    // volatile load keeps the compiler from assuming the initial value
    volatile T aInit = x[thread];
    T a = aInit;
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void fmaKernel_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    volatile T aInit = x[thread];
    T a = aInit;
    // constants chosen so the a*b+d chains stay in a stable numeric range
    T b = 0.25, c = 0.25, d = 0.1875;
    for (int i = 0; i < iterateNum; i++) {
        b = a*b + d;
        c = a*b + d;
        a = c*b + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
    }
    x[thread] = a;
}
//------------ ADDITION KERNELS ---------
// Plain add-throughput kernels; 6 (addKernel1) or 9 (addKernel2) dependent
// adds per iteration.  A tail guard on n was added (n was previously unused
// and an overshooting grid would write out of bounds).
template <typename T>
__global__
void addKernel1(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void addKernel2(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}
//------------ MULTIPLICATION KERNELS ---------
// Multiply-throughput kernels; 6 (multKernel1) or 9 (multKernel2) dependent
// multiplies per iteration.  Tail guard on n added (n previously unused).
template <typename T>
__global__
void multKernel1(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void multKernel2(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}
//------------ FMA KERNELS ---------
// FMA-throughput kernels; 9 (fmaKernel1) or 12 (fmaKernel2) a*b+d chains per
// iteration.  The volatile load of aInit keeps the compiler from assuming
// the initial value.  Tail guard on n added (n previously unused).
template <typename T>
__global__
void fmaKernel1(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    volatile T aInit = x[thread];
    T a = aInit;
    T b = 0.25, c = 0.25, d = 0.1875;
    for (int i = 0; i < iterateNum; i++) {
        b = a*b + d;
        c = a*b + d;
        a = c*b + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
    }
    x[thread] = a;
}

template <typename T>
__global__
void fmaKernel2(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    volatile T aInit = x[thread];
    T a = aInit;
    T b = 0.25, c = 0.25, d = 0.1875;
    for (int i = 0; i < iterateNum; i++) {
        b = a*b + d;
        c = a*b + d;
        a = c*b + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
    }
    x[thread] = a;
}
// Fills x[0..n) with val, one element per thread.
template <typename T>
__global__
void createData(int n, T val, T *x) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    x[idx] = val;
}
//------------ BASE CLASS FOR TESTS TO INHERIT FROM ---------
//------------ BASE CLASS FOR TESTS TO INHERIT FROM ---------
// Owns the device buffer and launch geometry shared by every arithmetic test.
template <typename T>
class ArithmeticTestBase {
public:
    T *d_x;              // device buffer (n elements), allocated in kernelSetup()
    int n;               // total element count = numBlocks * blockSize
    T arrayInitVal;      // value the buffer is filled with
    int iterNum;         // loop iterations each kernel thread executes
    int numBlocks;
    int blockSize;
    int numBlockScale;   // blocks launched per multiprocessor
    int opsPerIteration; // number of operations in one iteration. Not including loop calculations

    ArithmeticTestBase(int blockSize, int iterNum)
        : d_x(nullptr), iterNum(iterNum), blockSize(blockSize), numBlockScale(360)
    {
        opsPerIteration = 0;
        arrayInitVal = (T) 1;
    }

    ArithmeticTestBase(int blockSize, int iterNum, int numBlockScale)
        : d_x(nullptr), iterNum(iterNum), blockSize(blockSize), numBlockScale(numBlockScale)
    {
        opsPerIteration = 0;
        arrayInitVal = (T) 1;
    }

    // BUG FIX: d_x is now initialised to nullptr in both constructors, so
    // destroying an instance whose kernelSetup() was never called no longer
    // frees an indeterminate pointer.  hipFree(nullptr) is a no-op.
    ~ArithmeticTestBase() {
        CUDA_ERROR( hipFree(d_x) );
    }

    // Allocate and initialise the device buffer; launch geometry is derived
    // from the device's multiprocessor count.
    void kernelSetup(hipDeviceProp_t deviceProp) {
        numBlocks = deviceProp.multiProcessorCount * numBlockScale;
        n = numBlocks * blockSize;
        CUDA_ERROR( hipMalloc(&d_x, n*sizeof(T)) );
        hipLaunchKernelGGL(( createData<T>), dim3(numBlocks), dim3(blockSize), 0, 0, n, arrayInitVal, d_x);
    }

    // Number of threads launched; valid only after kernelSetup().
    int getNumThreads() {
        return numBlocks * blockSize;
    }

    // Total operations executed per thread over all loop iterations.
    int getOpsPerThread() {
        return opsPerIteration * iterNum;
    }

    void runKernel();  // defined by each concrete test subclass

    void CUDA_ERROR(hipError_t e) {
        if (e != hipSuccess) {
            printf("cuda error in test class: \"%s\"\n", hipGetErrorString(e));
        }
    }
};
//------------ TEST CLASS FOR BASE POWER MEASUREMENT APPR 1 ---------
// Tests that additionally request dynamic shared memory (as a fraction of
// sharedMemPerBlock) when launching.  BUG FIX: sharedMemScale is now
// initialised to 0 in every constructor; previously it was read
// uninitialised in kernelSetup() if setSharedMem() had not been called.
template <typename T>
class AddKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
    unsigned int sharedMemRequest;  // bytes of dynamic shared memory to request
    float sharedMemScale;           // fraction of sharedMemPerBlock to request

    AddKernel1TestSetSharedMem(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    { this->opsPerIteration = 6; sharedMemScale = 0.0f; }

    AddKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    { this->opsPerIteration = 6; sharedMemScale = 0.0f; }

    // In addition to the normal setup, compute the shared-memory request.
    void kernelSetup(hipDeviceProp_t deviceProp) {
        ArithmeticTestBase<T>::kernelSetup(deviceProp);
        sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
    }

    void setSharedMem(float newScale) {
        sharedMemScale = newScale;
    }

    void runKernel() {
        hipLaunchKernelGGL(( addKernel1_DynamicSharedMem<T>), dim3(this->numBlocks), dim3(this->blockSize), sharedMemRequest, 0,
            this->n, this->iterNum, this->d_x);
    }
};

template <typename T>
class MultKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
    unsigned int sharedMemRequest;
    float sharedMemScale;

    MultKernel1TestSetSharedMem(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    { this->opsPerIteration = 6; sharedMemScale = 0.0f; }

    MultKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    { this->opsPerIteration = 6; sharedMemScale = 0.0f; }

    void kernelSetup(hipDeviceProp_t deviceProp) {
        ArithmeticTestBase<T>::kernelSetup(deviceProp);
        sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
    }

    void setSharedMem(float newScale) {
        sharedMemScale = newScale;
    }

    void runKernel() {
        hipLaunchKernelGGL(( multKernel_DynamicSharedMem<T>), dim3(this->numBlocks), dim3(this->blockSize), sharedMemRequest, 0,
            this->n, this->iterNum, this->d_x);
    }
};

template <typename T>
class FMAKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
    unsigned int sharedMemRequest;
    float sharedMemScale;

    FMAKernel1TestSetSharedMem(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 9;
        this->arrayInitVal = (T) 0.25;
        sharedMemScale = 0.0f;
    }

    FMAKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 9;
        this->arrayInitVal = (T) 0.25;
        sharedMemScale = 0.0f;
    }

    void kernelSetup(hipDeviceProp_t deviceProp) {
        ArithmeticTestBase<T>::kernelSetup(deviceProp);
        sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
    }

    void setSharedMem(float newScale) {
        sharedMemScale = newScale;
    }

    void runKernel() {
        hipLaunchKernelGGL(( fmaKernel_DynamicSharedMem<T>), dim3(this->numBlocks), dim3(this->blockSize), sharedMemRequest, 0,
            this->n, this->iterNum, this->d_x);
    }
};
//------------ TEST CASE FOR BASE POWER APPR 2 ---------
// Launches addKernel1Volatile: 6 volatile adds per loop iteration.
template <typename T>
class AddKernel1TestVolatile : public ArithmeticTestBase<T> {
public:
AddKernel1TestVolatile(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 6;}
AddKernel1TestVolatile(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 6;}
void runKernel() {
hipLaunchKernelGGL(( addKernel1Volatile<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
// Launches multKernel1_nonVolitile: 6 non-volatile multiplies per iteration.
template <typename T>
class MultKernel1TestNonVolatile : public ArithmeticTestBase<T> {
public:
MultKernel1TestNonVolatile(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 6;}
MultKernel1TestNonVolatile(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 6;}
void runKernel() {
hipLaunchKernelGGL(( multKernel1_nonVolitile<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
//------------ ADDITION TEST CLASSES ---------
// Launches addKernel1: 6 adds per loop iteration.
template <typename T>
class AddKernel1Test : public ArithmeticTestBase<T> {
public:
//this->opsPerIteration = 6;
AddKernel1Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 6;}
AddKernel1Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 6;}
void runKernel() {
hipLaunchKernelGGL(( addKernel1<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
// Launches addKernel2: 9 adds per loop iteration.
template <typename T>
class AddKernel2Test : public ArithmeticTestBase<T> {
public:
AddKernel2Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 9;}
AddKernel2Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 9;}
void runKernel() {
hipLaunchKernelGGL(( addKernel2<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
//------------ MULTIPLICATION TEST CLASSES ---------
// Launches multKernel1: 6 multiplies per loop iteration.
template <typename T>
class MultKernel1Test : public ArithmeticTestBase<T> {
public:
MultKernel1Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 6;}
MultKernel1Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 6;}
void runKernel() {
hipLaunchKernelGGL(( multKernel1<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
// Launches multKernel2: 9 multiplies per loop iteration.
template <typename T>
class MultKernel2Test : public ArithmeticTestBase<T> {
public:
MultKernel2Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{this->opsPerIteration = 9;}
MultKernel2Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{this->opsPerIteration = 9;}
void runKernel() {
hipLaunchKernelGGL(( multKernel2<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
//------------ FMA TEST CLASSES ---------
// Launches fmaKernel1: 9 FMA chains per iteration; buffer seeded with 0.25.
template <typename T>
class FmaKernel1Test : public ArithmeticTestBase<T> {
public:
FmaKernel1Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{
this->opsPerIteration = 9;
this->arrayInitVal = (T) 0.25;
}
FmaKernel1Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{
this->opsPerIteration = 9;
this->arrayInitVal = (T) 0.25;
}
void runKernel() {
hipLaunchKernelGGL(( fmaKernel1<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
// Launches fmaKernel2: 12 FMA chains per iteration; buffer seeded with 0.25.
template <typename T>
class FmaKernel2Test : public ArithmeticTestBase<T> {
public:
FmaKernel2Test(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{
this->opsPerIteration = 12;
this->arrayInitVal = (T) 0.25;
}
FmaKernel2Test(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{
this->opsPerIteration = 12;
this->arrayInitVal = (T) 0.25;
}
void runKernel() {
hipLaunchKernelGGL(( fmaKernel2<T>), dim3(this->numBlocks), dim3(this->blockSize), 0, 0, this->n, this->iterNum, this->d_x);
}
};
| 1e5e4e5b2ef8371e449c81aa33baa71573ef49f8.cu |
//mult, add and FMA Kernels work for FP32, FP64, and Int
// without register limiting
#include <stdio.h>
//#include "arithmeticTests.h"
//------------ EXPERIMENT VOLATILE KERNEL FOR BASEPOWER 2 ---------
// Chains of dependent adds; b/c are volatile so the compiler cannot fold
// the loop away.  A tail guard on n was added: the original ignored n and
// would write out of bounds if the grid ever overshot the buffer.
template <typename T>
__global__
void addKernel1Volatile(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    volatile T b = 1;
    volatile T c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

// Same dependence chain with multiplies and non-volatile accumulators.
// (The original spelling of the name is kept: host wrappers reference it.)
template <typename T>
__global__
void multKernel1_nonVolitile(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}
//------------ BASEPOW1: SET SHARED MEMORY KERNEL ---------
// Variants that declare a dynamically sized shared array `s`.  `s` is never
// accessed; presumably the launch-time shared-memory request only exists to
// consume shared-memory capacity (TODO confirm with the test harness).
// A tail guard on n was added to each kernel (n was previously unused).
template <typename T>
__global__
void addKernel1_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void multKernel_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    // volatile load keeps the compiler from assuming the initial value
    volatile T aInit = x[thread];
    T a = aInit;
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void fmaKernel_DynamicSharedMem(int n, int iterateNum, T *x) {
    extern __shared__ int s[];
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    volatile T aInit = x[thread];
    T a = aInit;
    // constants chosen so the a*b+d chains stay in a stable numeric range
    T b = 0.25, c = 0.25, d = 0.1875;
    for (int i = 0; i < iterateNum; i++) {
        b = a*b + d;
        c = a*b + d;
        a = c*b + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
        c = b*a + d;
        b = a*c + d;
        a = b*c + d;
    }
    x[thread] = a;
}
//------------ ADDITION KERNELS ---------
// Plain add-throughput kernels; 6 (addKernel1) or 9 (addKernel2) dependent
// adds per iteration.  A tail guard on n was added (n was previously unused
// and an overshooting grid would write out of bounds).
template <typename T>
__global__
void addKernel1(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void addKernel2(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a + b;
        c = a + b;
        a = c + a;
        c = b + a;
        b = c + a;
        a = b + c;
        c = b + a;
        b = c + a;
        a = b + c;
    }
    x[thread] = a;
}
//------------ MULTIPLICATION KERNELS ---------
// Multiply-throughput kernels; 6 (multKernel1) or 9 (multKernel2) dependent
// multiplies per iteration.  Tail guard on n added (n previously unused).
template <typename T>
__global__
void multKernel1(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}

template <typename T>
__global__
void multKernel2(int n, int iterateNum, T *x) {
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread >= n)
        return;
    T a = x[thread];
    T b = 1, c = 1;
    for (int i = 0; i < iterateNum; i++) {
        b = a * b;
        c = a * b;
        a = c * a;
        c = b * a;
        b = c * a;
        a = b * c;
        c = b * a;
        b = c * a;
        a = b * c;
    }
    x[thread] = a;
}
//------------ FMA KERNELS ---------
template <typename T>
__global__
// Fused multiply-add benchmark kernel: 9 a*b+d style operations per loop
// iteration, each eligible for FMA contraction. Statement order is the
// measured dependency chain — do not reorder. `n` is unused.
// NOTE(review): no bounds guard — assumes grid size equals the array length.
void fmaKernel1(int n, int iterateNum, T *x) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
// volatile forces a real load and blocks compile-time constant folding of `a`.
volatile T aInit = x[thread];
T a = aInit;
//T a = x[thread];
// Alternative initial values kept from experimentation:
//float b = 1.175494351e+38f, c = 1.175494351e+38f;
//float b=1f, c=1f;
// T b = 2.22507e-308, c = 2.22507e-308;
T b = 0.25, c = 0.25, d = 0.1875;
// T b = 0, c = 0;
for (int i = 0; i < iterateNum; i++) {
// Earlier variant of the FMA chain, kept for reference:
// b = a*b + c;
// c = a*b + a;
// a = c*b + a;
// c = b*a + c;
// b = a*b + b;
// a = b*c + a;
// c = b*a + c;
// b = a*b + b;
// a = b*c + a;
b = a*b + d;
c = a*b + d;
a = c*b + d;
c = b*a + d;
b = a*c + d;
a = b*c + d;
c = b*a + d;
b = a*c + d;
a = b*c + d;
}
// Write back so the loop is not dead-code eliminated.
x[thread] = a;
}
template <typename T>
__global__
// Fused multiply-add benchmark kernel: 12 a*b+d style operations per loop
// iteration (extended version of fmaKernel1). Statement order is the measured
// dependency chain — do not reorder. `n` is unused.
// NOTE(review): no bounds guard — assumes grid size equals the array length.
void fmaKernel2(int n, int iterateNum, T *x) {
int thread = blockIdx.x*blockDim.x + threadIdx.x;
// volatile forces a real load and blocks compile-time constant folding of `a`.
volatile T aInit = x[thread];
T a = aInit;
// T a = x[thread];
// Alternative initial values kept from experimentation:
//float b = 1.175494351e+38f, c = 1.175494351e+38f;
//T b = 2.22507e-308, c = 2.22507e-308;
T b = 0.25, c = 0.25, d = 0.1875;
// T b = 0, c = 0;
for (int i = 0; i < iterateNum; i++) {
b = a*b + d;
c = a*b + d;
a = c*b + d;
c = b*a + d;
b = a*c + d;
a = b*c + d;
c = b*a + d;
b = a*c + d;
a = b*c + d;
c = b*a + d;
b = a*c + d;
a = b*c + d;
}
// Write back so the loop is not dead-code eliminated.
x[thread] = a;
}
// Fill kernel: writes `val` into every element of x[0..n).
// One thread per element; threads whose index falls past the end return early.
template <typename T>
__global__
void createData(int n, T val, T *x) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    x[idx] = val;
}
//------------ BASE CLASS FOR TESTS TO INHERIT FROM ---------
// Owns the device buffer and launch configuration shared by all arithmetic
// throughput tests. Subclasses set opsPerIteration in their constructors and
// implement runKernel(). Call kernelSetup() before runKernel()/getNumThreads().
template <typename T>
class ArithmeticTestBase {
public:
T *d_x;              // device buffer; allocated in kernelSetup()
int n;               // element count = numBlocks * blockSize
T arrayInitVal;      // value the device buffer is filled with
int iterNum;         // loop iteration count passed to the kernel
int numBlocks;       // grid size; computed in kernelSetup()
int blockSize;       // threads per block
int numBlockScale;   // blocks launched per multiprocessor
int opsPerIteration; //number of operations in one iteration. Not including loop calculations
ArithmeticTestBase(int blockSize, int iterNum)
: iterNum(iterNum), blockSize(blockSize), numBlockScale(360)
{
opsPerIteration = 0;
arrayInitVal = (T) 1;
// fixed: d_x was uninitialized, so the destructor freed a garbage
// pointer when kernelSetup() was never called.
d_x = NULL;
}
ArithmeticTestBase(int blockSize, int iterNum, int numBlockScale)
: iterNum(iterNum), blockSize(blockSize), numBlockScale(numBlockScale)
{
opsPerIteration = 0;
arrayInitVal = (T) 1;
d_x = NULL;
}
~ArithmeticTestBase() {
// Only free a buffer that kernelSetup() actually allocated.
if (d_x != NULL) {
CUDA_ERROR( cudaFree(d_x) );
}
}
// Size the grid from the device's SM count, allocate the device buffer,
// and initialize it with arrayInitVal.
void kernelSetup(cudaDeviceProp deviceProp) {
numBlocks = deviceProp.multiProcessorCount * numBlockScale;
n = numBlocks * blockSize;
CUDA_ERROR( cudaMalloc(&d_x, n*sizeof(T)) );
createData<T><<<numBlocks, blockSize>>>(n, arrayInitVal, d_x);
CUDA_ERROR( cudaGetLastError() ); // surface launch-configuration errors
}
//get the number of threads launched in the kernel. Must be
//called after kernelSetup() or the neccisary fields may not be initialized
int getNumThreads() {
return numBlocks * blockSize;
}
//return the number of operations that are executed in the kernel's loop
//for the specified number of operations.
//Ex: 6 operations per iteration * 1000000 iterations = 6000000 operations
// NOTE(review): int arithmetic — may overflow for very large iterNum values.
int getOpsPerThread() {
return opsPerIteration * iterNum;
}
// Implemented by each subclass to launch its specific kernel.
void runKernel();
// Report (but do not abort on) CUDA API errors.
void CUDA_ERROR(cudaError_t e) {
if (e != cudaSuccess) {
printf("cuda error in test class: \"%s\"\n", cudaGetErrorString(e));
}
}
};
//------------ TEST CLASS FOR BASE POWER MEASUREMENT APPR 1 ---------
// Harness for addKernel1_DynamicSharedMem: launches the add kernel while
// reserving a caller-chosen fraction of per-block shared memory.
template <typename T>
class AddKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
unsigned int sharedMemRequest; // bytes of dynamic shared memory to request
float sharedMemScale;          // fraction of sharedMemPerBlock to reserve
AddKernel1TestSetSharedMem(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{
this->opsPerIteration = 6;
// fixed: these members were uninitialized; kernelSetup() read garbage
// whenever setSharedMem() had not been called first.
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
AddKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{
this->opsPerIteration = 6;
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
//in addition to normal setup, figure out how much shared memory to request
void kernelSetup(cudaDeviceProp deviceProp) {
ArithmeticTestBase<T>::kernelSetup(deviceProp);
sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
}
// Set the fraction of per-block shared memory to request.
// Call before kernelSetup() for the value to take effect.
void setSharedMem(float newScale) {
sharedMemScale = newScale;
}
void runKernel() {
addKernel1_DynamicSharedMem<T><<<this->numBlocks, this->blockSize, sharedMemRequest>>>
(this->n, this->iterNum, this->d_x);
}
};
// Harness for multKernel_DynamicSharedMem: launches the multiply kernel while
// reserving a caller-chosen fraction of per-block shared memory.
template <typename T>
class MultKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
unsigned int sharedMemRequest; // bytes of dynamic shared memory to request
float sharedMemScale;          // fraction of sharedMemPerBlock to reserve
MultKernel1TestSetSharedMem(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{
this->opsPerIteration = 6;
// fixed: these members were uninitialized; kernelSetup() read garbage
// whenever setSharedMem() had not been called first.
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
MultKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{
this->opsPerIteration = 6;
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
//in addition to normal setup, figure out how much shared memory to request
void kernelSetup(cudaDeviceProp deviceProp) {
ArithmeticTestBase<T>::kernelSetup(deviceProp);
sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
}
// Set the fraction of per-block shared memory to request.
// Call before kernelSetup() for the value to take effect.
void setSharedMem(float newScale) {
sharedMemScale = newScale;
}
void runKernel() {
multKernel_DynamicSharedMem<T><<<this->numBlocks, this->blockSize, sharedMemRequest>>>
(this->n, this->iterNum, this->d_x);
}
};
// Harness for fmaKernel_DynamicSharedMem: launches the FMA kernel while
// reserving a caller-chosen fraction of per-block shared memory.
template <typename T>
class FMAKernel1TestSetSharedMem : public ArithmeticTestBase<T> {
public:
unsigned int sharedMemRequest; // bytes of dynamic shared memory to request
float sharedMemScale;          // fraction of sharedMemPerBlock to reserve
FMAKernel1TestSetSharedMem(int blockSize, int iterNum)
: ArithmeticTestBase<T>(blockSize, iterNum)
{
this->opsPerIteration = 9;
this->arrayInitVal = (T) 0.25;
// fixed: these members were uninitialized; kernelSetup() read garbage
// whenever setSharedMem() had not been called first.
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
FMAKernel1TestSetSharedMem(int blockSize, int iterNum, int numBlockScale)
: ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
{
this->opsPerIteration = 9;
this->arrayInitVal = (T) 0.25;
sharedMemScale = 0.0f;
sharedMemRequest = 0;
}
//in addition to normal setup, figure out how much shared memory to request
void kernelSetup(cudaDeviceProp deviceProp) {
ArithmeticTestBase<T>::kernelSetup(deviceProp);
sharedMemRequest = (unsigned int) (deviceProp.sharedMemPerBlock*sharedMemScale);
}
// Set the fraction of per-block shared memory to request.
// Call before kernelSetup() for the value to take effect.
void setSharedMem(float newScale) {
sharedMemScale = newScale;
}
void runKernel() {
fmaKernel_DynamicSharedMem<T><<<this->numBlocks, this->blockSize, sharedMemRequest>>>
(this->n, this->iterNum, this->d_x);
}
};
//------------ TEST CASE FOR BASE POWER APPR 2 ---------
// Harness for addKernel1Volatile: 6 additions per loop iteration.
template <typename T>
class AddKernel1TestVolatile : public ArithmeticTestBase<T> {
public:
    AddKernel1TestVolatile(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 6;
    }
    AddKernel1TestVolatile(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 6;
    }
    // Launch the volatile add kernel with the configuration from kernelSetup().
    void runKernel() {
        addKernel1Volatile<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
// Harness for multKernel1_nonVolitile: 6 multiplications per loop iteration.
template <typename T>
class MultKernel1TestNonVolatile : public ArithmeticTestBase<T> {
public:
    MultKernel1TestNonVolatile(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 6;
    }
    MultKernel1TestNonVolatile(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 6;
    }
    // Launch the non-volatile multiply kernel with the setup-computed config.
    void runKernel() {
        multKernel1_nonVolitile<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
//------------ ADDITION TEST CLASSES ---------
// Harness for addKernel1: 6 dependent additions per loop iteration.
template <typename T>
class AddKernel1Test : public ArithmeticTestBase<T> {
public:
    AddKernel1Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 6;
    }
    AddKernel1Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 6;
    }
    // Launch addKernel1 with the configuration computed in kernelSetup().
    void runKernel() {
        addKernel1<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
// Harness for addKernel2: 9 dependent additions per loop iteration.
template <typename T>
class AddKernel2Test : public ArithmeticTestBase<T> {
public:
    AddKernel2Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 9;
    }
    AddKernel2Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 9;
    }
    // Launch addKernel2 with the configuration computed in kernelSetup().
    void runKernel() {
        addKernel2<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
//------------ MULTIPLICATION TEST CLASSES ---------
// Harness for multKernel1: 6 dependent multiplications per loop iteration.
template <typename T>
class MultKernel1Test : public ArithmeticTestBase<T> {
public:
    MultKernel1Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 6;
    }
    MultKernel1Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 6;
    }
    // Launch multKernel1 with the configuration computed in kernelSetup().
    void runKernel() {
        multKernel1<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
// Harness for multKernel2: 9 dependent multiplications per loop iteration.
template <typename T>
class MultKernel2Test : public ArithmeticTestBase<T> {
public:
    MultKernel2Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 9;
    }
    MultKernel2Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 9;
    }
    // Launch multKernel2 with the configuration computed in kernelSetup().
    void runKernel() {
        multKernel2<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
//------------ FMA TEST CLASSES ---------
// Harness for fmaKernel1: 9 FMA-eligible operations per loop iteration.
// Initializes the buffer to 0.25 to keep the FMA chain numerically stable.
template <typename T>
class FmaKernel1Test : public ArithmeticTestBase<T> {
public:
    FmaKernel1Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 9;
        this->arrayInitVal = (T) 0.25;
    }
    FmaKernel1Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 9;
        this->arrayInitVal = (T) 0.25;
    }
    // Launch fmaKernel1 with the configuration computed in kernelSetup().
    void runKernel() {
        fmaKernel1<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
// Harness for fmaKernel2: 12 FMA-eligible operations per loop iteration.
// Initializes the buffer to 0.25 to keep the FMA chain numerically stable.
template <typename T>
class FmaKernel2Test : public ArithmeticTestBase<T> {
public:
    FmaKernel2Test(int blockSize, int iterNum)
        : ArithmeticTestBase<T>(blockSize, iterNum)
    {
        this->opsPerIteration = 12;
        this->arrayInitVal = (T) 0.25;
    }
    FmaKernel2Test(int blockSize, int iterNum, int numBlockScale)
        : ArithmeticTestBase<T>(blockSize, iterNum, numBlockScale)
    {
        this->opsPerIteration = 12;
        this->arrayInitVal = (T) 0.25;
    }
    // Launch fmaKernel2 with the configuration computed in kernelSetup().
    void runKernel() {
        fmaKernel2<T><<<this->numBlocks, this->blockSize>>>(
            this->n, this->iterNum, this->d_x);
    }
};
|
ca7d5d1320c5d1ad8c46ca8e8dea0f04fcc8e54d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA Convolution Library
*
* v1.0.0
*
* C.S. Coban
*
*/
#include <stdio.h>
#include <string.h>
//Cuda Libs
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
#include "device_launch_parameters.h"
//OpenCV Libs
#include "opencv2/opencv.hpp"
#include "helperLibs/helper_cuda.h"
#include "helperLibs/helper_functions.h"
//CUDA Error Checker
// Prints a diagnostic for a failed HIP/CUDA call, waits for a keypress so the
// message stays visible when run from a console window, then exits the process.
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if(err!=hipSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
// Pause before exiting so the error remains readable.
std::cin.get();
exit(EXIT_FAILURE);
}
}
// Wrap every HIP/CUDA API call so failures report file and line.
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
//Allocate filter kernel in constant memory on device
// 5x5 binomial (Gaussian-blur) filter. Coefficients sum to 256, which matches
// the `weight` divisor passed to conv_globalMem by convolution().
__constant__ int filter_kernel[] =
{
1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1,
};
//Set block size
// 16x16 = 256 threads per block for the 2D launch in convolution().
#define BLOCK_SIZE 16
/* KERNELS */
// 2D convolution over a 3-channel 8-bit image using the 5x5 filter_kernel in
// constant memory. One thread per output pixel.
//   inputWidthStep/outputWidthStep: row strides in bytes (cv::Mat::step)
//   radius: filter radius (2 for the 5x5 kernel)
//   weight: normalization divisor (sum of the filter coefficients)
// NOTE(review): taps falling outside the image are skipped, but the sum is
// still divided by the full weight, so border pixels come out darker than a
// renormalized convolution would produce — confirm this is intended.
__global__ void conv_globalMem( unsigned char* input,
unsigned char* output,
int width,
int height,
int inputWidthStep,
int outputWidthStep,
int radius, int weight){
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex < width) && (yIndex < height)){
//Allocate values
int3 acc = make_int3(0,0,0);
int3 val = make_int3(0,0,0);
// Byte offset of this pixel's first channel in the output image.
int output_tid = yIndex * outputWidthStep + (3 * xIndex);
for (int i = -radius; i <= radius; i++) {
for (int j = -radius; j <= radius; j++) {
//Skip violations (which will lead to zero by default
if ((xIndex + i < 0) || (xIndex + i >= width) || (yIndex + j < 0) || (yIndex + j >= height)) continue;
//Get kernel value
// Row-major index into the (2*radius+1) x (2*radius+1) filter.
int temp = filter_kernel[i + radius + (j+radius)*((radius << 1) + 1)];
//Location of colored pixel in input
int input_tid = (yIndex + j) * inputWidthStep + (3 * (xIndex + i));
//Fetch the three channel values
const unsigned char blue = input[input_tid];
const unsigned char green = input[input_tid + 1];
const unsigned char red = input[input_tid + 2];
val.x = int(blue)*temp;
val.y = int(green)*temp;
val.z = int(red)*temp;
//Perform cumulative sum
acc.x += val.x;
acc.y += val.y;
acc.z += val.z;
}
}
// Normalize by the filter weight and write the three channels back.
acc.x = acc.x/weight;
acc.y = acc.y/weight;
acc.z = acc.z/weight;
output[output_tid] = static_cast<unsigned char>(acc.x);
output[output_tid + 1] = static_cast<unsigned char>(acc.y);
output[output_tid + 2] = static_cast<unsigned char>(acc.z);
}
} //end of convGlobal
/*Convolution Wrapper*/
// Applies the 5x5 constant-memory filter (Gaussian blur) to a 3-channel
// 8-bit image. `input` and `output` must have identical dimensions and type.
void convolution( const cv::Mat& input, cv::Mat& output ){
//Calculate the bytes to be transferred (step includes any row padding)
// fixed: use size_t — int could overflow for very large images
const size_t inputBytes = input.step * input.rows;
const size_t outputBytes = output.step * output.rows;
//Instantiate device pointers
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_input,inputBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output,outputBytes),"CUDA Malloc Failed");
//Calculate required threads and gridSize size to cover the whole image
//Specify a reasonable blockSize size
const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); //16x16 threads = 256 thread per block
//Calculate gridSize size to cover the whole image (ceiling division)
const dim3 gridSize((input.cols + blockSize.x - 1)/blockSize.x, (input.rows + blockSize.y - 1)/blockSize.y);
SAFE_CALL(hipMemcpy(d_input,input.ptr(),inputBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Launch the convolution kernel
int radius = 2;   // 5x5 kernel -> radius 2
int weight = 256; // sum of the filter_kernel coefficients
hipLaunchKernelGGL(( conv_globalMem), dim3(gridSize),dim3(blockSize), 0, 0, d_input,d_output,input.cols,input.rows,input.step,output.step, radius, weight);
// fixed: check for launch-configuration errors before synchronizing
SAFE_CALL(hipGetLastError(),"Kernel Launch Failed");
//Synchronize to check for any kernel execution errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Execution Failed");
//Copy back data from destination device memory to OpenCV output image
// fixed: error message previously said "Host To Device" for this D2H copy
SAFE_CALL(hipMemcpy(output.ptr(),d_output,outputBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(hipFree(d_input),"CUDA Free Failed");
SAFE_CALL(hipFree(d_output),"CUDA Free Failed");
}
| ca7d5d1320c5d1ad8c46ca8e8dea0f04fcc8e54d.cu | /*
* CUDA Convolution Library
*
* v1.0.0
*
* C.S. Coban
*
*/
#include <stdio.h>
#include <string.h>
//Cuda Libs
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector_types.h>
#include "device_launch_parameters.h"
//OpenCV Libs
#include "opencv2/opencv.hpp"
#include "helperLibs/helper_cuda.h"
#include "helperLibs/helper_functions.h"
//CUDA Error Checker
// Prints a diagnostic for a failed CUDA call, waits for a keypress so the
// message stays visible when run from a console window, then exits the process.
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if(err!=cudaSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
// Pause before exiting so the error remains readable.
std::cin.get();
exit(EXIT_FAILURE);
}
}
// Wrap every CUDA API call so failures report file and line.
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
//Allocate filter kernel in constant memory on device
// 5x5 binomial (Gaussian-blur) filter. Coefficients sum to 256, which matches
// the `weight` divisor passed to conv_globalMem by convolution().
__constant__ int filter_kernel[] =
{
1, 4, 6, 4, 1,
4, 16, 24, 16, 4,
6, 24, 36, 24, 6,
4, 16, 24, 16, 4,
1, 4, 6, 4, 1,
};
//Set block size
// 16x16 = 256 threads per block for the 2D launch in convolution().
#define BLOCK_SIZE 16
/* KERNELS */
// 2D convolution over a 3-channel 8-bit image using the 5x5 filter_kernel in
// constant memory. One thread per output pixel.
//   inputWidthStep/outputWidthStep: row strides in bytes (cv::Mat::step)
//   radius: filter radius (2 for the 5x5 kernel)
//   weight: normalization divisor (sum of the filter coefficients)
// NOTE(review): taps falling outside the image are skipped, but the sum is
// still divided by the full weight, so border pixels come out darker than a
// renormalized convolution would produce — confirm this is intended.
__global__ void conv_globalMem( unsigned char* input,
unsigned char* output,
int width,
int height,
int inputWidthStep,
int outputWidthStep,
int radius, int weight){
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex < width) && (yIndex < height)){
//Allocate values
int3 acc = make_int3(0,0,0);
int3 val = make_int3(0,0,0);
// Byte offset of this pixel's first channel in the output image.
int output_tid = yIndex * outputWidthStep + (3 * xIndex);
for (int i = -radius; i <= radius; i++) {
for (int j = -radius; j <= radius; j++) {
//Skip violations (which will lead to zero by default
if ((xIndex + i < 0) || (xIndex + i >= width) || (yIndex + j < 0) || (yIndex + j >= height)) continue;
//Get kernel value
// Row-major index into the (2*radius+1) x (2*radius+1) filter.
int temp = filter_kernel[i + radius + (j+radius)*((radius << 1) + 1)];
//Location of colored pixel in input
int input_tid = (yIndex + j) * inputWidthStep + (3 * (xIndex + i));
//Fetch the three channel values
const unsigned char blue = input[input_tid];
const unsigned char green = input[input_tid + 1];
const unsigned char red = input[input_tid + 2];
val.x = int(blue)*temp;
val.y = int(green)*temp;
val.z = int(red)*temp;
//Perform cumulative sum
acc.x += val.x;
acc.y += val.y;
acc.z += val.z;
}
}
// Normalize by the filter weight and write the three channels back.
acc.x = acc.x/weight;
acc.y = acc.y/weight;
acc.z = acc.z/weight;
output[output_tid] = static_cast<unsigned char>(acc.x);
output[output_tid + 1] = static_cast<unsigned char>(acc.y);
output[output_tid + 2] = static_cast<unsigned char>(acc.z);
}
} //end of convGlobal
/*Convolution Wrapper*/
// Applies the 5x5 constant-memory filter (Gaussian blur) to a 3-channel
// 8-bit image. `input` and `output` must have identical dimensions and type.
void convolution( const cv::Mat& input, cv::Mat& output ){
//Calculate the bytes to be transferred (step includes any row padding)
// fixed: use size_t — int could overflow for very large images
const size_t inputBytes = input.step * input.rows;
const size_t outputBytes = output.step * output.rows;
//Instantiate device pointers
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_input,inputBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output,outputBytes),"CUDA Malloc Failed");
//Calculate required threads and gridSize size to cover the whole image
//Specify a reasonable blockSize size
const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); //16x16 threads = 256 thread per block
//Calculate gridSize size to cover the whole image (ceiling division)
const dim3 gridSize((input.cols + blockSize.x - 1)/blockSize.x, (input.rows + blockSize.y - 1)/blockSize.y);
SAFE_CALL(cudaMemcpy(d_input,input.ptr(),inputBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Launch the convolution kernel
int radius = 2;   // 5x5 kernel -> radius 2
int weight = 256; // sum of the filter_kernel coefficients
conv_globalMem<<<gridSize,blockSize>>>(d_input,d_output,input.cols,input.rows,input.step,output.step, radius, weight);
// fixed: check for launch-configuration errors before synchronizing
SAFE_CALL(cudaGetLastError(),"Kernel Launch Failed");
//Synchronize to check for any kernel execution errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Execution Failed");
//Copy back data from destination device memory to OpenCV output image
// fixed: error message previously said "Host To Device" for this D2H copy
SAFE_CALL(cudaMemcpy(output.ptr(),d_output,outputBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(cudaFree(d_input),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_output),"CUDA Free Failed");
}
|
dbaad3546e24d4abd696464cfeaadaed7c7c2239.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <hip/hip_runtime_api.h>
#include "timer.c"
/*********************************************************
*
*
* To Compile:
* nvcc -o cuda_image_processing cuda_image_processing.cu -lGL -lglut
*
*
* To Run:
* ./cuda_image_processing
*
*
*********************************************************/
#define img_width 100
#define img_height 72
unsigned char image_data[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[img_width * img_height];
//static void key_pressed(unsigned char key, int x, int y);
//void sigint_callback(int signal_number);
//static void display();
//void tidy_and_exit();
// Laplacian edge detector over an img_width x img_height 8-bit image.
// Launch layout: one thread block of one thread per pixel, so blockIdx.x is
// the flat (row-major) pixel index. input/output are device buffers of
// img_width * img_height bytes.
// Fix: the row width was hard-coded as the literal 100 even though the
// border test already used img_width; the kernel now works for any
// img_width, not just 100.
__global__ void detect_image_edges(unsigned char *input, unsigned char *output) {
    unsigned int loop = blockIdx.x;          // flat pixel index
    int y_pix = loop / img_width;            // row
    int x_pix = loop - (img_width * y_pix);  // column (loop % img_width)
    // Border pixels lack a full 4-neighbourhood; force them to black.
    if (x_pix == 0 || y_pix == 0 || x_pix == img_width - 1 || y_pix == img_height - 1) {
        output[loop] = 0;
        return;
    }
    int pix_b = loop + img_width;  // neighbour in the next row
    int pix_h = loop - img_width;  // neighbour in the previous row
    int pix_d = loop - 1;          // left neighbour
    int pix_f = loop + 1;          // right neighbour
    // Discrete Laplacian: 4 * centre minus the four edge-adjacent neighbours.
    int res = (input[loop] * 4) - input[pix_b] - input[pix_d] - input[pix_f] - input[pix_h];
    // A positive response marks an edge pixel (white); everything else black.
    output[loop] = (res > 0) ? 255 : 0;
}
// Terminate the process with a success status. No GPU or GL resources are
// released here; the OS reclaims them when the process exits.
void tidy_and_exit() {
exit(0);
}
// SIGINT (Ctrl-C) handler: announce the interrupt on stdout, then shut down.
void sigint_callback(int signal_number) {
    (void)signal_number;  // required by the signal() handler signature; unused
    fputs("\nInterrupt from keyboard\n", stdout);
    tidy_and_exit();
}
// GLUT display callback: draws the original image on the left half of the
// window and the edge-detection result on the right half.
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
// Left half: raster origin at the window's left edge (NDC x = -1).
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(img_width, img_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image_data);
// Right half: raster origin at the window's horizontal centre (NDC x = 0).
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(img_width, img_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
// Single-buffered mode (GLUT_SINGLE): flush instead of swapping buffers.
glFlush();
}
// GLUT keyboard callback: Escape quits the program, any other key prints a
// reminder. x/y (mouse position) are required by the callback signature.
static void key_pressed(unsigned char key, int x, int y) {
    (void)x;
    (void)y;
    if (key == 27) {  // ASCII 27 == Escape
        tidy_and_exit();
    } else {
        printf("\nPress escape to exit\n");
    }
}
// Program entry point: time a GPU edge-detection pass over the built-in
// image, print the elapsed time, then display input and result via GLUT.
// Fixes: (1) the results upload wrote to &d_results — the host address of
// the pointer variable — instead of the device buffer; (2) the image
// download read from &d_image_data instead of the device buffer; (3) both
// hipFree calls passed the address of the pointer variable instead of the
// device pointer itself.
int main(int argc, char **argv) {
    // Start the wall-clock timer before any setup work.
    struct timespec timer_start, timer_stop;
    long long int time_taken_for_execution;
    clock_gettime(CLOCK_MONOTONIC, &timer_start);

    printf("\n===============================================================================\n");
    printf("!! IMAGE PROCESSING !! \n");
    printf("===============================================================================\n\n");

    // Allow Ctrl-C to exit cleanly while the GLUT loop is running.
    signal(SIGINT, sigint_callback);
    printf("image dimensions %dx%d", img_width, img_height);

    const size_t image_bytes = sizeof(unsigned char) * (img_width * img_height);
    unsigned char *d_results = NULL;
    unsigned char *d_image_data = NULL;
    hipMalloc((void**)&d_results, image_bytes);
    hipMalloc((void**)&d_image_data, image_bytes);

    // Upload the source image and the (initially empty) result buffer.
    hipMemcpy(d_image_data, image_data, image_bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_results, results, image_bytes, hipMemcpyHostToDevice);

    // One block of one thread per pixel (img_width * img_height == 7200).
    hipLaunchKernelGGL(detect_image_edges, dim3(img_width * img_height), dim3(1), 0, 0, d_image_data, d_results);
    hipDeviceSynchronize();

    // Stop the timer and report the elapsed time.
    clock_gettime(CLOCK_MONOTONIC, &timer_stop);
    timer_calc(&timer_start, &timer_stop, &time_taken_for_execution);
    printf("\n\n===============================================================================\n");
    printf("!! TIME TAKEN FOR EXECUTION !! \n");
    printf("===============================================================================\n\n");
    printf("Nanoseconds: %lld\n", time_taken_for_execution);
    printf("Seconds: %0.9lf\n\n", ((time_taken_for_execution/1.0e9)));
    //printf("Minutes: %0.4lf\n", ((time_taken_for_execution/1.0e9)/60));
    //printf("Hours: %0.2lf\n\n", ((time_taken_for_execution/1.0e9)/3600));

    // Download the edge map (and the unchanged input) back to the host.
    hipMemcpy(results, d_results, image_bytes, hipMemcpyDeviceToHost);
    hipMemcpy(image_data, d_image_data, image_bytes, hipMemcpyDeviceToHost);

    // hipFree takes the device pointer value, not &pointer.
    hipFree(d_image_data);
    hipFree(d_results);

    // Hand control to GLUT: left half shows the input, right half the edges.
    glutInit(&argc, argv);
    glutInitWindowSize(img_width * 2, img_height);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
    glutCreateWindow("Srijay Tuladhar: 6CS005");
    glutDisplayFunc(display);
    glutKeyboardFunc(key_pressed);
    glClearColor(0.0, 1.0, 0.0, 1.0);
    glutMainLoop();  // does not return
    tidy_and_exit();
    return 0;
}
| dbaad3546e24d4abd696464cfeaadaed7c7c2239.cu | #include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <cuda_runtime_api.h>
#include "timer.c"
/*********************************************************
*
*
* To Compile:
* nvcc -o cuda_image_processing cuda_image_processing.cu -lGL -lglut
*
*
* To Run:
* ./cuda_image_processing
*
*
*********************************************************/
#define img_width 100
#define img_height 72
unsigned char image_data[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[img_width * img_height];
//static void key_pressed(unsigned char key, int x, int y);
//void sigint_callback(int signal_number);
//static void display();
//void tidy_and_exit();
// Laplacian edge detector over an img_width x img_height 8-bit image.
// Launch layout: one thread block of one thread per pixel, so blockIdx.x is
// the flat (row-major) pixel index. input/output are device buffers of
// img_width * img_height bytes.
// Fix: the row width was hard-coded as the literal 100 even though the
// border test already used img_width; the kernel now works for any
// img_width, not just 100.
__global__ void detect_image_edges(unsigned char *input, unsigned char *output) {
    unsigned int loop = blockIdx.x;          // flat pixel index
    int y_pix = loop / img_width;            // row
    int x_pix = loop - (img_width * y_pix);  // column (loop % img_width)
    // Border pixels lack a full 4-neighbourhood; force them to black.
    if (x_pix == 0 || y_pix == 0 || x_pix == img_width - 1 || y_pix == img_height - 1) {
        output[loop] = 0;
        return;
    }
    int pix_b = loop + img_width;  // neighbour in the next row
    int pix_h = loop - img_width;  // neighbour in the previous row
    int pix_d = loop - 1;          // left neighbour
    int pix_f = loop + 1;          // right neighbour
    // Discrete Laplacian: 4 * centre minus the four edge-adjacent neighbours.
    int res = (input[loop] * 4) - input[pix_b] - input[pix_d] - input[pix_f] - input[pix_h];
    // A positive response marks an edge pixel (white); everything else black.
    output[loop] = (res > 0) ? 255 : 0;
}
// Terminate the process with a success status. No GPU or GL resources are
// released here; the OS reclaims them when the process exits.
void tidy_and_exit() {
exit(0);
}
// SIGINT (Ctrl-C) handler: announce the interrupt on stdout, then shut down.
void sigint_callback(int signal_number) {
    (void)signal_number;  // required by the signal() handler signature; unused
    fputs("\nInterrupt from keyboard\n", stdout);
    tidy_and_exit();
}
// GLUT display callback: draws the original image on the left half of the
// window and the edge-detection result on the right half.
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
// Left half: raster origin at the window's left edge (NDC x = -1).
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(img_width, img_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image_data);
// Right half: raster origin at the window's horizontal centre (NDC x = 0).
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(img_width, img_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
// Single-buffered mode (GLUT_SINGLE): flush instead of swapping buffers.
glFlush();
}
// GLUT keyboard callback: Escape quits the program, any other key prints a
// reminder. x/y (mouse position) are required by the callback signature.
static void key_pressed(unsigned char key, int x, int y) {
    (void)x;
    (void)y;
    if (key == 27) {  // ASCII 27 == Escape
        tidy_and_exit();
    } else {
        printf("\nPress escape to exit\n");
    }
}
// Program entry point: time a GPU edge-detection pass over the built-in
// image, print the elapsed time, then display input and result via GLUT.
// Fixes: (1) the results upload wrote to &d_results — the host address of
// the pointer variable — instead of the device buffer; (2) the image
// download read from &d_image_data instead of the device buffer; (3) both
// cudaFree calls passed the address of the pointer variable instead of the
// device pointer itself; (4) deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize().
int main(int argc, char **argv) {
    // Start the wall-clock timer before any setup work.
    struct timespec timer_start, timer_stop;
    long long int time_taken_for_execution;
    clock_gettime(CLOCK_MONOTONIC, &timer_start);

    printf("\n===============================================================================\n");
    printf("!! IMAGE PROCESSING !! \n");
    printf("===============================================================================\n\n");

    // Allow Ctrl-C to exit cleanly while the GLUT loop is running.
    signal(SIGINT, sigint_callback);
    printf("image dimensions %dx%d", img_width, img_height);

    const size_t image_bytes = sizeof(unsigned char) * (img_width * img_height);
    unsigned char *d_results = NULL;
    unsigned char *d_image_data = NULL;
    cudaMalloc((void**)&d_results, image_bytes);
    cudaMalloc((void**)&d_image_data, image_bytes);

    // Upload the source image and the (initially empty) result buffer.
    cudaMemcpy(d_image_data, image_data, image_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_results, results, image_bytes, cudaMemcpyHostToDevice);

    // One block of one thread per pixel (img_width * img_height == 7200).
    detect_image_edges<<<img_width * img_height, 1>>>(d_image_data, d_results);
    cudaDeviceSynchronize();

    // Stop the timer and report the elapsed time.
    clock_gettime(CLOCK_MONOTONIC, &timer_stop);
    timer_calc(&timer_start, &timer_stop, &time_taken_for_execution);
    printf("\n\n===============================================================================\n");
    printf("!! TIME TAKEN FOR EXECUTION !! \n");
    printf("===============================================================================\n\n");
    printf("Nanoseconds: %lld\n", time_taken_for_execution);
    printf("Seconds: %0.9lf\n\n", ((time_taken_for_execution/1.0e9)));
    //printf("Minutes: %0.4lf\n", ((time_taken_for_execution/1.0e9)/60));
    //printf("Hours: %0.2lf\n\n", ((time_taken_for_execution/1.0e9)/3600));

    // Download the edge map (and the unchanged input) back to the host.
    cudaMemcpy(results, d_results, image_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(image_data, d_image_data, image_bytes, cudaMemcpyDeviceToHost);

    // cudaFree takes the device pointer value, not &pointer.
    cudaFree(d_image_data);
    cudaFree(d_results);

    // Hand control to GLUT: left half shows the input, right half the edges.
    glutInit(&argc, argv);
    glutInitWindowSize(img_width * 2, img_height);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
    glutCreateWindow("Srijay Tuladhar: 6CS005");
    glutDisplayFunc(display);
    glutKeyboardFunc(key_pressed);
    glClearColor(0.0, 1.0, 0.0, 1.0);
    glutMainLoop();  // does not return
    tidy_and_exit();
    return 0;
}
|
2857a87f8dcb24d4c98d04bffc1f5d812092f8ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/im2col.cuh>
#include <ATen/native/hip/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column ("hvol2col"), CUDA/HIP.
// Unrolls dim-dimensional convolution input patches into columns so that the
// convolution can be computed as a single GEMM. Dispatches on the
// compile-time spatial dimensionality: dim == 3 -> vol2col (D,H,W),
// dim == 2 -> im2col (H,W). Any other dim is a silent no-op.
// All *_size arrays are indexed outermost-first and must hold `dim` entries.
// data_col receives a (channels * prod(kernel_size)) x prod(output_size)
// matrix (see the `columns` buffer in slow_conv_dilated_all_cuda_template).
template <typename Dtype, int64_t dim>
void hvol2col(
    hipStream_t stream,      // stream the unroll kernel is enqueued on
    const Dtype* data_hvol,  // input volume: channels * prod(input_size) elements
    const int channels,
    const IntArrayRef input_size,
    const IntArrayRef output_size,
    const IntArrayRef kernel_size,
    const IntArrayRef stride_size,
    const IntArrayRef pad_size,
    const IntArrayRef dilation_size,
    Dtype* data_col) {
  if (dim == 3) {
    vol2col<Dtype>(
        stream,
        data_hvol,
        channels,
        input_size[0],
        input_size[1],
        input_size[2],
        output_size[0],
        output_size[1],
        output_size[2],
        kernel_size[0],
        kernel_size[1],
        kernel_size[2],
        pad_size[0],
        pad_size[1],
        pad_size[2],
        stride_size[0],
        stride_size[1],
        stride_size[2],
        dilation_size[0],
        dilation_size[1],
        dilation_size[2],
        data_col);
  }
  if (dim == 2) {
    im2col<Dtype>(
        stream,
        data_hvol,
        channels,
        input_size[0],
        input_size[1],
        output_size[0],
        output_size[1],
        kernel_size[0],
        kernel_size[1],
        pad_size[0],
        pad_size[1],
        stride_size[0],
        stride_size[1],
        dilation_size[0],
        dilation_size[1],
        data_col);
  }
}
// column to hyper-volume ("col2hvol"), CUDA/HIP.
// Inverse of hvol2col: folds the column matrix produced by the GEMM back
// into the dim-dimensional volume (used for the gradient w.r.t. input).
// Dispatches on compile-time dim: 3 -> col2vol, 2 -> col2im; any other
// dim is a silent no-op. Size arrays are outermost-first with `dim` entries.
template <typename Dtype, int64_t dim>
void col2hvol(
    hipStream_t stream,     // stream the fold kernel is enqueued on
    const Dtype* data_col,  // (channels * prod(kernel_size)) x prod(output_size) matrix
    const int channels,
    const IntArrayRef input_size,
    const IntArrayRef output_size,
    const IntArrayRef kernel_size,
    const IntArrayRef stride_size,
    const IntArrayRef pad_size,
    const IntArrayRef dilation_size,
    Dtype* data_hvol) {     // output volume: channels * prod(input_size) elements
  if (dim == 3) {
    col2vol<Dtype, Dtype>(
        stream,
        data_col,
        channels,
        input_size[0],
        input_size[1],
        input_size[2],
        output_size[0],
        output_size[1],
        output_size[2],
        kernel_size[0],
        kernel_size[1],
        kernel_size[2],
        pad_size[0],
        pad_size[1],
        pad_size[2],
        stride_size[0],
        stride_size[1],
        stride_size[2],
        dilation_size[0],
        dilation_size[1],
        dilation_size[2],
        data_hvol);
  }
  if (dim == 2) {
    col2im<Dtype, Dtype>(
        stream,
        data_col,
        channels,
        input_size[0],
        input_size[1],
        output_size[0],
        output_size[1],
        kernel_size[0],
        kernel_size[1],
        pad_size[0],
        pad_size[1],
        stride_size[0],
        stride_size[1],
        dilation_size[0],
        dilation_size[1],
        data_hvol);
  }
}
/*
  Verify that all user-supplied tensors live on the same GPU as `input`.
  bias and grad_output are only checked when they are defined. Internally
  allocated tensors (output, grad_input, ...) are created from input's
  options and therefore always share its data location — no check needed.
*/
void slow_conv_dilated_location_check(
    CheckedFrom c,
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const Tensor& grad_output) {
  TensorArg input_arg{input, "input", 2};
  TensorArg weight_arg{weight, "weight", 3};
  TensorArg bias_arg{bias, "bias", 4};
  TensorArg grad_output_arg{grad_output, "grad_output", 5};
  checkAllSameGPU(c, {input_arg, weight_arg});
  if (bias.defined()) {
    checkAllSameGPU(c, {input_arg, bias_arg});
  }
  if (grad_output.defined()) {
    checkAllSameGPU(c, {input_arg, grad_output_arg});
  }
}
/*
  slow_conv_dilated_all_cuda_template

  Main worker. Computes tensors output, grad_input, grad_weight,
  and/or grad_bias if defined, respectively.

  Only the defined tensors are computed, so the same routine serves both
  the forward pass (output defined) and the backward pass (grad_* defined).
  All batched tensor arguments are expected to already carry an explicit
  batch dimension (callers unsqueeze(0) non-batched inputs first), because
  the per-sample loop below uses select(0, elt).
*/
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
    Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const Tensor& grad_output,
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  auto options = input.options();
  // The rear part of input tensor sizes:
  auto input_size = input.sizes().slice(2);
  // The rear part of output tensor sizes:
  auto output_size = internal::get_output_size<dim>(
      input, kernel_size, stride_size, pad_size, dilation_size);
  int64_t batchSize = input.size(0);
  int64_t nInputPlane = weight.size(1);
  int64_t nOutputPlane = weight.size(0);
  // Temporary buffers:
  // m = number of kernel elements, output_vsize = number of output positions.
  const int64_t m = c10::multiply_integers(kernel_size);
  const int64_t output_vsize = c10::multiply_integers(output_size);
  // `columns` is the hvol2col scratch matrix shared by all GEMM paths below;
  // it is only materialized when some requested result needs it.
  Tensor columns = at::empty({0}, options);
  if (output.defined() || grad_weight.defined() || grad_input.defined()) {
    columns.resize_({nInputPlane * m, output_vsize});
  }
  // Initialize
  if (grad_weight.defined()) {
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.zero_();
  }
  if (output.defined() && !bias.defined()) {
    // With a bias, output is overwritten channel-by-channel below; without
    // one it must start at zero because the GEMM accumulates (beta == 1).
    output.zero_();
  }
#if defined(USE_ROCM)
  /* When using ROCm, the sum evaluation is inaccurate for double
     tensors. The reason is currently unknown. Hence, we use gemv for
     computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
     resolved. */
  Tensor ones = at::empty({0}, options);
  if (grad_bias.defined()) {
    ones.resize_({output_vsize});
    ones.fill_(1);
  }
  /* MSVC does not like #ifdef-s inside the CPP macro
     AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
     branching outside the CPP macro: */
#define CALCULATE_GRAD_BIAS \
  at::cuda::blas::gemv<scalar_t>( \
      /*trans=*/'t', \
      /* m=*/output_vsize, \
      /* n=*/nOutputPlane, \
      /*alpha=*/static_cast<scalar_t>(1), \
      /* A=*/grad_output_n.data_ptr<scalar_t>(), \
      /* lda=*/output_vsize, \
      /* x=*/ones.data_ptr<scalar_t>(), \
      /* incx=*/1, \
      /* beta=*/static_cast<scalar_t>(1), \
      /* y=*/grad_bias.data_ptr<scalar_t>(), \
      /* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
  // Helpers
  Tensor grad_output_n;
  // dims = {1, ..., dim}: the spatial axes summed over for grad_bias.
  std::vector<int64_t> dims(dim);
  std::iota(dims.begin(), dims.end(), 1);
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_dilated<>", [&] {
    // For each elt in batch, do:
    // NOTE(review): loop index is int while batchSize is int64_t — fine for
    // realistic batch sizes, but worth confirming no truncation is possible.
    for (int elt = 0; elt < batchSize; elt++) {
      // Matrix multiply per output:
      Tensor input_n = input.select(0, elt);
      // Output
      if (output.defined()) {
        Tensor output_n = output.select(0, elt);
        if (bias.defined()) {
          /* For gemm argument derivation, see
             slow_conv_dilated_all_cuda_template in
             ATen/native/DilatedConvolution.cpp */
          // Seed each output channel with its bias value; the GEMM below
          // accumulates on top (beta == 1).
          for (int n = 0; n < nOutputPlane; n++) {
            output_n.select(0, n).fill_(bias[n]);
          }
        }
        // Extract columns:
        hvol2col<scalar_t, dim>(
            stream,
            input_n.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            columns.data_ptr<scalar_t>());
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'n',
            /*transb=*/'n',
            /* m=*/columns.size(1),
            /* n=*/nOutputPlane,
            /* k=*/columns.size(0),
            /* alpha=*/static_cast<scalar_t>(1),
            /* A=*/columns.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/weight.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(0),
            /* beta=*/static_cast<scalar_t>(1),
            /* C=*/output_n.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(1));
      } else {
        // All gradients
        grad_output_n = grad_output.select(0, elt);
      }
      // Gradient of input:
      if (grad_input.defined()) {
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'n',
            /*transb=*/'t',
            /* m=*/columns.size(1),
            /* n=*/columns.size(0),
            /* k=*/nOutputPlane,
            /* alpha=*/static_cast<scalar_t>(1),
            /* A=*/grad_output_n.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/weight.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(0),
            /* beta=*/static_cast<scalar_t>(0),
            /* C=*/columns.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(1));
        // Unpack columns back into input:
        Tensor grad_input_n = grad_input.select(0, elt);
        col2hvol<scalar_t, dim>(
            stream,
            columns.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            grad_input_n.data_ptr<scalar_t>());
      }
      // Gradient of weight:
      if (grad_weight.defined()) {
        // Extract columns:
        hvol2col<scalar_t, dim>(
            stream,
            input_n.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            columns.data_ptr<scalar_t>());
        scalar_t scale = static_cast<scalar_t>(
            1); // TODO: expose as argument?
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'t',
            /*transb=*/'n',
            /* m=*/columns.size(0),
            /* n=*/nOutputPlane,
            /* k=*/columns.size(1),
            /* alpha=*/scale,
            /* A=*/columns.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/grad_output_n.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(1),
            /* beta=*/static_cast<scalar_t>(1),
            /* C=*/grad_weight.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(0));
      }
      // Gradient of bias:
      if (grad_bias.defined()) {
        /* For gemv argument derivation, see
           slow_conv_dilated_all_cpu_template in
           ATen/native/DilatedConvolution.cpp */
        CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
                                inside the CPP macros, see above. */
        /*
          TODO: when scale != 1 is introduced then use:
          grad_bias += scale * grad_output_n.sum(dims);
         */
      }
    }
  });
} // slow_conv_dilated_all_cuda_template
} // namespace
// 2-D dilated convolution, forward pass (CUDA).
// Accepts a batched (N,C,H,W) or single (C,H,W) input; a single input is
// temporarily viewed as a batch of one. Returns the freshly allocated output.
Tensor slow_conv_dilated2d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
  const Tensor& bias = *bias_maybe_owned;

  Tensor undefined;
  internal::slow_conv_dilated_shape_check<2>(
      input, weight, bias, undefined,
      kernel_size, stride_size, pad_size, dilation_size);

  const bool is_batch = (input.dim() == 4);
  // calculate output tensor size
  auto output_size = internal::get_output_size<2>(
      input, weight, kernel_size, stride_size, pad_size, dilation_size);

  // The worker template assumes batched tensors; unsqueeze(0) adds a batch
  // axis as a view without touching the caller's tensor.
  Tensor input_ = input.contiguous();
  if (!is_batch) {
    input_ = input_.unsqueeze(0);
  }
  const Tensor weight_ = weight.contiguous();
  const Tensor bias_ = bias.defined() ? bias.contiguous() : undefined;

  Tensor output = at::empty(output_size, input.options());
  Tensor output_ = is_batch ? output : output.unsqueeze(0);
  slow_conv_dilated_all_cuda_template<2>(
      output_, input_, weight_, bias_,
      undefined, undefined, undefined, undefined,
      kernel_size, stride_size, pad_size, dilation_size);
  return output;
}
// 2-D dilated convolution, backward pass (CUDA).
// Computes only the gradients selected by output_mask
// ({grad_input, grad_weight, grad_bias}); unselected slots stay undefined.
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size,
    const std::array<bool, 3ul> output_mask) {
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<2>(
      input,
      weight,
      undefined,
      grad_output,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 4;
  auto options = grad_output.options();
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor grad_output_ =
      (is_batch ? grad_output.contiguous()
                : grad_output.contiguous().unsqueeze(0));
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  // compute only gradients for which the corresponding output_mask is true:
  Tensor grad_input =
      (output_mask[0] ? at::empty(input.sizes(), options) : undefined);
  Tensor grad_weight =
      (output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
  Tensor grad_bias =
      (output_mask[2] ? at::empty(weight.size(0), options) : undefined);
  Tensor grad_input_ =
      (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
                      : undefined);
  // BUGFIX: pass the batched view grad_input_ (not grad_input) to the worker.
  // The worker indexes with select(0, elt), which requires an explicit batch
  // dimension; for a non-batched input grad_input lacks it. grad_input_ was
  // previously computed but never used. Since unsqueeze(0) returns a view
  // sharing storage, the returned grad_input still receives the values.
  slow_conv_dilated_all_cuda_template<2>(
      undefined,
      input_,
      weight_,
      undefined,
      grad_output_,
      grad_input_,
      grad_weight,
      grad_bias,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return std::tie(grad_input, grad_weight, grad_bias);
}
// 3-D dilated convolution, forward pass (CUDA).
// Accepts a batched (N,C,D,H,W) or single (C,D,H,W) input; a single input
// is temporarily viewed as a batch of one. Returns the allocated output.
Tensor slow_conv_dilated3d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
  const Tensor& bias = *bias_maybe_owned;
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<3>(
      input,
      weight,
      bias,
      undefined,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 5;
  auto options = input.options();
  // calculate output tensor size
  auto output_size = internal::get_output_size<3>(
      input, weight, kernel_size, stride_size, pad_size, dilation_size);
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
  Tensor output = at::empty(output_size, options);
  Tensor output_ = (is_batch ? output : output.unsqueeze(0));
  // BUGFIX: pass the batched view output_ (not output) to the worker, in
  // line with the 2-D variant. The worker's select(0, elt) needs an explicit
  // batch dimension; for a non-batched input, output lacks it. output_ was
  // previously computed but never used; it is a view sharing storage, so the
  // returned output still receives the computed values.
  slow_conv_dilated_all_cuda_template<3>(
      output_,
      input_,
      weight_,
      bias_,
      undefined,
      undefined,
      undefined,
      undefined,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return output;
}
// 3-D dilated convolution, backward pass (CUDA).
// Computes only the gradients selected by output_mask
// ({grad_input, grad_weight, grad_bias}); unselected slots stay undefined.
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size,
    const std::array<bool, 3ul> output_mask) {
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<3>(
      input,
      weight,
      undefined,
      grad_output,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 5;
  auto options = grad_output.options();
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor grad_output_ =
      (is_batch ? grad_output.contiguous()
                : grad_output.contiguous().unsqueeze(0));
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  // compute only gradients for which the corresponding output_mask is true:
  Tensor grad_input =
      (output_mask[0] ? at::empty(input.sizes(), options) : undefined);
  Tensor grad_weight =
      (output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
  Tensor grad_bias =
      (output_mask[2] ? at::empty(weight.size(0), options) : undefined);
  Tensor grad_input_ =
      (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
                      : undefined);
  // BUGFIX: pass the batched view grad_input_ (not grad_input) to the worker.
  // The worker indexes with select(0, elt), which requires an explicit batch
  // dimension; for a non-batched input grad_input lacks it. grad_input_ was
  // previously computed but never used. Since unsqueeze(0) returns a view
  // sharing storage, the returned grad_input still receives the values.
  slow_conv_dilated_all_cuda_template<3>(
      undefined,
      input_,
      weight_,
      undefined,
      grad_output_,
      grad_input_,
      grad_weight,
      grad_bias,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| 2857a87f8dcb24d4c98d04bffc1f5d812092f8ba.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/native/cuda/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column ("hvol2col"), CUDA.
// Unrolls dim-dimensional convolution input patches into columns so that the
// convolution can be computed as a single GEMM. Dispatches on the
// compile-time spatial dimensionality: dim == 3 -> vol2col (D,H,W),
// dim == 2 -> im2col (H,W). Any other dim is a silent no-op.
// All *_size arrays are indexed outermost-first and must hold `dim` entries.
// data_col receives a (channels * prod(kernel_size)) x prod(output_size)
// matrix (see the `columns` buffer in slow_conv_dilated_all_cuda_template).
template <typename Dtype, int64_t dim>
void hvol2col(
    cudaStream_t stream,     // stream the unroll kernel is enqueued on
    const Dtype* data_hvol,  // input volume: channels * prod(input_size) elements
    const int channels,
    const IntArrayRef input_size,
    const IntArrayRef output_size,
    const IntArrayRef kernel_size,
    const IntArrayRef stride_size,
    const IntArrayRef pad_size,
    const IntArrayRef dilation_size,
    Dtype* data_col) {
  if (dim == 3) {
    vol2col<Dtype>(
        stream,
        data_hvol,
        channels,
        input_size[0],
        input_size[1],
        input_size[2],
        output_size[0],
        output_size[1],
        output_size[2],
        kernel_size[0],
        kernel_size[1],
        kernel_size[2],
        pad_size[0],
        pad_size[1],
        pad_size[2],
        stride_size[0],
        stride_size[1],
        stride_size[2],
        dilation_size[0],
        dilation_size[1],
        dilation_size[2],
        data_col);
  }
  if (dim == 2) {
    im2col<Dtype>(
        stream,
        data_hvol,
        channels,
        input_size[0],
        input_size[1],
        output_size[0],
        output_size[1],
        kernel_size[0],
        kernel_size[1],
        pad_size[0],
        pad_size[1],
        stride_size[0],
        stride_size[1],
        dilation_size[0],
        dilation_size[1],
        data_col);
  }
}
// column to hyper-volume ("col2hvol"), CUDA.
// Inverse of hvol2col: folds the column matrix produced by the GEMM back
// into the dim-dimensional volume (used for the gradient w.r.t. input).
// Dispatches on compile-time dim: 3 -> col2vol, 2 -> col2im; any other
// dim is a silent no-op. Size arrays are outermost-first with `dim` entries.
template <typename Dtype, int64_t dim>
void col2hvol(
    cudaStream_t stream,    // stream the fold kernel is enqueued on
    const Dtype* data_col,  // (channels * prod(kernel_size)) x prod(output_size) matrix
    const int channels,
    const IntArrayRef input_size,
    const IntArrayRef output_size,
    const IntArrayRef kernel_size,
    const IntArrayRef stride_size,
    const IntArrayRef pad_size,
    const IntArrayRef dilation_size,
    Dtype* data_hvol) {     // output volume: channels * prod(input_size) elements
  if (dim == 3) {
    col2vol<Dtype, Dtype>(
        stream,
        data_col,
        channels,
        input_size[0],
        input_size[1],
        input_size[2],
        output_size[0],
        output_size[1],
        output_size[2],
        kernel_size[0],
        kernel_size[1],
        kernel_size[2],
        pad_size[0],
        pad_size[1],
        pad_size[2],
        stride_size[0],
        stride_size[1],
        stride_size[2],
        dilation_size[0],
        dilation_size[1],
        dilation_size[2],
        data_hvol);
  }
  if (dim == 2) {
    col2im<Dtype, Dtype>(
        stream,
        data_col,
        channels,
        input_size[0],
        input_size[1],
        output_size[0],
        output_size[1],
        kernel_size[0],
        kernel_size[1],
        pad_size[0],
        pad_size[1],
        stride_size[0],
        stride_size[1],
        dilation_size[0],
        dilation_size[1],
        data_hvol);
  }
}
/*
  Verify that all user-supplied tensors live on the same GPU as `input`.
  bias and grad_output are only checked when they are defined. Internally
  allocated tensors (output, grad_input, ...) are created from input's
  options and therefore always share its data location — no check needed.
*/
void slow_conv_dilated_location_check(
    CheckedFrom c,
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const Tensor& grad_output) {
  TensorArg input_arg{input, "input", 2};
  TensorArg weight_arg{weight, "weight", 3};
  TensorArg bias_arg{bias, "bias", 4};
  TensorArg grad_output_arg{grad_output, "grad_output", 5};
  checkAllSameGPU(c, {input_arg, weight_arg});
  if (bias.defined()) {
    checkAllSameGPU(c, {input_arg, bias_arg});
  }
  if (grad_output.defined()) {
    checkAllSameGPU(c, {input_arg, grad_output_arg});
  }
}
/*
  slow_conv_dilated_all_cuda_template

  Main worker. Computes tensors output, grad_input, grad_weight,
  and/or grad_bias if defined, respectively.

  Only the defined tensors are computed, so the same routine serves both
  the forward pass (output defined) and the backward pass (grad_* defined).
  All batched tensor arguments are expected to already carry an explicit
  batch dimension (callers unsqueeze(0) non-batched inputs first), because
  the per-sample loop below uses select(0, elt).
*/
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
    Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    const Tensor& bias,
    const Tensor& grad_output,
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  auto options = input.options();
  // The rear part of input tensor sizes:
  auto input_size = input.sizes().slice(2);
  // The rear part of output tensor sizes:
  auto output_size = internal::get_output_size<dim>(
      input, kernel_size, stride_size, pad_size, dilation_size);
  int64_t batchSize = input.size(0);
  int64_t nInputPlane = weight.size(1);
  int64_t nOutputPlane = weight.size(0);
  // Temporary buffers:
  // m = number of kernel elements, output_vsize = number of output positions.
  const int64_t m = c10::multiply_integers(kernel_size);
  const int64_t output_vsize = c10::multiply_integers(output_size);
  // `columns` is the hvol2col scratch matrix shared by all GEMM paths below;
  // it is only materialized when some requested result needs it.
  Tensor columns = at::empty({0}, options);
  if (output.defined() || grad_weight.defined() || grad_input.defined()) {
    columns.resize_({nInputPlane * m, output_vsize});
  }
  // Initialize
  if (grad_weight.defined()) {
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.zero_();
  }
  if (output.defined() && !bias.defined()) {
    // With a bias, output is overwritten channel-by-channel below; without
    // one it must start at zero because the GEMM accumulates (beta == 1).
    output.zero_();
  }
#if defined(USE_ROCM)
  /* When using ROCm, the sum evaluation is inaccurate for double
     tensors. The reason is currently unknown. Hence, we use gemv for
     computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
     resolved. */
  Tensor ones = at::empty({0}, options);
  if (grad_bias.defined()) {
    ones.resize_({output_vsize});
    ones.fill_(1);
  }
  /* MSVC does not like #ifdef-s inside the CPP macro
     AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
     branching outside the CPP macro: */
#define CALCULATE_GRAD_BIAS \
  at::cuda::blas::gemv<scalar_t>( \
      /*trans=*/'t', \
      /* m=*/output_vsize, \
      /* n=*/nOutputPlane, \
      /*alpha=*/static_cast<scalar_t>(1), \
      /* A=*/grad_output_n.data_ptr<scalar_t>(), \
      /* lda=*/output_vsize, \
      /* x=*/ones.data_ptr<scalar_t>(), \
      /* incx=*/1, \
      /* beta=*/static_cast<scalar_t>(1), \
      /* y=*/grad_bias.data_ptr<scalar_t>(), \
      /* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
  // Helpers
  Tensor grad_output_n;
  // dims = {1, ..., dim}: the spatial axes summed over for grad_bias.
  std::vector<int64_t> dims(dim);
  std::iota(dims.begin(), dims.end(), 1);
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_dilated<>", [&] {
    // For each elt in batch, do:
    // NOTE(review): loop index is int while batchSize is int64_t — fine for
    // realistic batch sizes, but worth confirming no truncation is possible.
    for (int elt = 0; elt < batchSize; elt++) {
      // Matrix multiply per output:
      Tensor input_n = input.select(0, elt);
      // Output
      if (output.defined()) {
        Tensor output_n = output.select(0, elt);
        if (bias.defined()) {
          /* For gemm argument derivation, see
             slow_conv_dilated_all_cuda_template in
             ATen/native/DilatedConvolution.cpp */
          // Seed each output channel with its bias value; the GEMM below
          // accumulates on top (beta == 1).
          for (int n = 0; n < nOutputPlane; n++) {
            output_n.select(0, n).fill_(bias[n]);
          }
        }
        // Extract columns:
        hvol2col<scalar_t, dim>(
            stream,
            input_n.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            columns.data_ptr<scalar_t>());
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'n',
            /*transb=*/'n',
            /* m=*/columns.size(1),
            /* n=*/nOutputPlane,
            /* k=*/columns.size(0),
            /* alpha=*/static_cast<scalar_t>(1),
            /* A=*/columns.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/weight.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(0),
            /* beta=*/static_cast<scalar_t>(1),
            /* C=*/output_n.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(1));
      } else {
        // All gradients
        grad_output_n = grad_output.select(0, elt);
      }
      // Gradient of input:
      if (grad_input.defined()) {
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'n',
            /*transb=*/'t',
            /* m=*/columns.size(1),
            /* n=*/columns.size(0),
            /* k=*/nOutputPlane,
            /* alpha=*/static_cast<scalar_t>(1),
            /* A=*/grad_output_n.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/weight.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(0),
            /* beta=*/static_cast<scalar_t>(0),
            /* C=*/columns.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(1));
        // Unpack columns back into input:
        Tensor grad_input_n = grad_input.select(0, elt);
        col2hvol<scalar_t, dim>(
            stream,
            columns.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            grad_input_n.data_ptr<scalar_t>());
      }
      // Gradient of weight:
      if (grad_weight.defined()) {
        // Extract columns:
        hvol2col<scalar_t, dim>(
            stream,
            input_n.data_ptr<scalar_t>(),
            nInputPlane,
            input_size,
            output_size,
            kernel_size,
            stride_size,
            pad_size,
            dilation_size,
            columns.data_ptr<scalar_t>());
        scalar_t scale = static_cast<scalar_t>(
            1); // TODO: expose as argument?
        /* For gemm argument derivation, see
           slow_conv_dilated_all_cuda_template in
           ATen/native/DilatedConvolution.cpp */
        at::cuda::blas::gemm<scalar_t>(
            /*transa=*/'t',
            /*transb=*/'n',
            /* m=*/columns.size(0),
            /* n=*/nOutputPlane,
            /* k=*/columns.size(1),
            /* alpha=*/scale,
            /* A=*/columns.data_ptr<scalar_t>(),
            /* lda=*/columns.size(1),
            /* B=*/grad_output_n.data_ptr<scalar_t>(),
            /* ldb=*/columns.size(1),
            /* beta=*/static_cast<scalar_t>(1),
            /* C=*/grad_weight.data_ptr<scalar_t>(),
            /* ldc=*/columns.size(0));
      }
      // Gradient of bias:
      if (grad_bias.defined()) {
        /* For gemv argument derivation, see
           slow_conv_dilated_all_cpu_template in
           ATen/native/DilatedConvolution.cpp */
        CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
                                inside the CPP macros, see above. */
        /*
          TODO: when scale != 1 is introduced then use:
          grad_bias += scale * grad_output_n.sum(dims);
         */
      }
    }
  });
} // slow_conv_dilated_all_cuda_template
} // namespace
// 2-D dilated convolution, forward pass (CUDA).
// Accepts a batched (N,C,H,W) or single (C,H,W) input; a single input is
// temporarily viewed as a batch of one. Returns the freshly allocated output.
Tensor slow_conv_dilated2d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
  const Tensor& bias = *bias_maybe_owned;

  Tensor undefined;
  internal::slow_conv_dilated_shape_check<2>(
      input, weight, bias, undefined,
      kernel_size, stride_size, pad_size, dilation_size);

  const bool is_batch = (input.dim() == 4);
  // calculate output tensor size
  auto output_size = internal::get_output_size<2>(
      input, weight, kernel_size, stride_size, pad_size, dilation_size);

  // The worker template assumes batched tensors; unsqueeze(0) adds a batch
  // axis as a view without touching the caller's tensor.
  Tensor input_ = input.contiguous();
  if (!is_batch) {
    input_ = input_.unsqueeze(0);
  }
  const Tensor weight_ = weight.contiguous();
  const Tensor bias_ = bias.defined() ? bias.contiguous() : undefined;

  Tensor output = at::empty(output_size, input.options());
  Tensor output_ = is_batch ? output : output.unsqueeze(0);
  slow_conv_dilated_all_cuda_template<2>(
      output_, input_, weight_, bias_,
      undefined, undefined, undefined, undefined,
      kernel_size, stride_size, pad_size, dilation_size);
  return output;
}
// 2-D dilated convolution, backward pass (CUDA).
// Computes only the gradients selected by output_mask
// ({grad_input, grad_weight, grad_bias}); unselected slots stay undefined.
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size,
    const std::array<bool, 3ul> output_mask) {
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<2>(
      input,
      weight,
      undefined,
      grad_output,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 4;
  auto options = grad_output.options();
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor grad_output_ =
      (is_batch ? grad_output.contiguous()
                : grad_output.contiguous().unsqueeze(0));
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  // compute only gradients for which the corresponding output_mask is true:
  Tensor grad_input =
      (output_mask[0] ? at::empty(input.sizes(), options) : undefined);
  Tensor grad_weight =
      (output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
  Tensor grad_bias =
      (output_mask[2] ? at::empty(weight.size(0), options) : undefined);
  Tensor grad_input_ =
      (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
                      : undefined);
  // BUGFIX: pass the batched view grad_input_ (not grad_input) to the worker.
  // The worker indexes with select(0, elt), which requires an explicit batch
  // dimension; for a non-batched input grad_input lacks it. grad_input_ was
  // previously computed but never used. Since unsqueeze(0) returns a view
  // sharing storage, the returned grad_input still receives the values.
  slow_conv_dilated_all_cuda_template<2>(
      undefined,
      input_,
      weight_,
      undefined,
      grad_output_,
      grad_input_,
      grad_weight,
      grad_bias,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return std::tie(grad_input, grad_weight, grad_bias);
}
// 3-D dilated convolution, forward pass (CUDA).
// Accepts a batched (N,C,D,H,W) or single (C,D,H,W) input; a single input
// is temporarily viewed as a batch of one. Returns the allocated output.
Tensor slow_conv_dilated3d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
  const Tensor& bias = *bias_maybe_owned;
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<3>(
      input,
      weight,
      bias,
      undefined,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 5;
  auto options = input.options();
  // calculate output tensor size
  auto output_size = internal::get_output_size<3>(
      input, weight, kernel_size, stride_size, pad_size, dilation_size);
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
  Tensor output = at::empty(output_size, options);
  Tensor output_ = (is_batch ? output : output.unsqueeze(0));
  // BUGFIX: pass the batched view output_ (not output) to the worker, in
  // line with the 2-D variant. The worker's select(0, elt) needs an explicit
  // batch dimension; for a non-batched input, output lacks it. output_ was
  // previously computed but never used; it is a view sharing storage, so the
  // returned output still receives the computed values.
  slow_conv_dilated_all_cuda_template<3>(
      output_,
      input_,
      weight_,
      bias_,
      undefined,
      undefined,
      undefined,
      undefined,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return output;
}
// 3-D dilated convolution, backward pass (CUDA).
// Computes only the gradients selected by output_mask
// ({grad_input, grad_weight, grad_bias}); unselected slots stay undefined.
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride_size,
    IntArrayRef pad_size,
    IntArrayRef dilation_size,
    const std::array<bool, 3ul> output_mask) {
  Tensor undefined;
  internal::slow_conv_dilated_shape_check<3>(
      input,
      weight,
      undefined,
      grad_output,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  auto is_batch = input.dim() == 5;
  auto options = grad_output.options();
  // template function assumes batched tensors. unsqueeze(0) will
  // insert batch dimension without affecting the original tensor.
  const Tensor grad_output_ =
      (is_batch ? grad_output.contiguous()
                : grad_output.contiguous().unsqueeze(0));
  const Tensor input_ =
      (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
  const Tensor weight_ = weight.contiguous();
  // compute only gradients for which the corresponding output_mask is true:
  Tensor grad_input =
      (output_mask[0] ? at::empty(input.sizes(), options) : undefined);
  Tensor grad_weight =
      (output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
  Tensor grad_bias =
      (output_mask[2] ? at::empty(weight.size(0), options) : undefined);
  Tensor grad_input_ =
      (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
                      : undefined);
  // BUGFIX: pass the batched view grad_input_ (not grad_input) to the worker.
  // The worker indexes with select(0, elt), which requires an explicit batch
  // dimension; for a non-batched input grad_input lacks it. grad_input_ was
  // previously computed but never used. Since unsqueeze(0) returns a view
  // sharing storage, the returned grad_input still receives the values.
  slow_conv_dilated_all_cuda_template<3>(
      undefined,
      input_,
      weight_,
      undefined,
      grad_output_,
      grad_input_,
      grad_weight,
      grad_bias,
      kernel_size,
      stride_size,
      pad_size,
      dilation_size);
  return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
2e4b3f110bee3afaee6a24742150470d8c1dda57.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Limitations of current Longformer Attention CUDA Kernels:
// (1) Does not support global tokens in the middle. All global tokens shall be in the beginning of sequence.
// (2) Batch size <= 128 (defined in MAX_LONGFORMER_BATCH_SIZE)
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include <hip/library_types.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_impl.h"
#include "attention_impl.h"
#include "attention_softmax.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(status) \
if (!CUBLAS_CALL(status)) { \
return false; \
}
constexpr int MAX_LONGFORMER_BATCH_SIZE = 128;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), max number of global tokens (G)
//
// Workspace layout (by default, the data type T is float or half):
// [SoftmaxSpace: see below] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxGxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH]
// where Global_Q, Global_K and Global_V are optional. They are not allocated when there are no global tokens.
//
// SoftmaxSpace layout (tmp_storage could use the space of scratch1, scratch2, Q and K):
// [Global_Idx: int BxS][batch_global_num: int BxS][sequence_index: int BxS][tmp_storage: int 1024x1]
// [scratch1: BxNxSxS ] [scratch2: BxNxSxS ]
// Allocated size could be slightly larger than needed: batch_global_num uses only Bx1 and allocated BxS.
// Scratch size is allocated as multiples of 256.
// Returns the number of bytes of scratch needed by the Longformer softmax stage.
// Layout: three (B x S) int index buffers up front, followed by a region sized as
// the larger of (a) two BxNxSxS attention scratch buffers or (b) 1024 ints of
// temporary storage used by the cub partition calls.
size_t GetLongformerSoftmaxWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int sequence_length) {
  const size_t partition_tmp_bytes = sizeof(int) * 1024;
  const size_t attention_scratch_bytes =
      2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
  const size_t index_bytes = 3 * batch_size * sequence_length * sizeof(int);
  return index_bytes + ::max(attention_scratch_bytes, partition_tmp_bytes);
}
// Total workspace bytes for Longformer attention:
// softmax scratch + packed Q/K/V + (optionally) packed global Q/K/V.
size_t GetLongformerAttentionWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int head_size,
    int sequence_length,
    int max_num_global) {
  const size_t softmax_bytes = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length);
  const size_t qkv_bytes = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
  // Global Q/K/V buffers are only materialized when at least one batch has global tokens.
  const size_t global_qkv_bytes = (max_num_global > 0) ? qkv_bytes : 0;
  return softmax_bytes + qkv_bytes + global_qkv_bytes;
}
// Fills sequence_index with 0..S-1 for every batch.
// Launch: one block per batch; threads stride across the sequence.
__global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) {
  int* row = sequence_index + blockIdx.x * sequence_length;
  for (int col = threadIdx.x; col < sequence_length; col += blockDim.x) {
    row[col] = col;
  }
}
// TODO: Move this to its own plugin that can be run once for all layers.
// Builds, per batch, the list of global-token positions via cub::DevicePartition.
// Workspace layout (ints): [global_idx: BxS][batch_global_num: BxS][sequence_index: BxS][cub temp ...]
// Only the first B entries of batch_global_num are written/used; BxS is over-allocated.
// Returns global_idx; the caller recovers batch_global_num at global_idx + B*S.
int* BuildGlobalIndex(hipStream_t stream, const int* global_attention, int batch_size, int sequence_length, void* workspace, size_t softmax_workspace_size) {
  int* global_idx = reinterpret_cast<int*>(workspace);
  int* batch_global_num = global_idx + batch_size * sequence_length;  // Number of global tokens in each batch, shape is (batch_size)
  int* sequence_index = batch_global_num + batch_size * sequence_length;
  int* tmp_storage = sequence_index + batch_size * sequence_length;
  hipLaunchKernelGGL(( InitSequenceIndexKernel), dim3(batch_size), dim3(128), 0, stream, sequence_index, sequence_length);
  // Determine temporary device storage requirements (NULL storage pointer => size query only).
  size_t temp_storage_bytes = 0;
  cub::DevicePartition::Flagged(NULL, temp_storage_bytes, sequence_index,
                                global_attention, global_idx, batch_global_num, sequence_length, stream);
  // The index buffers occupy 3*B*S ints at the front of the workspace; the remaining
  // bytes are available as cub temp storage. The previous assert subtracted the
  // element count (3*B*S) instead of the byte count, making the bound too lenient.
  assert(temp_storage_bytes <= softmax_workspace_size - 3 * sizeof(int) * static_cast<size_t>(batch_size) * sequence_length);
  // Partition each batch's sequence indices by the global-attention flag; global
  // positions land at the front of global_idx for that batch.
  for (int i = 0; i < batch_size; ++i) {
    cub::DevicePartition::Flagged(reinterpret_cast<void*>(tmp_storage), temp_storage_bytes, sequence_index,
                                  global_attention + i * sequence_length, global_idx + i * sequence_length,
                                  batch_global_num + i, sequence_length, stream);
  }
  return global_idx;
}
// Numerically stable softmax over one row of the (B x N x S x S) attention score
// matrix per thread block. Grid size is B * N * S; dim0 must equal N * S so that
// batch_index = blockIdx.x / dim0. For a local-attention row only the sliding
// window [col_start, col_end) plus the global columns contribute; for a global
// row the whole sequence contributes. scaler is applied to the raw scores and
// attention_mask (0 or -10000) is added before the exponential.
template <typename T, int blockSize>
__launch_bounds__(blockSize)
__global__ void LongformerSoftmaxKernel(const int* global_attention,
                                        const int* global_idx,
                                        const int* batch_global_num,
                                        const T* input,
                                        const T* attention_mask,
                                        T* output,
                                        float scaler,
                                        int dim0,
                                        int sequence_length,
                                        int attention_window) {
  typedef hipcub::BlockReduce<float, blockSize> BlockReduce;
  __shared__ typename BlockReduce::TempStorage block_reduce_temp;
  __shared__ float max_shared;
  __shared__ float sum_shared;
  const T* input_block = input + sequence_length * blockIdx.x;
  T* output_block = output + sequence_length * blockIdx.x;
  const int batch_index = blockIdx.x / dim0;
  const int row_index = blockIdx.x % sequence_length;
  const int global_num = batch_global_num[batch_index];
  // To be consistent with Huggingface Longformer, the row of a masked word is set to zero.
  if ((float)attention_mask[batch_index * sequence_length + row_index] < 0.0f) {
    for (int i = threadIdx.x; i < sequence_length; i += blockSize) {
      output_block[i] = (T)(0);
    }
    return;
  }
  // Column range contributing to this row: the full sequence for a global row,
  // or the half-open sliding window [col_start, col_end) for a local row.
  int col_start = 0;
  int col_end = sequence_length;
  bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == (int)0);
  if (is_local_row) {
    col_start = row_index - attention_window;
    if (col_start < 0) {
      col_start = 0;
    }
    col_end = row_index + attention_window + 1;
    if (col_end > sequence_length) {
      col_end = sequence_length;
    }
  }
  const T* mask_block = attention_mask + sequence_length * batch_index;
  int tid = threadIdx.x;
  // Pass 1: per-thread maximum of the scaled, masked scores.
  float max_input = -CUDART_INF_F;
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = x * scaler + (float)mask_block[i];
    if (max_input < x) {
      max_input = x;
    }
  }
  if (is_local_row) {
    // Global columns outside the local window also contribute. col_end is
    // exclusive, so a column at exactly col_end is outside the window; the
    // previous condition (i > col_end) wrongly skipped that column.
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = x * scaler + (float)mask_block[i];
        if (max_input < x) {
          max_input = x;
        }
      }
    }
  }
  float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max());
  if (tid == 0) {
    max_shared = max_block;
  }
  // Barrier also makes block_reduce_temp safe to reuse for the sum reduction.
  __syncthreads();
  // Pass 2: per-thread sum of exp(score - max).
  float sum_input = 0.f;
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    sum_input += x;
  }
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      if (i < col_start || i >= col_end) {  // col_end is exclusive; see note above
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        sum_input += x;
      }
    }
  }
  float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum());
  if (tid == 0) {
    sum_shared = sum_block;
  }
  __syncthreads();
  float recip_sum = 1.f / sum_shared;
  if (is_local_row) {
    // We only need to fill in zeros for blocks that will be used in the matrix multiplication
    // following the Softmax.
    //
    // For now zero-out only [row_index - 2*attention_window, row_index + 2*attention_window];
    // we could be more aggressive and reduce the zeroing-out window size since
    // each row has entries in 3 blocks (3*attention_window size instead of 4*attention_window).
    int zero_start = row_index - 2 * attention_window;
    if (zero_start < 0) {
      zero_start = 0;
    }
    int zero_end = row_index + 2 * attention_window;
    if (zero_end > sequence_length) {
      zero_end = sequence_length;
    }
    for (int i = tid + zero_start; i < zero_end; i += blockSize) {
      output_block[i] = (T)(0.);
    }
    // Write all global columns (including those inside the window; the common
    // loop below overwrites window columns with the same value anyway).
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      float x = input_block[i];
      x = expf((x)*scaler + (float)mask_block[i] - max_shared);
      output_block[i] = (T)(recip_sum * x);
    }
  }
  // Write the (local-window or full-sequence) columns.
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    output_block[i] = (T)(recip_sum * x);
  }
}
// Computes attention probabilities (softmax of scaled QK^T with banded local
// attention plus global attention) and multiplies them by V.
// Uses banded strided-batched GEMMs so only the 2-3 WxW blocks per row that the
// sliding window touches are computed, then patches in the global-token rows/columns.
// Returns false on any CUDA/cuBLAS failure.
bool launchSoftmaxKernel(
    hipStream_t stream,
    hipblasHandle_t cublas,
    void* workspace,
    size_t softmax_workspace_size,
    const void* q,                // transposed Q with shape (B, N, S, H)
    const void* k,                // transposed K with shape (B, N, S, H)
    const void* v,                // transposed V with shape (B, N, S, H)
    const void* attention_mask,   // attention mask with shape (B, S), with value 0 not masked and -10000 masked.
    const void* global_q,         // Q for global tokens with shape (B, N, G, H)
    const void* global_k,         // K for global tokens with shape (B, N, S, H)
    const void* global_v,         // V for global tokens with shape (B, N, S, H)
    const int* global_attention,  // global attention with shape (B, S), with value 0 for local attention and 1 for global attention.
    void* output,                 // output with shape (B, N, S, H)
    float scaler,                 // scalar
    int batch_size,               // batch size
    int sequence_length,          // sequence length
    int num_heads,                // number of heads
    int head_size,                // hidden size per head
    int attention_window,         // one sided window size
    int max_num_global,           // maximum number of global tokens (G) in all batches
    size_t element_size) {        // size of element: 2 for half, and 4 for float
  if (batch_size > MAX_LONGFORMER_BATCH_SIZE) {
    ORT_THROW("LongformerAttention CUDA operator does not support batch size > 128.");
  }
  bool is_fp16 = (element_size == 2);
  void* scratch1 = reinterpret_cast<char*>(workspace) + 3 * sizeof(int) * batch_size * sequence_length;
  void* scratch2 = reinterpret_cast<char*>(scratch1) + GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
  // Build index for global tokens
  int* global_idx = BuildGlobalIndex(stream, global_attention, batch_size, sequence_length, workspace, softmax_workspace_size);
  int* batch_global_num = global_idx + batch_size * sequence_length;
  int num_global[MAX_LONGFORMER_BATCH_SIZE] = {-1};
  if (!CUDA_CALL(hipMemcpyAsync(&num_global[0], batch_global_num, batch_size * sizeof(int), hipMemcpyDeviceToHost, stream))) {
    return false;
  }
  // num_global is read on the host below. The async copy above targets pageable
  // host (stack) memory, so synchronize explicitly instead of relying on the
  // implicit synchronization behavior of async copies to pageable memory.
  if (!CUDA_CALL(hipStreamSynchronize(stream))) {
    return false;
  }
  // Setup shared parameters for the strided batched matrix multiplies.
  hipDataType Atype;
  hipDataType Btype;
  hipDataType Ctype;
  hipDataType resultType;
  hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
  __half one_fp16, zero_fp16;
  float one_fp32, zero_fp32;
  void *alpha, *beta_0, *beta_1;
  if (is_fp16) {
    one_fp16 = __float2half(1.f);
    zero_fp16 = __float2half(0.f);
    alpha = static_cast<void*>(&one_fp16);
    beta_0 = static_cast<void*>(&zero_fp16);
    beta_1 = static_cast<void*>(&one_fp16);
    Atype = HIP_R_16F;
    Btype = HIP_R_16F;
    Ctype = HIP_R_16F;
    resultType = HIP_R_16F;
    algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
  } else {
    one_fp32 = 1.f;
    zero_fp32 = 0.f;
    alpha = static_cast<void*>(&one_fp32);
    beta_0 = static_cast<void*>(&zero_fp32);
    beta_1 = static_cast<void*>(&one_fp32);
    Atype = HIP_R_32F;
    Btype = HIP_R_32F;
    Ctype = HIP_R_32F;
    resultType = HIP_R_32F;
  }
  // Strided batch matrix multiply
  //    qk = q * k^T
  // Shapes: q and k = B x N x S x H, qk = B x N x S x S
  // Convert col-major to row-major by swapping q and k in Gemm
  // Local attention part
  // S x S is calculated using sliding block WxW (W is one sided window size) like the following:
  //   [W][W]
  //   [W][W][W]
  //      [W][W][W]
  //         [W][W]
  // The first and last rows have 2 blocks, and the remaining rows have 3 blocks each.
  // The calculation is split into 3 parts: first the middle rows, then the first row, and finally the last row.
  // The results are stored in scratch1.
  // TODO: Save space by not storing the whole matrix. Instead only allocate space for these blocks.
  int w = attention_window;
  int x_offset = num_heads * sequence_length * head_size;
  int y_offset = num_heads * sequence_length * sequence_length;
  int last_block = (sequence_length / w) - 1;
  int strideA = sequence_length * head_size;
  int strideB = sequence_length * head_size;
  int strideC = sequence_length * sequence_length;
  // When S == 2W, there are no middle rows of blocks:
  //   [W][W]
  //   [W][W]
  // We can use a normal matrix multiplication in this case.
  if (sequence_length == 2 * w) {
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      sequence_length,
                                      sequence_length,
                                      head_size,
                                      alpha,
                                      k,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      q,
                                      Btype,
                                      head_size,
                                      sequence_length * head_size,
                                      beta_0,
                                      scratch1,
                                      Ctype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  } else {  // sequence_length > 2 * w
    // Middle rows: for each (batch, head) compute (S/w - 2) bands of shape w x 3w.
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        void* q_head = (char*)q + (i * x_offset + j * sequence_length * head_size + w * head_size) * element_size;
        void* k_head = (char*)k + (i * x_offset + j * sequence_length * head_size) * element_size;
        void* qk_head = (char*)scratch1 + (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(hipblasGemmStridedBatchedEx(cublas,
                                          HIPBLAS_OP_T,
                                          HIPBLAS_OP_N,
                                          3 * w,                    // m
                                          w,                        // n
                                          head_size,                // k
                                          alpha,                    // alpha
                                          k_head,                   // A
                                          Atype,                    // A type
                                          head_size,                // lda
                                          w * head_size,            // strideA
                                          q_head,                   // B
                                          Btype,                    // B type
                                          head_size,                // ldb
                                          w * head_size,            // strideB
                                          beta_0,                   // beta
                                          qk_head,                  // C
                                          Ctype,                    // C type
                                          sequence_length,          // ldc
                                          sequence_length * w + w,  // strideC
                                          count,                    // batch count
                                          resultType,
                                          algo));
      }
    }
    // First row of blocks (w x 2w) for all batches/heads at once.
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      2 * w,                   // m
                                      w,                       // n
                                      head_size,               // k
                                      alpha,                   // alpha
                                      k,                       // A
                                      Atype,                   // A type
                                      head_size,               // lda
                                      strideA,                 // strideA
                                      q,                       // B
                                      Btype,                   // B type
                                      head_size,               // ldb
                                      strideB,                 // strideB
                                      beta_0,                  // beta
                                      scratch1,                // C
                                      Ctype,                   // C type
                                      sequence_length,         // ldc
                                      strideC,                 // strideC
                                      batch_size * num_heads,  // batch count
                                      resultType,
                                      algo));
    // Last row of blocks (w x 2w) for all batches/heads at once.
    void* q_head = (char*)q + (last_block * w * head_size) * element_size;
    void* k_head = (char*)k + ((last_block - 1) * w * head_size) * element_size;
    void* qk_head = (char*)scratch1 + (last_block * w * sequence_length + (last_block - 1) * w) * element_size;
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_T,
                                      HIPBLAS_OP_N,
                                      2 * w,
                                      w,
                                      head_size,
                                      alpha,
                                      k_head,
                                      Atype,
                                      head_size,
                                      strideA,
                                      q_head,
                                      Btype,
                                      head_size,
                                      strideB,
                                      beta_0,
                                      qk_head,
                                      Ctype,
                                      sequence_length,
                                      strideC,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  }
  // Global attention part
  for (int i = 0; i < batch_size; ++i) {
    if (num_global[i] > 0) {
      void* q_batch = (char*)q + (i * x_offset) * element_size;
      void* k_batch = (char*)k + (i * x_offset) * element_size;
      void* qk_batch = (char*)scratch1 + (i * y_offset) * element_size;
      // Local tokens attending global tokens
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_T,
                                        HIPBLAS_OP_N,
                                        num_global[i],
                                        sequence_length,
                                        head_size,
                                        alpha,
                                        k_batch,
                                        Atype,
                                        head_size,
                                        strideA,
                                        q_batch,
                                        Btype,
                                        head_size,
                                        strideB,
                                        beta_0,
                                        qk_batch,
                                        Ctype,
                                        sequence_length,
                                        strideC,
                                        num_heads,
                                        resultType,
                                        algo));
      void* global_q_batch = (char*)global_q + (i * num_heads * max_num_global * head_size) * element_size;
      void* global_k_batch = (char*)global_k + (i * x_offset) * element_size;
      int strideB_global = max_num_global * head_size;
      // Global tokens attending everything
      // These GEMMs need to be last to make sure all global token entries are re-written.
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_T,
                                        HIPBLAS_OP_N,
                                        sequence_length,
                                        num_global[i],
                                        head_size,
                                        alpha,
                                        global_k_batch,
                                        Atype,
                                        head_size,
                                        strideA,
                                        global_q_batch,
                                        Btype,
                                        head_size,
                                        strideB_global,
                                        beta_0,
                                        qk_batch,
                                        Ctype,
                                        sequence_length,
                                        strideC,
                                        num_heads,
                                        resultType,
                                        algo));
    }
  }
  // Softmax over each row of the banded/global score matrix; one block per row.
  int dim0 = sequence_length * num_heads;
  int dim1 = sequence_length;
  void* softmax_out = scratch2;
  const int blockSize = 64;
  const int gridSize = batch_size * num_heads * sequence_length;
  if (is_fp16) {
    hipLaunchKernelGGL(( LongformerSoftmaxKernel<__half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
        global_attention,
        global_idx,
        batch_global_num,
        static_cast<const __half*>(scratch1),
        static_cast<const __half*>(attention_mask),
        static_cast<__half*>(softmax_out), scaler, dim0, dim1, attention_window);
  } else {
    hipLaunchKernelGGL(( LongformerSoftmaxKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
        global_attention,
        global_idx,
        batch_global_num,
        static_cast<const float*>(scratch1),
        static_cast<const float*>(attention_mask),
        static_cast<float*>(softmax_out), scaler, dim0, dim1, attention_window);
  }
  // Run the matrix multiply: output = softmax_out * v
  //   softmax_out: B x N x S x S
  //   v: B x N x S x H
  //   attn_out: B x N x S x H
  // Calculation uses full Gemm (S == 2W) or sliding blocks (S > 2W) in a way similar to the local attention part.
  if (sequence_length == 2 * w) {
    // convert col-major to row-major by swapping softmax_out and v
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      sequence_length,
                                      sequence_length,
                                      alpha,
                                      v,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      softmax_out,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      output,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  } else {  // sequence_length > 2 * w
    // Middle rows: each output band is (w x H) = (w x 3w) band times (3w x H) of V.
    for (int i = 0; i < batch_size; ++i) {
      for (int j = 0; j < num_heads; ++j) {
        void* v_head = (char*)v + (i * x_offset + j * head_size * sequence_length) * element_size;
        void* prob_head = (char*)softmax_out + (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
        void* out_head = (char*)output + (i * x_offset + j * head_size * sequence_length + w * head_size) * element_size;
        int count = (sequence_length - 2 * w) / w;
        CHECK(hipblasGemmStridedBatchedEx(cublas,
                                          HIPBLAS_OP_N,
                                          HIPBLAS_OP_N,
                                          head_size,
                                          w,
                                          3 * w,
                                          alpha,
                                          v_head,
                                          Atype,
                                          head_size,
                                          w * head_size,
                                          prob_head,
                                          Btype,
                                          sequence_length,
                                          sequence_length * w + w,
                                          beta_0,
                                          out_head,
                                          Ctype,
                                          head_size,
                                          w * head_size,
                                          count,
                                          resultType,
                                          algo));
      }
    }
    // First row of blocks.
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      w,
                                      2 * w,
                                      alpha,
                                      v,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      softmax_out,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      output,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
    // Last row of blocks.
    void* v_head = (char*)v + (last_block - 1) * w * head_size * element_size;
    void* prob_head = (char*)softmax_out + (sequence_length * last_block * w + (last_block - 1) * w) * element_size;
    void* out_head = (char*)output + last_block * w * head_size * element_size;
    CHECK(hipblasGemmStridedBatchedEx(cublas,
                                      HIPBLAS_OP_N,
                                      HIPBLAS_OP_N,
                                      head_size,
                                      w,
                                      2 * w,
                                      alpha,
                                      v_head,
                                      Atype,
                                      head_size,
                                      sequence_length * head_size,
                                      prob_head,
                                      Btype,
                                      sequence_length,
                                      sequence_length * sequence_length,
                                      beta_0,
                                      out_head,
                                      Ctype,
                                      head_size,
                                      sequence_length * head_size,
                                      batch_size * num_heads,
                                      resultType,
                                      algo));
  }
  // Patch in the global-attention contributions per batch.
  for (int i = 0; i < batch_size; ++i) {
    if (num_global[i] > 0) {
      // Local rows attending global tokens: accumulate (beta_1) onto the banded result.
      int glob_longdim_mm = (last_block - 1) * w;
      void* v_head = (char*)v + (i * x_offset) * element_size;
      void* prob_head = (char*)softmax_out + (i * y_offset + 2 * w * sequence_length) * element_size;
      void* out_head = (char*)output + (i * x_offset + 2 * w * head_size) * element_size;
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_N,
                                        HIPBLAS_OP_N,
                                        head_size,
                                        glob_longdim_mm,
                                        num_global[i],
                                        alpha,
                                        v_head,
                                        Atype,
                                        head_size,
                                        sequence_length * head_size,
                                        prob_head,
                                        Btype,
                                        sequence_length,
                                        sequence_length * sequence_length,
                                        beta_1,
                                        out_head,
                                        Ctype,
                                        head_size,
                                        sequence_length * head_size,
                                        num_heads,
                                        resultType,
                                        algo));
      // Global tokens
      v_head = (char*)global_v + (i * x_offset) * element_size;
      prob_head = (char*)softmax_out + (i * y_offset) * element_size;
      out_head = (char*)output + (i * x_offset) * element_size;
      CHECK(hipblasGemmStridedBatchedEx(cublas,
                                        HIPBLAS_OP_N,
                                        HIPBLAS_OP_N,
                                        head_size,
                                        num_global[i],
                                        sequence_length,  // Re-write entries completely
                                        alpha,
                                        v_head,
                                        Atype,
                                        head_size,
                                        sequence_length * head_size,
                                        prob_head,
                                        Btype,
                                        sequence_length,
                                        sequence_length * sequence_length,
                                        beta_0,    // Use beta=0 to overwrite
                                        out_head,  // Here assumes global tokens are at the beginning of sequence.
                                        Ctype,
                                        head_size,
                                        sequence_length * head_size,
                                        num_heads,
                                        resultType,
                                        algo));
    }
  }
  return true;
}
// Computes Longformer attention for one layer.
// input: packed QKV of shape (B, S, 3, N, H); global_input: optional packed QKV for
// global tokens in the same layout. Transposes to (3, B, N, S, H), runs the banded
// softmax + attention, then transposes the result back to (B, S, N, H) in output.
// Returns false on any launch/compute failure.
template <typename T>
bool LongformerQkvToContext(
    const hipDeviceProp_t& prop, hipblasHandle_t& cublas, hipStream_t stream,
    const int batch_size, const int sequence_length, const int num_heads, const int head_size,
    const int window, const size_t element_size,
    const T* input, const T* attention_mask,
    const T* global_input, const int* global_attention, const int max_num_global,
    T* workspace,
    T* output) {
  size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length);
  T* qkv = reinterpret_cast<T*>((char*)workspace + softmax_workspace_size);
  // Input should be BxSx3xNxH => qkv: 3xBxNxSxH
  if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, qkv)) {
    return false;
  }
  // Input 'global_input' should be BxSx3xNxH => global_qkv: 3xBxNxSxH
  // NOTE: qkv is a T*, so pointer arithmetic is already scaled by sizeof(T).
  // The previous code also multiplied by element_size, double-scaling the offset
  // and placing global_qkv past the allocated workspace.
  T* global_qkv = qkv + 3 * batch_size * sequence_length * num_heads * head_size;
  // When there is no global token, no need to process global Q, K and V
  if (max_num_global > 0 && nullptr != global_input) {
    if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, global_input, global_qkv)) {
      return false;
    }
  }
  // Now qkv has Q, K, V: each has size BxNxSxH
  const int elements = batch_size * num_heads * sequence_length * head_size;
  const T* q = qkv;
  const T* k = q + elements;
  const T* v = k + elements;
  const T* global_q = global_qkv;
  const T* global_k = global_q + elements;
  const T* global_v = global_k + elements;
  hipblasSetStream(cublas, stream);
  CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);
  // Q*K' entries are scaled by 1/sqrt(H)
  const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
  // Ensure the transpose kernels above have finished before the softmax stage.
  hipDeviceSynchronize();
  T* temp_output = qkv;  // Q buffer is reused for the attention output (Q is no longer needed)
  if (!launchSoftmaxKernel(
          stream,
          cublas,
          workspace,
          softmax_workspace_size,
          q,                 // Transposed Q with shape B x N x S x H
          k,                 // Transposed K with shape B x N x S x H
          v,                 // Transposed V with shape B x N x S x H
          attention_mask,    // Attention mask flags with shape B x S
          global_q,          // Transposed global Q with shape B x N x G x H
          global_k,          // Transposed global K with shape B x N x S x H
          global_v,          // Transposed global V with shape B x N x S x H
          global_attention,  // Global attention flags with shape B x S
          temp_output,       // Output with shape B x N x S x H
          rsqrt_head_size,   // Scaler
          batch_size,        // Batch size
          sequence_length,   // Sequence length
          num_heads,         // Number of attention heads
          head_size,         // Hidden size per head
          window,            // Half (one-sided) window size
          max_num_global,    // Maximum number of global tokens (G)
          element_size)) {
    return false;
  }
  // The temp_output is BxNxSxH, transpose it to final output BxSxNxH
  return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, temp_output, output);
}
// Entry point for Longformer attention: dispatches on element size
// (2 bytes selects the half-precision path, otherwise float).
// Runs on the default stream.
bool LaunchLongformerAttentionKernel(
    const hipDeviceProp_t& prop,
    const void* input,
    const void* attention_mask,
    const void* global_input,
    const int* global_attention,
    void* output,
    int batch_size,
    int sequence_length,
    int num_heads,
    int head_size,
    int window,
    int max_num_global,
    void* workspace,
    hipblasHandle_t& cublas,
    const size_t element_size) {
  const hipStream_t stream = nullptr;  // use default stream
  const bool use_half = (element_size == 2);
  if (use_half) {
    return LongformerQkvToContext(prop, cublas, stream,
                                  batch_size, sequence_length, num_heads, head_size, window, element_size,
                                  reinterpret_cast<const half*>(input),
                                  reinterpret_cast<const half*>(attention_mask),
                                  reinterpret_cast<const half*>(global_input),
                                  global_attention,
                                  max_num_global,
                                  reinterpret_cast<half*>(workspace),
                                  reinterpret_cast<half*>(output));
  }
  return LongformerQkvToContext(prop, cublas, stream,
                                batch_size, sequence_length, num_heads, head_size, window, element_size,
                                reinterpret_cast<const float*>(input),
                                reinterpret_cast<const float*>(attention_mask),
                                reinterpret_cast<const float*>(global_input),
                                global_attention,
                                max_num_global,
                                reinterpret_cast<float*>(workspace),
                                reinterpret_cast<float*>(output));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 2e4b3f110bee3afaee6a24742150470d8c1dda57.cu | /*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Limitations of current Longformer Attention CUDA Kernels:
// (1) Does not support global tokens in the middle. All global tokens shall be in the beginning of sequence.
// (2) Batch size <= 128 (defined in MAX_LONGFORMER_BATCH_SIZE)
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <math_constants.h>
#include <library_types.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_impl.h"
#include "attention_impl.h"
#include "attention_softmax.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(status) \
if (!CUBLAS_CALL(status)) { \
return false; \
}
constexpr int MAX_LONGFORMER_BATCH_SIZE = 128;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), max number of global tokens (G)
//
// Workspace layout (by default, the data type T is float or half):
// [SoftmaxSpace: see below] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxGxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH]
// where Global_Q, Global_K and Global_V are optional. They are not allocated when there are no global tokens.
//
// SoftmaxSpace layout (tmp_storage could use the space of scratch1, scratch2, Q and K):
// [Global_Idx: int BxS][batch_global_num: int BxS][sequence_index: int BxS][tmp_storage: int 1024x1]
// [scratch1: BxNxSxS ] [scratch2: BxNxSxS ]
// Allocated size could be slightly larger than needed: batch_global_num uses only Bx1 and allocated BxS.
// Scratch size is allocated as multiples of 256.
// Returns the number of bytes of scratch needed by the Longformer softmax stage.
// Layout: three (B x S) int index buffers up front, followed by a region sized as
// the larger of (a) two BxNxSxS attention scratch buffers or (b) 1024 ints of
// temporary storage for the cub partition calls.
size_t GetLongformerSoftmaxWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int sequence_length) {
  const size_t partition_tmp_bytes = sizeof(int) * 1024;
  const size_t attention_scratch_bytes =
      2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
  const size_t index_bytes = 3 * batch_size * sequence_length * sizeof(int);
  return index_bytes + std::max(attention_scratch_bytes, partition_tmp_bytes);
}
// Total workspace bytes for Longformer attention:
// softmax scratch + packed Q/K/V + (optionally) packed global Q/K/V.
size_t GetLongformerAttentionWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int head_size,
    int sequence_length,
    int max_num_global) {
  const size_t softmax_bytes = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length);
  const size_t qkv_bytes = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
  // Global Q/K/V buffers are only materialized when at least one batch has global tokens.
  const size_t global_qkv_bytes = (max_num_global > 0) ? qkv_bytes : 0;
  return softmax_bytes + qkv_bytes + global_qkv_bytes;
}
// Fills sequence_index with 0..S-1 for every batch.
// Launch: one block per batch; threads stride across the sequence.
__global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) {
  int* row = sequence_index + blockIdx.x * sequence_length;
  for (int col = threadIdx.x; col < sequence_length; col += blockDim.x) {
    row[col] = col;
  }
}
// TODO: Move this to its own plugin that can be run once for all layers.
// Builds, per batch, the list of global-token positions via cub::DevicePartition.
// Workspace layout (ints): [global_idx: BxS][batch_global_num: BxS][sequence_index: BxS][cub temp ...]
// Only the first B entries of batch_global_num are written/used; BxS is over-allocated.
// Returns global_idx; the caller recovers batch_global_num at global_idx + B*S.
int* BuildGlobalIndex(cudaStream_t stream, const int* global_attention, int batch_size, int sequence_length, void* workspace, size_t softmax_workspace_size) {
  int* global_idx = reinterpret_cast<int*>(workspace);
  int* batch_global_num = global_idx + batch_size * sequence_length;  // Number of global tokens in each batch, shape is (batch_size)
  int* sequence_index = batch_global_num + batch_size * sequence_length;
  int* tmp_storage = sequence_index + batch_size * sequence_length;
  InitSequenceIndexKernel<<<batch_size, 128, 0, stream>>>(sequence_index, sequence_length);
  // Determine temporary device storage requirements (NULL storage pointer => size query only).
  size_t temp_storage_bytes = 0;
  cub::DevicePartition::Flagged(NULL, temp_storage_bytes, sequence_index,
                                global_attention, global_idx, batch_global_num, sequence_length, stream);
  // The index buffers occupy 3*B*S ints (bytes, not elements) at the front of the
  // workspace; the remainder is available as cub temp storage. The previous assert
  // subtracted the element count instead of the byte count, making the bound too lenient.
  assert(temp_storage_bytes <= softmax_workspace_size - 3 * sizeof(int) * static_cast<size_t>(batch_size) * sequence_length);
  // Partition each batch's sequence indices by the global-attention flag; global
  // positions land at the front of global_idx for that batch.
  for (int i = 0; i < batch_size; ++i) {
    cub::DevicePartition::Flagged(reinterpret_cast<void*>(tmp_storage), temp_storage_bytes, sequence_index,
                                  global_attention + i * sequence_length, global_idx + i * sequence_length,
                                  batch_global_num + i, sequence_length, stream);
  }
  return global_idx;
}
// Numerically stable softmax over one row of the (B x N x S x S) attention score
// matrix per thread block. Grid size is B * N * S; dim0 must equal N * S so that
// batch_index = blockIdx.x / dim0. For a local-attention row only the sliding
// window [col_start, col_end) plus the global columns contribute; for a global
// row the whole sequence contributes. scaler is applied to the raw scores and
// attention_mask (0 or -10000) is added before the exponential.
template <typename T, int blockSize>
__launch_bounds__(blockSize)
__global__ void LongformerSoftmaxKernel(const int* global_attention,
                                        const int* global_idx,
                                        const int* batch_global_num,
                                        const T* input,
                                        const T* attention_mask,
                                        T* output,
                                        float scaler,
                                        int dim0,
                                        int sequence_length,
                                        int attention_window) {
  typedef cub::BlockReduce<float, blockSize> BlockReduce;
  __shared__ typename BlockReduce::TempStorage block_reduce_temp;
  __shared__ float max_shared;
  __shared__ float sum_shared;
  const T* input_block = input + sequence_length * blockIdx.x;
  T* output_block = output + sequence_length * blockIdx.x;
  const int batch_index = blockIdx.x / dim0;
  const int row_index = blockIdx.x % sequence_length;
  const int global_num = batch_global_num[batch_index];
  // To be consistent with Huggingface Longformer, the row of a masked word is set to zero.
  if ((float)attention_mask[batch_index * sequence_length + row_index] < 0.0f) {
    for (int i = threadIdx.x; i < sequence_length; i += blockSize) {
      output_block[i] = (T)(0);
    }
    return;
  }
  // Column range contributing to this row: the full sequence for a global row,
  // or the half-open sliding window [col_start, col_end) for a local row.
  int col_start = 0;
  int col_end = sequence_length;
  bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == (int)0);
  if (is_local_row) {
    col_start = row_index - attention_window;
    if (col_start < 0) {
      col_start = 0;
    }
    col_end = row_index + attention_window + 1;
    if (col_end > sequence_length) {
      col_end = sequence_length;
    }
  }
  const T* mask_block = attention_mask + sequence_length * batch_index;
  int tid = threadIdx.x;
  // Pass 1: per-thread maximum of the scaled, masked scores.
  float max_input = -CUDART_INF_F;
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = x * scaler + (float)mask_block[i];
    if (max_input < x) {
      max_input = x;
    }
  }
  if (is_local_row) {
    // Global columns outside the local window also contribute. col_end is
    // exclusive, so a column at exactly col_end is outside the window; the
    // previous condition (i > col_end) wrongly skipped that column.
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      if (i < col_start || i >= col_end) {
        float x = input_block[i];
        x = x * scaler + (float)mask_block[i];
        if (max_input < x) {
          max_input = x;
        }
      }
    }
  }
  float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max());
  if (tid == 0) {
    max_shared = max_block;
  }
  // Barrier also makes block_reduce_temp safe to reuse for the sum reduction.
  __syncthreads();
  // Pass 2: per-thread sum of exp(score - max).
  float sum_input = 0.f;
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    sum_input += x;
  }
  if (is_local_row) {
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      if (i < col_start || i >= col_end) {  // col_end is exclusive; see note above
        float x = input_block[i];
        x = expf((x)*scaler + (float)mask_block[i] - max_shared);
        sum_input += x;
      }
    }
  }
  float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum());
  if (tid == 0) {
    sum_shared = sum_block;
  }
  __syncthreads();
  float recip_sum = 1.f / sum_shared;
  if (is_local_row) {
    // We only need to fill in zeros for blocks that will be used in the matrix multiplication
    // following the Softmax.
    //
    // For now zero-out only [row_index - 2*attention_window, row_index + 2*attention_window];
    // we could be more aggressive and reduce the zeroing-out window size since
    // each row has entries in 3 blocks (3*attention_window size instead of 4*attention_window).
    int zero_start = row_index - 2 * attention_window;
    if (zero_start < 0) {
      zero_start = 0;
    }
    int zero_end = row_index + 2 * attention_window;
    if (zero_end > sequence_length) {
      zero_end = sequence_length;
    }
    for (int i = tid + zero_start; i < zero_end; i += blockSize) {
      output_block[i] = (T)(0.);
    }
    // Write all global columns (including those inside the window; the common
    // loop below overwrites window columns with the same value anyway).
    for (int g = tid; g < global_num; g += blockSize) {
      int i = global_idx[g];
      float x = input_block[i];
      x = expf((x)*scaler + (float)mask_block[i] - max_shared);
      output_block[i] = (T)(recip_sum * x);
    }
  }
  // Write the (local-window or full-sequence) columns.
  for (int i = tid + col_start; i < col_end; i += blockSize) {
    float x = input_block[i];
    x = expf((x)*scaler + (float)mask_block[i] - max_shared);
    output_block[i] = (T)(recip_sum * x);
  }
}
// Runs the full Longformer attention score pipeline for one layer:
//   1) QK^T for the sliding-window (banded) part and for global tokens, via
//      cuBLAS strided-batched GEMMs into scratch1 (shape B x N x S x S),
//   2) the banded softmax (LongformerSoftmaxKernel) into scratch2,
//   3) softmax * V (banded + global GEMMs) into 'output' (B x N x S x H).
// All GEMMs swap A and B to express row-major math through column-major
// cuBLAS. Returns false only if the device-to-host copy of the per-batch
// global token counts fails; GEMM errors go through the CHECK macro.
bool launchSoftmaxKernel(
cudaStream_t stream,
cublasHandle_t cublas,
void* workspace,
size_t softmax_workspace_size,
const void* q, // transposed Q with shape (B, N, S, H)
const void* k, // transposed K with shape (B, N, S, H)
const void* v, // transposed V with shape (B, N, S, H)
const void* attention_mask, // attention mask with shape (B, S), with value 0 not masked and -10000 masked.
const void* global_q, // Q for global tokens with shape (B, N, G, H)
const void* global_k, // K for global tokens with shape (B, N, S, H)
const void* global_v, // V for global tokens with shape (B, N, S, H)
const int* global_attention, // global attention with shape (B, S), with value 0 for local attention and 1 for global attention.
void* output, // output with shape (B, N, S, H)
float scaler, // scalar
int batch_size, // batch size
int sequence_length, // sequence length
int num_heads, // number of heads
int head_size, // hidden size per head
int attention_window, // one sided windows size
int max_num_global, // maximum number of global tokens (G) in all batches
size_t element_size) { // size of element: 2 for half, and 4 for float
if (batch_size > MAX_LONGFORMER_BATCH_SIZE) {
ORT_THROW("LongformerAttention CUDA operator does not support batch size > 128.");
}
bool is_fp16 = (element_size == 2);
// Workspace layout: [global index ints][scratch1: QK scores][scratch2: softmax out]
void* scratch1 = reinterpret_cast<char*>(workspace) + 3 * sizeof(int) * batch_size * sequence_length;
void* scratch2 = reinterpret_cast<char*>(scratch1) + GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
// Build index for global tokens
int* global_idx = BuildGlobalIndex(stream, global_attention, batch_size, sequence_length, workspace, softmax_workspace_size);
int* batch_global_num = global_idx + batch_size * sequence_length;
int num_global[MAX_LONGFORMER_BATCH_SIZE] = {-1};
// NOTE(review): this async copy targets pageable host memory, which per the
// CUDA docs blocks until the copy completes -- that is what makes reading
// num_global[] below safe. Confirm before switching to pinned memory.
if (!CUDA_CALL(cudaMemcpyAsync(&num_global[0], batch_global_num, batch_size * sizeof(int), cudaMemcpyDeviceToHost, stream))) {
return false;
}
// setup shared parameters for two strided batched matrix multiplies
cudaDataType_t Atype;
cudaDataType_t Btype;
cudaDataType_t Ctype;
cudaDataType_t resultType;
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
__half one_fp16, zero_fp16;
float one_fp32, zero_fp32;
void *alpha, *beta_0, *beta_1;
if (is_fp16) {
one_fp16 = __float2half(1.f);
zero_fp16 = __float2half(0.f);
alpha = static_cast<void*>(&one_fp16);
beta_0 = static_cast<void*>(&zero_fp16);
beta_1 = static_cast<void*>(&one_fp16);
Atype = CUDA_R_16F;
Btype = CUDA_R_16F;
Ctype = CUDA_R_16F;
resultType = CUDA_R_16F;
algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
} else {
one_fp32 = 1.f;
zero_fp32 = 0.f;
alpha = static_cast<void*>(&one_fp32);
beta_0 = static_cast<void*>(&zero_fp32);
beta_1 = static_cast<void*>(&one_fp32);
Atype = CUDA_R_32F;
Btype = CUDA_R_32F;
Ctype = CUDA_R_32F;
resultType = CUDA_R_32F;
}
// Strided batch matrix multiply
//    qk = q * k^T
// Shapes: q and k = B x N x S x H, qk = B x N x S x S
// Convert col-major to row-major by swapping q and k in Gemm
// Local attention part
// S x S is calculated using sliding block WxW (W is one sided window size) like the following:
//   [W][W]
//   [W][W][W]
//      [W][W][W]
//         [W][W]
// The first and last rows have 2 blocks, and the remaining has 3 blocks per row.
// The calculation are splited into 3 parts. Firstly, fill the middle rows,  then the first row and finally the last row.
// The results are stored in scratch1.
// TODO: Save space by not storing the whole matrix. Instead only allocate space for these blocks.
int w = attention_window;
int x_offset = num_heads * sequence_length * head_size;
int y_offset = num_heads * sequence_length * sequence_length;
int last_block = (sequence_length / w) - 1;
int strideA = sequence_length * head_size;
int strideB = sequence_length * head_size;
int strideC = sequence_length * sequence_length;
// When S == 2W, there is no middle rows of blocks:
//   [W][W]
//   [W][W]
// We can use normal matrix multiplication in this case.
if (sequence_length == 2 * w) {
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
sequence_length,
sequence_length,
head_size,
alpha,
k,
Atype,
head_size,
sequence_length * head_size,
q,
Btype,
head_size,
sequence_length * head_size,
beta_0,
scratch1,
Ctype,
sequence_length,
sequence_length * sequence_length,
batch_size * num_heads,
resultType,
algo));
} else {  // sequence_length > 2 * w
// Middle rows: each W-row attends a 3W-wide band; one batched GEMM per head.
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
void* q_head = (char*)q + (i * x_offset + j * sequence_length * head_size + w * head_size) * element_size;
void* k_head = (char*)k + (i * x_offset + j * sequence_length * head_size) * element_size;
void* qk_head = (char*)scratch1 + (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
int count = (sequence_length - 2 * w) / w;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
3 * w,                    // m
w,                        // n
head_size,                // k
alpha,                    // alpha
k_head,                   // A
Atype,                    // A type
head_size,                // lda
w * head_size,            // strideA
q_head,                   // B
Btype,                    // B type
head_size,                // ldb
w * head_size,            // strideB
beta_0,                   // beta
qk_head,                  // C
Ctype,                    // C type
sequence_length,          // ldc
sequence_length * w + w,  // strideC
count,                    // batch count
resultType,
algo));
}
}
// First row of blocks: 2W-wide band starting at the sequence head.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w,                   // m
w,                       // n
head_size,               // k
alpha,                   // alpha
k,                       // A
Atype,                   // A type
head_size,               // lda
strideA,                 // strideA
q,                       // B
Btype,                   // B type
head_size,               // ldb
strideB,                 // strideB
beta_0,                  // beta
scratch1,                // C
Ctype,                   // C type
sequence_length,         // ldc
strideC,                 // strideC
batch_size * num_heads,  // batch count
resultType,
algo));
// Last row of blocks: 2W-wide band ending at the sequence tail.
void* q_head = (char*)q + (last_block * w * head_size) * element_size;
void* k_head = (char*)k + ((last_block - 1) * w * head_size) * element_size;
void* qk_head = (char*)scratch1 + (last_block * w * sequence_length + (last_block - 1) * w) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w,
w,
head_size,
alpha,
k_head,
Atype,
head_size,
strideA,
q_head,
Btype,
head_size,
strideB,
beta_0,
qk_head,
Ctype,
sequence_length,
strideC,
batch_size * num_heads,
resultType,
algo));
}
// Global attention part
for (int i = 0; i < batch_size; ++i) {
if (num_global[i] > 0) {
void* q_batch = (char*)q + (i * x_offset) * element_size;
void* k_batch = (char*)k + (i * x_offset) * element_size;
void* qk_batch = (char*)scratch1 + (i * y_offset) * element_size;
// Local tokens attending global tokens
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
num_global[i],
sequence_length,
head_size,
alpha,
k_batch,
Atype,
head_size,
strideA,
q_batch,
Btype,
head_size,
strideB,
beta_0,
qk_batch,
Ctype,
sequence_length,
strideC,
num_heads,
resultType,
algo));
void* global_q_batch = (char*)global_q + (i * num_heads * max_num_global * head_size) * element_size;
void* global_k_batch = (char*)global_k + (i * x_offset) * element_size;
int strideB_global = max_num_global * head_size;
// Global tokens attending everything
// This GEMMs need to be last to make sure all global token entries are re-written.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
sequence_length,
num_global[i],
head_size,
alpha,
global_k_batch,
Atype,
head_size,
strideA,
global_q_batch,
Btype,
head_size,
strideB_global,
beta_0,
qk_batch,
Ctype,
sequence_length,
strideC,
num_heads,
resultType,
algo));
}
}
// Banded softmax over the scores in scratch1; probabilities go to scratch2.
int dim0 = sequence_length * num_heads;
int dim1 = sequence_length;
void* softmax_out = scratch2;
const int blockSize = 64;
const int gridSize = batch_size * num_heads * sequence_length;
if (is_fp16) {
LongformerSoftmaxKernel<__half, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_idx,
batch_global_num,
static_cast<const __half*>(scratch1),
static_cast<const __half*>(attention_mask),
static_cast<__half*>(softmax_out), scaler, dim0, dim1, attention_window);
} else {
LongformerSoftmaxKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_idx,
batch_global_num,
static_cast<const float*>(scratch1),
static_cast<const float*>(attention_mask),
static_cast<float*>(softmax_out), scaler, dim0, dim1, attention_window);
}
// Run the matrix multiply: output = softmax_out * v
//   softmax_out: B x N x S x S
//   v: B x N x S x H
//   attn_out: B x N x S x H
// Calculation uses full Gemm (S == 2W) or sliding blocks (S > 2W) in a way similar to local attention part.
if (sequence_length == 2 * w) {
// convert col-major to row-major by swapping softmax_out and v
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
sequence_length,
sequence_length,
alpha,
v,
Atype,
head_size,
sequence_length * head_size,
softmax_out,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
output,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
}
else {  // sequence_length > 2 * w
// Middle rows of the band.
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
void* v_head = (char*)v + (i * x_offset + j * head_size * sequence_length) * element_size;
void* prob_head = (char*)softmax_out + (i * y_offset + j * sequence_length * sequence_length + w * sequence_length) * element_size;
void* out_head = (char*)output + (i * x_offset + j * head_size * sequence_length + w * head_size) * element_size;
int count = (sequence_length - 2 * w) / w;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
3 * w,
alpha,
v_head,
Atype,
head_size,
w * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * w + w,
beta_0,
out_head,
Ctype,
head_size,
w * head_size,
count,
resultType,
algo));
}
}
// First row of the band.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
2 * w,
alpha,
v,
Atype,
head_size,
sequence_length * head_size,
softmax_out,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
output,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
// Last row of the band.
void* v_head = (char*)v + (last_block - 1) * w * head_size * element_size;
void* prob_head = (char*)softmax_out + (sequence_length * last_block * w + (last_block - 1) * w) * element_size;
void* out_head = (char*)output + last_block * w * head_size * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
w,
2 * w,
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,
out_head,
Ctype,
head_size,
sequence_length * head_size,
batch_size * num_heads,
resultType,
algo));
}
// Global attention contribution to the output.
for (int i = 0; i < batch_size; ++i) {
if (num_global[i] > 0) {
int glob_longdim_mm = (last_block - 1) * w;
void* v_head = (char*)v + (i * x_offset) * element_size;
void* prob_head = (char*)softmax_out + (i * y_offset + 2 * w * sequence_length) * element_size;
void* out_head = (char*)output + (i * x_offset + 2 * w * head_size) * element_size;
// Accumulate (beta_1) global-token columns into the banded result.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
glob_longdim_mm,
num_global[i],
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_1,
out_head,
Ctype,
head_size,
sequence_length * head_size,
num_heads,
resultType,
algo));
// Global tokens
v_head = (char*)global_v + (i * x_offset) * element_size;
prob_head = (char*)softmax_out + (i * y_offset) * element_size;
out_head = (char*)output + (i * x_offset) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size,
num_global[i],
sequence_length,  // Re-write entries completely
alpha,
v_head,
Atype,
head_size,
sequence_length * head_size,
prob_head,
Btype,
sequence_length,
sequence_length * sequence_length,
beta_0,           // Use beta=0 to overwrite
out_head,         // Here assumes global tokens are at the beginning of sequence.
Ctype,
head_size,
sequence_length * head_size,
num_heads,
resultType,
algo));
}
}
return true;
}
// Longformer attention core for one element type T (half or float):
// transposes Q/K/V (and the global-token Q/K/V) into B x N x S x H layout,
// runs the banded softmax attention, then transposes back to B x S x N x H.
//
// Workspace layout (in order):
//   [softmax workspace][qkv: 3 x B x N x S x H][global_qkv: 3 x B x N x S x H]
//
// Returns false if any sub-kernel launch fails.
template <typename T>
bool LongformerQkvToContext(
    const cudaDeviceProp& prop, cublasHandle_t& cublas, cudaStream_t stream,
    const int batch_size, const int sequence_length, const int num_heads, const int head_size,
    const int window, const size_t element_size,
    const T* input, const T* attention_mask,
    const T* global_input, const int* global_attention, const int max_num_global,
    T* workspace,
    T* output) {
  size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length);
  T* qkv = reinterpret_cast<T*>((char*)workspace + softmax_workspace_size);

  // Input should be BxSx3xNxH => qkv: 3xBxNxSxH
  if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, qkv)) {
    return false;
  }

  // Number of elements in one B x N x S x H tensor.
  const int elements = batch_size * num_heads * sequence_length * head_size;

  // Input 'global_input' should be BxSx3xNxH => global_qkv: 3xBxNxSxH
  // BUG FIX: qkv is a T*, so pointer arithmetic already scales by sizeof(T).
  // The original offset multiplied by element_size as well, placing
  // global_qkv element_size times too far into the workspace.
  T* global_qkv = qkv + 3 * elements;

  // When there is no global token, no need to process global Q, K and V
  if (max_num_global > 0 && nullptr != global_input) {
    if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, global_input, global_qkv)) {
      return false;
    }
  }

  // Now qkv has Q, K, V: each has size BxNxSxH
  const T* q = qkv;
  const T* k = q + elements;
  const T* v = k + elements;
  const T* global_q = global_qkv;
  const T* global_k = global_q + elements;
  const T* global_v = global_k + elements;

  cublasSetStream(cublas, stream);
  CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);

  // Q*K' are scaled by 1/sqrt(H)
  const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));

  // NOTE(review): a full-device sync before the GEMMs; since the transposes
  // run on the same stream as the GEMMs this looks unnecessary -- confirm
  // before removing (kept to preserve behavior).
  cudaDeviceSynchronize();

  T* temp_output = qkv;  // Q will be overwritten
  if (!launchSoftmaxKernel(
          stream,
          cublas,
          workspace,
          softmax_workspace_size,
          q,                 // Transposed Q with shape B x N x S x H
          k,                 // Transposed K with shape B x N x S x H
          v,                 // Transposed V with shape B x N x S x H
          attention_mask,    // Attention mask flags with shape B x S
          global_q,          // Transposed global Q with shape B x N x G x H
          global_k,          // Transposed global K with shape B x N x S x H
          global_v,          // Transposed global V with shape B x N x S x H
          global_attention,  // Global attention flags with shape B x S
          temp_output,       // Output with shape B x N x S x H
          rsqrt_head_size,   // Scaler
          batch_size,        // Batch size
          sequence_length,   // Sequence length
          num_heads,         // Number of attention heads
          head_size,         // Hidden size per head
          window,            // Half (one-sided) windows size
          max_num_global,    // Maximum number of global tokens (G)
          element_size)) {
    return false;
  }

  // The temp_output is BxNxSxH, transpose it to final output BxSxNxH
  return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, temp_output, output);
}
// Entry point: dispatches Longformer attention to the fp16 or fp32
// instantiation of LongformerQkvToContext based on element_size.
// Work is issued on the default stream.
bool LaunchLongformerAttentionKernel(
    const cudaDeviceProp& prop,
    const void* input,
    const void* attention_mask,
    const void* global_input,
    const int* global_attention,
    void* output,
    int batch_size,
    int sequence_length,
    int num_heads,
    int head_size,
    int window,
    int max_num_global,
    void* workspace,
    cublasHandle_t& cublas,
    const size_t element_size) {
  // use default stream
  const cudaStream_t stream = nullptr;
  if (element_size != 2) {
    // 4-byte elements: float path.
    return LongformerQkvToContext(prop, cublas, stream,
                                  batch_size, sequence_length, num_heads, head_size, window, element_size,
                                  static_cast<const float*>(input),
                                  static_cast<const float*>(attention_mask),
                                  static_cast<const float*>(global_input),
                                  global_attention,
                                  max_num_global,
                                  static_cast<float*>(workspace),
                                  static_cast<float*>(output));
  }
  // 2-byte elements: half path.
  return LongformerQkvToContext(prop, cublas, stream,
                                batch_size, sequence_length, num_heads, head_size, window, element_size,
                                static_cast<const half*>(input),
                                static_cast<const half*>(attention_mask),
                                static_cast<const half*>(global_input),
                                global_attention,
                                max_num_global,
                                static_cast<half*>(workspace),
                                static_cast<half*>(output));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
0fe61894f301062bd6e83e0f3675d124c3c074c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 4
//Note: N should always be in powers of 2 (like 2, 4, 8, 16, 32, ...) -Mohit Agrawal
// In-place parallel min-reduction over an array of blockDim.x*2 ints in
// global memory. Must be launched as a SINGLE block of N/2 threads (N a
// power of two); the minimum ends up in input[0]. The array is clobbered.
//
// BUG FIX: the original loop had no barrier between reduction steps, which
// is a data race as soon as the block spans more than one warp (N > 64):
// a thread could read input[snd] before the previous step's write to it
// became visible.
__global__ void FindMin(int* input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;

    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            int fst = tid * step_size * 2;  // left element of this thread's pair
            int snd = fst + step_size;      // right element
            if (input[snd] < input[fst])
            {
                input[fst] = input[snd];
            }
        }
        // Publish this step's writes before any thread starts the next step.
        // Every thread iterates the same number of times, so the barrier is
        // reached uniformly by the whole block.
        __syncthreads();
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}
// Demo driver: fills N random ints, reduces them to a minimum on the GPU,
// and prints the result. N must be a power of two (kernel precondition).
int main()
{
    // Seed the host RNG so each run generates a different input.
    time_t t;
    srand((unsigned) time(&t));

    int *h = (int*)malloc(N*sizeof(int));
    if (h == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for(int i=0; i<N; i++)
    {
        h[i] = rand()%N;
    }
    for(int i=0; i<N; i++)
    {
        printf("%d ", h[i]);
    }
    printf("\n");

    int* d;
    if (hipMalloc(&d, N*sizeof(int)) != hipSuccess)
    {
        fprintf(stderr, "hipMalloc failed\n");
        free(h);
        return 1;
    }
    hipMemcpy(d, h, N*sizeof(int), hipMemcpyHostToDevice);
    // One block of N/2 threads: one thread per pair in the first step.
    hipLaunchKernelGGL(FindMin, dim3(1), dim3(N/2), 0, 0, d);
    hipDeviceSynchronize();

    // Only element 0 holds the reduced minimum. BUG FIX: the original
    // heap-allocated a one-int 'result' buffer and never freed it; a stack
    // variable removes the leak.
    int result;
    hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost);
    printf("Min is: %d \n", result);

    hipFree(d);
    free(h);
    return 0;
}
| 0fe61894f301062bd6e83e0f3675d124c3c074c9.cu | #include <stdio.h>
#define N 4
//Note: N should always be in powers of 2 (like 2, 4, 8, 16, 32, ...) -Mohit Agrawal
// In-place parallel min-reduction over an array of blockDim.x*2 ints in
// global memory. Must be launched as a SINGLE block of N/2 threads (N a
// power of two); the minimum ends up in input[0]. The array is clobbered.
//
// BUG FIX: the original loop had no barrier between reduction steps, which
// is a data race as soon as the block spans more than one warp (N > 64):
// a thread could read input[snd] before the previous step's write to it
// became visible.
__global__ void FindMin(int* input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;

    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            int fst = tid * step_size * 2;  // left element of this thread's pair
            int snd = fst + step_size;      // right element
            if (input[snd] < input[fst])
            {
                input[fst] = input[snd];
            }
        }
        // Publish this step's writes before any thread starts the next step.
        // Every thread iterates the same number of times, so the barrier is
        // reached uniformly by the whole block.
        __syncthreads();
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}
// Demo driver: fills N random ints, reduces them to a minimum on the GPU,
// and prints the result. N must be a power of two (kernel precondition).
// NOTE(review): malloc/rand/srand/time rely on <stdlib.h>/<time.h> arriving
// transitively through the CUDA headers -- confirm with an explicit include.
int main()
{
    // Seed the host RNG so each run generates a different input.
    time_t t;
    srand((unsigned) time(&t));

    int *h = (int*)malloc(N*sizeof(int));
    if (h == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for(int i=0; i<N; i++)
    {
        h[i] = rand()%N;
    }
    for(int i=0; i<N; i++)
    {
        printf("%d ", h[i]);
    }
    printf("\n");

    int* d;
    if (cudaMalloc(&d, N*sizeof(int)) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        free(h);
        return 1;
    }
    cudaMemcpy(d, h, N*sizeof(int), cudaMemcpyHostToDevice);
    // One block of N/2 threads: one thread per pair in the first step.
    FindMin <<<1, N/2 >>>(d);
    cudaDeviceSynchronize();

    // Only element 0 holds the reduced minimum. BUG FIX: the original
    // heap-allocated a one-int 'result' buffer and never freed it; a stack
    // variable removes the leak.
    int result;
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Min is: %d \n", result);

    cudaFree(d);
    free(h);
    return 0;
}
|
c5e8768c16bb7856c2bf6a71868f021eea7dad32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
// Maps the thrift compact-protocol list element type code (the low nibble of
// a list/set header byte) to the corresponding ST_FLD_* field type, so list
// elements can be skipped with the same switch as struct fields.
static const __device__ __constant__ uint8_t g_list2struct[16] =
{
0, 1, 2, ST_FLD_BYTE,
ST_FLD_DOUBLE, 5, ST_FLD_I16, 7,
ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY,
ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST
};
// Per-warp parsing state: a byte cursor over one column chunk's compressed
// data plus the symbols most recently produced by the thrift page parser.
struct byte_stream_s
{
const uint8_t *cur;  // current read position
const uint8_t *end;  // one past the last byte of the chunk
const uint8_t *base; // start of the chunk's compressed data
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
// Reads one byte from the stream, returning 0 once the end is reached.
inline __device__ unsigned int getb(byte_stream_s *bs)
{
  if (bs->cur >= bs->end)
    return 0;
  return *bs->cur++;
}
// Advances the stream cursor by bytecnt, clamped so it never passes the end.
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
  size_t remaining = (size_t)(bs->end - bs->cur);
  if (bytecnt > remaining)
    bytecnt = remaining;
  bs->cur += bytecnt;
}
// Decodes an unsigned LEB128 varint (thrift's unsigned encoding): 7 payload
// bits per byte, high bit set on every byte except the last.
__device__ uint32_t get_u32(byte_stream_s *bs)
{
  uint32_t result = 0;
  uint32_t shift = 0;
  for (;;)
  {
    uint32_t byte_val = getb(bs);
    result |= (byte_val & 0x7f) << shift;
    if (!(byte_val & 0x80))
      return result;
    shift += 7;
  }
}
// Decodes a zigzag-encoded signed 32-bit value (thrift i16/i32 encoding):
// the low bit of the varint carries the sign.
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
  uint32_t zz = get_u32(bs);
  uint32_t sign_mask = (uint32_t)(-(int32_t)(zz & 1));
  return (int32_t)((zz >> 1u) ^ sign_mask);
}
// Skips one thrift (compact protocol) field of type t without interpreting
// its value. Nested structs are tracked with struct_depth; list/set element
// repetition with rep_cnt. One level of list nesting is supported; skipping
// a list of lists is not handled (see the ST_FLD_LIST case).
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0; // number of unterminated nested structs we are inside
int rep_cnt = 0;      // remaining elements of the current list/set
do {
if (rep_cnt != 0)
{
// Consume one more element of the current list/set; its type is in t.
rep_cnt--;
}
else if (struct_depth != 0)
{
// Inside a nested struct: read field headers; a stop byte (0) closes
// the current struct. Break out when the outermost struct is done.
int c;
do {
c = getb(bs);
if (!c)
--struct_depth;
} while (!c && struct_depth);
if (!struct_depth)
break;
t = c & 0xf;
// Zero field-id delta: the field id follows as a full zigzag varint.
if (!(c & 0xf0))
get_i32(bs);
}
// Skip the field payload according to its wire type.
switch (t)
{
case ST_FLD_TRUE:
case ST_FLD_FALSE:
break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64:
get_u32(bs);
break;
case ST_FLD_BYTE:
skip_bytes(bs, 1);
break;
case ST_FLD_DOUBLE:
skip_bytes(bs, 8);
break;
case ST_FLD_BINARY:
skip_bytes(bs, get_u32(bs));
break;
case ST_FLD_LIST:
case ST_FLD_SET:
{ // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf)
n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
}
break;
case ST_FLD_STRUCT:
struct_depth++;
break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
__device__ bool fn(byte_stream_s *bs) \
{ \
int fld = 0; \
for (;;) \
{ \
int c, t, f; \
c = getb(bs); \
if (!c) \
break; \
f = c >> 4; \
t = c & 0xf; \
fld = (f) ? fld+f : get_i32(bs); \
switch(fld) { \
#define PARQUET_FLD_ENUM(id, m, mt) \
case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \
#define PARQUET_FLD_INT32(id, m) \
case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \
#define PARQUET_FLD_STRUCT(id, m) \
case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \
#define PARQUET_END_STRUCT() \
default: \
skip_struct_field(bs, t); \
break; \
} \
} \
return true; \
} \
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
// One warp per column chunk, 4 warps (128 threads) per block. Lane 0 of each
// warp walks the chunk's byte stream parsing page headers; the other lanes
// help with the 128-byte chunk/page struct copies and receive results via
// warp shuffles.
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s * const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// Cooperative 32-bit-word copy of the chunk descriptor into shared memory.
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks)
{
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t)
{
// Lane 0 owns the byte stream cursor and the scratch PageInfo.
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
// Parse page headers until all values are accounted for or data runs out.
while (values_found < num_values && bs->cur < bs->end)
{
int index_out = -1;
if (t == 0)
{
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0)
{
// Dictionary pages are indexed first, then data pages after them.
switch (bs->page_type)
{
case DATA_PAGE:
// TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time
// -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows
bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values
// Fall-through to V2
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default:
index_out = -1;
break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
}
else
{
// Malformed header: abandon the rest of this chunk.
bs->cur = bs->end;
}
}
// Broadcast lane 0's output slot; all lanes then store the PageInfo.
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages)
{
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t))
{
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0)
{
// Publish the per-chunk page counts back to global memory.
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
// One warp per column chunk, 4 warps per block; only lane 0 of each warp does
// the (serial) index build. Dictionary entries are 4-byte little-endian
// length followed by character data; the output records a pointer and length
// per entry.
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// Cooperative 32-bit-word copy of the chunk descriptor into shared memory.
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks)
{
return;
}
// Only chunks that have a dictionary page and an output buffer are indexed.
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index)
{
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++)
{
int len = 0;
if (cur + 4 <= dict_size)
{
// Little-endian 4-byte length prefix.
len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size)
{
pos = cur;
cur = cur + 4 + len;
}
else
{
// Corrupt length: stop advancing; remaining entries reuse the
// last valid position with len 0.
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
// Host launcher for gpuDecodePageHeaders: one warp per chunk, four warps
// (128 threads) per block, on the given stream.
hipError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
                                      int32_t num_chunks,
                                      hipStream_t stream) {
  const dim3 block(128, 1);
  const dim3 grid((num_chunks + 3) >> 2, 1);  // ceil(num_chunks / 4) blocks
  hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(grid), dim3(block), 0, stream, chunks, num_chunks);
  return hipSuccess;
}
// Host launcher for gpuBuildStringDictionaryIndex: one warp per chunk, four
// warps (128 threads) per block, on the given stream.
hipError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
                                               int32_t num_chunks,
                                               hipStream_t stream) {
  const dim3 block(128, 1);
  const dim3 grid((num_chunks + 3) >> 2, 1);  // ceil(num_chunks / 4) blocks
  hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(grid), dim3(block), 0, stream, chunks, num_chunks);
  return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| c5e8768c16bb7856c2bf6a71868f021eea7dad32.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
static const __device__ __constant__ uint8_t g_list2struct[16] =
{
0, 1, 2, ST_FLD_BYTE,
ST_FLD_DOUBLE, 5, ST_FLD_I16, 7,
ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY,
ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST
};
struct byte_stream_s
{
const uint8_t *cur;
const uint8_t *end;
const uint8_t *base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
// Reads one byte from the stream, returning 0 once the end is reached.
inline __device__ unsigned int getb(byte_stream_s *bs)
{
  if (bs->cur >= bs->end)
    return 0;
  return *bs->cur++;
}
// Advances the stream cursor by bytecnt, clamped so it never passes the end.
inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt)
{
  size_t remaining = (size_t)(bs->end - bs->cur);
  if (bytecnt > remaining)
    bytecnt = remaining;
  bs->cur += bytecnt;
}
// Decodes an unsigned LEB128 varint (thrift's unsigned encoding): 7 payload
// bits per byte, high bit set on every byte except the last.
__device__ uint32_t get_u32(byte_stream_s *bs)
{
  uint32_t result = 0;
  uint32_t shift = 0;
  for (;;)
  {
    uint32_t byte_val = getb(bs);
    result |= (byte_val & 0x7f) << shift;
    if (!(byte_val & 0x80))
      return result;
    shift += 7;
  }
}
// Decodes a zigzag-encoded signed 32-bit value (thrift i16/i32 encoding):
// the low bit of the varint carries the sign.
inline __device__ int32_t get_i32(byte_stream_s *bs)
{
  uint32_t zz = get_u32(bs);
  uint32_t sign_mask = (uint32_t)(-(int32_t)(zz & 1));
  return (int32_t)((zz >> 1u) ^ sign_mask);
}
// Skips one thrift (compact protocol) field of type t without interpreting
// its value. Nested structs are tracked with struct_depth; list/set element
// repetition with rep_cnt. One level of list nesting is supported; skipping
// a list of lists is not handled (see the ST_FLD_LIST case).
__device__ void skip_struct_field(byte_stream_s *bs, int t)
{
int struct_depth = 0; // number of unterminated nested structs we are inside
int rep_cnt = 0;      // remaining elements of the current list/set
do {
if (rep_cnt != 0)
{
// Consume one more element of the current list/set; its type is in t.
rep_cnt--;
}
else if (struct_depth != 0)
{
// Inside a nested struct: read field headers; a stop byte (0) closes
// the current struct. Break out when the outermost struct is done.
int c;
do {
c = getb(bs);
if (!c)
--struct_depth;
} while (!c && struct_depth);
if (!struct_depth)
break;
t = c & 0xf;
// Zero field-id delta: the field id follows as a full zigzag varint.
if (!(c & 0xf0))
get_i32(bs);
}
// Skip the field payload according to its wire type.
switch (t)
{
case ST_FLD_TRUE:
case ST_FLD_FALSE:
break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64:
get_u32(bs);
break;
case ST_FLD_BYTE:
skip_bytes(bs, 1);
break;
case ST_FLD_DOUBLE:
skip_bytes(bs, 8);
break;
case ST_FLD_BINARY:
skip_bytes(bs, get_u32(bs));
break;
case ST_FLD_LIST:
case ST_FLD_SET:
{ // NOTE: skipping a list of lists is not handled
int c = getb(bs);
int n = c >> 4;
if (n == 0xf)
n = get_u32(bs);
t = g_list2struct[c & 0xf];
if (t == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
}
break;
case ST_FLD_STRUCT:
struct_depth++;
break;
}
} while (rep_cnt || struct_depth);
}
#define PARQUET_BEGIN_STRUCT(fn) \
__device__ bool fn(byte_stream_s *bs) \
{ \
int fld = 0; \
for (;;) \
{ \
int c, t, f; \
c = getb(bs); \
if (!c) \
break; \
f = c >> 4; \
t = c & 0xf; \
fld = (f) ? fld+f : get_i32(bs); \
switch(fld) { \
#define PARQUET_FLD_ENUM(id, m, mt) \
case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \
#define PARQUET_FLD_INT32(id, m) \
case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \
#define PARQUET_FLD_STRUCT(id, m) \
case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \
#define PARQUET_END_STRUCT() \
default: \
skip_struct_field(bs, t); \
break; \
} \
} \
return true; \
} \
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_ENUM(2, page.encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2)
PARQUET_FLD_INT32(1, page.num_values)
PARQUET_FLD_INT32(3, page.num_rows)
PARQUET_FLD_ENUM(4, page.encoding, Encoding);
PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding);
PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding);
PARQUET_END_STRUCT()
PARQUET_BEGIN_STRUCT(gpuParsePageHeader)
PARQUET_FLD_ENUM(1, page_type, PageType)
PARQUET_FLD_INT32(2, page.uncompressed_page_size)
PARQUET_FLD_INT32(3, page.compressed_page_size)
PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader)
PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader)
PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2)
PARQUET_END_STRUCT()
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ byte_stream_s bs_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
byte_stream_s * const bs = &bs_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk < num_chunks)
{
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo *page_info;
if (!t)
{
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
SYNCWARP();
while (values_found < num_values && bs->cur < bs->end)
{
int index_out = -1;
if (t == 0)
{
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0)
{
switch (bs->page_type)
{
case DATA_PAGE:
// TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time
// -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows
bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values
// Fall-through to V2
case DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_values;
break;
case DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default:
index_out = -1;
break;
}
bs->page.page_data = const_cast<uint8_t *>(bs->cur);
bs->cur += bs->page.compressed_page_size;
}
else
{
bs->cur = bs->end;
}
}
index_out = SHFL0(index_out);
if (index_out >= 0 && index_out < max_num_pages)
{
// NOTE: Assumes that sizeof(PageInfo) <= 128
if (t < sizeof(PageInfo) / sizeof(uint32_t))
{
((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t];
}
}
num_values = SHFL0(num_values);
SYNCWARP();
}
if (t == 0)
{
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
**/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int t = threadIdx.x & 0x1f;
int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5);
ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5];
if (chunk < num_chunks)
{
// NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128
if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t))
{
((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t];
}
}
__syncthreads();
if (chunk >= num_chunks)
{
return;
}
if (!t && ck->num_dict_pages > 0 && ck->str_dict_index)
{
// Data type to describe a string
nvstrdesc_s *dict_index = ck->str_dict_index;
const uint8_t *dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++)
{
int len = 0;
if (cur + 4 <= dict_size)
{
len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size)
{
pos = cur;
cur = cur + 4 + len;
}
else
{
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].ptr = (const char *)(dict + pos + 4);
dict_index[i].count = len;
}
}
}
cudaError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
cudaError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks,
int32_t num_chunks,
cudaStream_t stream) {
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks);
return cudaSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
1cdab04073862b8d1f1206a8e6fe687f9be27d20.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/Lerp.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/OpMathType.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_tensor_name[] = "lerp_tensor";
void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
auto dtype = iter.common_dtype();
if(at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto lerp_tensor_string = jiterator_stringify(
template <typename T>
T lerp_tensor(T self_val, T end_val, T weight_val) {
return (std::abs(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val -
(end_val - self_val) * (static_cast<T>(1) - weight_val);
}
); // lerp_tensor_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
jitted_gpu_kernel<
/*name=*/ lerp_tensor_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 3>(iter, lerp_tensor_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
at::native::gpu_kernel(
iter,
[] GPU_LAMBDA(
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
opmath_t self_val_f = self_val;
opmath_t end_val_f = end_val;
opmath_t weight_val_f = weight_val;
return lerp(self_val, end_val, weight_val);
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
dtype, "lerp_cuda",
[&] {
at::native::gpu_kernel(
iter,
[] GPU_LAMBDA(
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
return lerp(self_val, end_val, weight_val);
});
});
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_scalar_name[] = "lerp_scalar";
void lerp_scalar_kernel(at::TensorIteratorBase& iter, const c10::Scalar& weight) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto lerp_scalar_string = jiterator_stringify(
template <typename T>
T lerp_scalar(T self_val, T end_val, T weight_val) {
return (std::abs(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val -
(end_val - self_val) * (static_cast<T>(1) - weight_val);
}
); // lerp_scalar_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
jitted_gpu_kernel<
/*name=*/ lerp_scalar_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(
iter,
lerp_scalar_string,
/*scalar_pos=*/ at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/ 0,
/*extra_args=*/ std::make_tuple(weight_val));
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
at::native::gpu_kernel(
iter,
[=] GPU_LAMBDA(scalar_t self_val, scalar_t end_val) {
opmath_t self_val_f = self_val;
opmath_t end_val_f = end_val;
return lerp(self_val, end_val, weight_val);
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
dtype, "lerp_cuda",
[&]{
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
at::native::gpu_kernel(
iter, [=] GPU_LAMBDA(scalar_t self_val, scalar_t end_val) {
return lerp(self_val, end_val, weight_val);
});
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(lerp_kernel_tensor_weight, &lerp_tensor_kernel);
REGISTER_DISPATCH(lerp_kernel_scalar_weight, &lerp_scalar_kernel);
} // namespace at::native
| 1cdab04073862b8d1f1206a8e6fe687f9be27d20.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/Lerp.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/OpMathType.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_tensor_name[] = "lerp_tensor";
void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
auto dtype = iter.common_dtype();
if(at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto lerp_tensor_string = jiterator_stringify(
template <typename T>
T lerp_tensor(T self_val, T end_val, T weight_val) {
return (std::abs(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val -
(end_val - self_val) * (static_cast<T>(1) - weight_val);
}
); // lerp_tensor_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
jitted_gpu_kernel<
/*name=*/ lerp_tensor_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 3>(iter, lerp_tensor_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
at::native::gpu_kernel(
iter,
[] GPU_LAMBDA(
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
opmath_t self_val_f = self_val;
opmath_t end_val_f = end_val;
opmath_t weight_val_f = weight_val;
return lerp(self_val, end_val, weight_val);
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
dtype, "lerp_cuda",
[&] {
at::native::gpu_kernel(
iter,
[] GPU_LAMBDA(
scalar_t self_val,
scalar_t end_val,
scalar_t weight_val) -> scalar_t {
return lerp(self_val, end_val, weight_val);
});
});
}
}
CONSTEXPR_EXCEPT_WIN_CUDA char lerp_scalar_name[] = "lerp_scalar";
void lerp_scalar_kernel(at::TensorIteratorBase& iter, const c10::Scalar& weight) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto lerp_scalar_string = jiterator_stringify(
template <typename T>
T lerp_scalar(T self_val, T end_val, T weight_val) {
return (std::abs(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val -
(end_val - self_val) * (static_cast<T>(1) - weight_val);
}
); // lerp_scalar_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
jitted_gpu_kernel<
/*name=*/ lerp_scalar_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(
iter,
lerp_scalar_string,
/*scalar_pos=*/ at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/ 0,
/*extra_args=*/ std::make_tuple(weight_val));
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "lerp_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
at::native::gpu_kernel(
iter,
[=] GPU_LAMBDA(scalar_t self_val, scalar_t end_val) {
opmath_t self_val_f = self_val;
opmath_t end_val_f = end_val;
return lerp(self_val, end_val, weight_val);
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
dtype, "lerp_cuda",
[&]{
using opmath_t = at::opmath_type<scalar_t>;
auto weight_val = weight.to<opmath_t>();
at::native::gpu_kernel(
iter, [=] GPU_LAMBDA(scalar_t self_val, scalar_t end_val) {
return lerp(self_val, end_val, weight_val);
});
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(lerp_kernel_tensor_weight, &lerp_tensor_kernel);
REGISTER_DISPATCH(lerp_kernel_scalar_weight, &lerp_scalar_kernel);
} // namespace at::native
|
52aba216dfca376e3e1eda262a03b386e2c85e45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kmean.h"
#include "Feat_comm.h"
#include "memory_srlr.h"
#include "kmean_kernel.h"
#include "wtime.h"
#define LOG2PI (log(2*PI))
const int N = 1000000;
KMean::KMean() {
m_WeightIndex = NULL;
dimObjects = NULL;
memberShip = NULL;
device_memberShip = NULL;
device_dimObjects = NULL;
clusterSize = NULL;
device_clusterSize = NULL;
dimClusters = NULL;
device_dimClusters = NULL;
dimDiagCovs = NULL;
device_diagCovs = NULL;
dMats = NULL;
device_dMats = NULL;
}
KMean::~KMean() {
/*
if(m_WeightIndex) {
free(m_WeightIndex);
m_WeightIndex=NULL;
}
*/
Free2D(dimObjects);
Free2D(dimClusters);
Free(memberShip);
if(device_dimObjects) {
hipFree(device_dimObjects);
device_dimObjects = NULL;
}
if(device_memberShip) {
hipFree(device_memberShip);
device_memberShip = NULL;
}
if(device_clusterSize) {
hipFree(device_clusterSize);
device_clusterSize = NULL;
}
Free(clusterSize);
Free2D(dimDiagCovs);
if(device_dimClusters) {
hipFree(device_dimClusters);
device_dimClusters = NULL;
}
if(device_diagCovs) {
hipFree(device_diagCovs);
device_diagCovs = NULL;
}
Free(dMats);
if(device_dMats) {
hipFree(device_dMats);
device_dMats = NULL;
}
}
void KMean::KMeanCluster() {
int numOfBlock = (numObjs - 1) / FIND_NEAREST_BLOCKSIZE + 1;
int shareMemorySize = numDims * numClusters * sizeof(float);
if(shareMemorySize > 49152) {
shareMemorySize = 1;
}
hipLaunchKernelGGL(( cuda_find_nearest_cluster), dim3(numOfBlock), dim3(FIND_NEAREST_BLOCKSIZE), shareMemorySize, 0, shareMemorySize, device_dimObjects, device_dimClusters, numObjs, numDims, numClusters, device_memberShip);
numOfBlock = (numObjs - 1) / ACCUMULATE_BLOCKSIZE + 1;
hipMemset(device_dimClusters, 0, numClusters * numDims * sizeof(float));
hipMemset(device_clusterSize, 0, numClusters * sizeof(int));
hipLaunchKernelGGL(( cuda_accumulate_clusters), dim3(numOfBlock), dim3(ACCUMULATE_BLOCKSIZE), 0, 0, device_dimObjects, device_memberShip, numObjs, numDims, numClusters, device_clusterSize, device_dimClusters);
hipLaunchKernelGGL(( cuda_average_clusters), dim3(numDims), dim3(numClusters), 0, 0, device_clusterSize, device_dimClusters);
}
void KMean::DataPrepare(struct Features &features, int MaxMixNum) {
this->features = &features;
numDims = features.featureDim;
/// COPY features into buffers
numObjs = 0;
for(int idx = 0;idx < features.nFeatures;idx++) {
numObjs += features.featureSize[idx];
}
Malloc2D(dimObjects, numDims, numObjs, float);
memberShip = (int *) malloc(sizeof(int) * numObjs);
hipMalloc(&device_memberShip, sizeof(int) * numObjs);
int featureIdx = 0;
for(int idx = 0;idx < features.nFeatures; idx++) {
for(int idy = 0; idy < features.featureSize[idx]; idy ++) {
for(int idz = 0;idz < numDims; idz ++) {
dimObjects[idz][featureIdx] = features.features[idx][idy * numDims + idz];
}
featureIdx++;
}
}
Malloc2D(dimClusters, numDims, MaxMixNum, float);
hipMalloc(&device_dimObjects, numObjs * numDims * sizeof(float));
hipMalloc(&device_dimClusters, MaxMixNum * numDims * sizeof(float));
Malloc2D(dimDiagCovs, numDims, MaxMixNum, float);
hipMalloc(&device_diagCovs, MaxMixNum * numDims * sizeof(float));
hipMemcpy(device_dimObjects, dimObjects[0], numObjs * numDims * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&device_clusterSize, MaxMixNum * sizeof(int));
clusterSize = (int *) malloc(MaxMixNum * sizeof(int));
dMats = (float *) malloc(MaxMixNum * sizeof(float));
hipMalloc(&device_dMats, MaxMixNum * sizeof(float));
}
void KMean::KMeanInitalize() {
numClusters = m_nMixNum;
for(int j=0;j < numDims;j++) {
for(int m=0;m < numClusters;m++) {
dimClusters[0][j*numClusters + m] = m_pGmmModel[0].pGauss[m].pfMean[j];
}
}
hipMemcpy(device_dimClusters, dimClusters[0], numClusters * numDims * sizeof(float), hipMemcpyHostToDevice);
}
void KMean::KMeanFinalize() {
// clusters
hipMemcpy(dimClusters[0], device_dimClusters, numClusters * numDims * sizeof(float), hipMemcpyDeviceToHost);
// clusterSize
hipMemcpy(clusterSize, device_clusterSize, numClusters * sizeof(int), hipMemcpyDeviceToHost);
for(int gaussIdx = 0; gaussIdx < numClusters; gaussIdx++) {
m_pGmmModel[0].pfWeight[gaussIdx] = (float)clusterSize[gaussIdx] / numObjs;
}
/// diag Covariance
int numOfBlock = (numObjs - 1) / ACCUMULATE_BLOCKSIZE + 1;
hipMemset(device_diagCovs, 0, numClusters * numDims * sizeof(float));
hipLaunchKernelGGL(( cuda_accumulate_diagcovs), dim3(numOfBlock), dim3(ACCUMULATE_BLOCKSIZE), 0, 0, device_dimObjects, device_memberShip, device_dimClusters, numObjs, numDims, numClusters, device_diagCovs);
hipMemset(device_dMats, 0, numClusters * sizeof(float));
hipLaunchKernelGGL(( cuda_average_diagcovs), dim3(numDims), dim3(numClusters), 0, 0, device_clusterSize, device_diagCovs, device_dMats);
hipMemcpy(dMats, device_dMats, numClusters * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(dimDiagCovs[0], device_diagCovs, numClusters * numDims * sizeof(float), hipMemcpyDeviceToHost);
for(int clusterIdx = 0;clusterIdx < numClusters; clusterIdx++) {
m_pGmmModel[0].pGauss[clusterIdx].dMat = (dMats[clusterIdx] - numDims * LOG2PI) * 0.5 + log(m_pGmmModel[0].pfWeight[clusterIdx]);
for(int dimIdx = 0; dimIdx < numDims; dimIdx ++) {
m_pGmmModel[0].pGauss[clusterIdx].pfDiagCov[dimIdx] = dimDiagCovs[0][dimIdx * numClusters + clusterIdx];
m_pGmmModel[0].pGauss[clusterIdx].pfMean[dimIdx] = dimClusters[0][dimIdx * numClusters + clusterIdx];
}
}
}
void KMean::KMeanIteration() {
KMeanCluster();
}
void KMean::KMeanMain(int KMeanIterNum) {
/// New Splited Mixture Gaussian Model
KMeanInitalize();
for(int i = 0;i < KMeanIterNum;i++) {
KMeanIteration();
}
KMeanFinalize();
}
| 52aba216dfca376e3e1eda262a03b386e2c85e45.cu | #include "kmean.h"
#include "Feat_comm.h"
#include "memory_srlr.h"
#include "kmean_kernel.h"
#include "wtime.h"
#define LOG2PI (log(2*PI))
const int N = 1000000;
KMean::KMean() {
m_WeightIndex = NULL;
dimObjects = NULL;
memberShip = NULL;
device_memberShip = NULL;
device_dimObjects = NULL;
clusterSize = NULL;
device_clusterSize = NULL;
dimClusters = NULL;
device_dimClusters = NULL;
dimDiagCovs = NULL;
device_diagCovs = NULL;
dMats = NULL;
device_dMats = NULL;
}
KMean::~KMean() {
/*
if(m_WeightIndex) {
free(m_WeightIndex);
m_WeightIndex=NULL;
}
*/
Free2D(dimObjects);
Free2D(dimClusters);
Free(memberShip);
if(device_dimObjects) {
cudaFree(device_dimObjects);
device_dimObjects = NULL;
}
if(device_memberShip) {
cudaFree(device_memberShip);
device_memberShip = NULL;
}
if(device_clusterSize) {
cudaFree(device_clusterSize);
device_clusterSize = NULL;
}
Free(clusterSize);
Free2D(dimDiagCovs);
if(device_dimClusters) {
cudaFree(device_dimClusters);
device_dimClusters = NULL;
}
if(device_diagCovs) {
cudaFree(device_diagCovs);
device_diagCovs = NULL;
}
Free(dMats);
if(device_dMats) {
cudaFree(device_dMats);
device_dMats = NULL;
}
}
void KMean::KMeanCluster() {
int numOfBlock = (numObjs - 1) / FIND_NEAREST_BLOCKSIZE + 1;
int shareMemorySize = numDims * numClusters * sizeof(float);
if(shareMemorySize > 49152) {
shareMemorySize = 1;
}
cuda_find_nearest_cluster<<<numOfBlock, FIND_NEAREST_BLOCKSIZE, shareMemorySize>>>(shareMemorySize, device_dimObjects, device_dimClusters, numObjs, numDims, numClusters, device_memberShip);
numOfBlock = (numObjs - 1) / ACCUMULATE_BLOCKSIZE + 1;
cudaMemset(device_dimClusters, 0, numClusters * numDims * sizeof(float));
cudaMemset(device_clusterSize, 0, numClusters * sizeof(int));
cuda_accumulate_clusters<<<numOfBlock, ACCUMULATE_BLOCKSIZE>>>(device_dimObjects, device_memberShip, numObjs, numDims, numClusters, device_clusterSize, device_dimClusters);
cuda_average_clusters<<<numDims, numClusters>>> (device_clusterSize, device_dimClusters);
}
void KMean::DataPrepare(struct Features &features, int MaxMixNum) {
this->features = &features;
numDims = features.featureDim;
/// COPY features into buffers
numObjs = 0;
for(int idx = 0;idx < features.nFeatures;idx++) {
numObjs += features.featureSize[idx];
}
Malloc2D(dimObjects, numDims, numObjs, float);
memberShip = (int *) malloc(sizeof(int) * numObjs);
cudaMalloc(&device_memberShip, sizeof(int) * numObjs);
int featureIdx = 0;
for(int idx = 0;idx < features.nFeatures; idx++) {
for(int idy = 0; idy < features.featureSize[idx]; idy ++) {
for(int idz = 0;idz < numDims; idz ++) {
dimObjects[idz][featureIdx] = features.features[idx][idy * numDims + idz];
}
featureIdx++;
}
}
Malloc2D(dimClusters, numDims, MaxMixNum, float);
cudaMalloc(&device_dimObjects, numObjs * numDims * sizeof(float));
cudaMalloc(&device_dimClusters, MaxMixNum * numDims * sizeof(float));
Malloc2D(dimDiagCovs, numDims, MaxMixNum, float);
cudaMalloc(&device_diagCovs, MaxMixNum * numDims * sizeof(float));
cudaMemcpy(device_dimObjects, dimObjects[0], numObjs * numDims * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&device_clusterSize, MaxMixNum * sizeof(int));
clusterSize = (int *) malloc(MaxMixNum * sizeof(int));
dMats = (float *) malloc(MaxMixNum * sizeof(float));
cudaMalloc(&device_dMats, MaxMixNum * sizeof(float));
}
void KMean::KMeanInitalize() {
numClusters = m_nMixNum;
for(int j=0;j < numDims;j++) {
for(int m=0;m < numClusters;m++) {
dimClusters[0][j*numClusters + m] = m_pGmmModel[0].pGauss[m].pfMean[j];
}
}
cudaMemcpy(device_dimClusters, dimClusters[0], numClusters * numDims * sizeof(float), cudaMemcpyHostToDevice);
}
void KMean::KMeanFinalize() {
// clusters
cudaMemcpy(dimClusters[0], device_dimClusters, numClusters * numDims * sizeof(float), cudaMemcpyDeviceToHost);
// clusterSize
cudaMemcpy(clusterSize, device_clusterSize, numClusters * sizeof(int), cudaMemcpyDeviceToHost);
for(int gaussIdx = 0; gaussIdx < numClusters; gaussIdx++) {
m_pGmmModel[0].pfWeight[gaussIdx] = (float)clusterSize[gaussIdx] / numObjs;
}
/// diag Covariance
int numOfBlock = (numObjs - 1) / ACCUMULATE_BLOCKSIZE + 1;
cudaMemset(device_diagCovs, 0, numClusters * numDims * sizeof(float));
cuda_accumulate_diagcovs<<<numOfBlock, ACCUMULATE_BLOCKSIZE>>>(device_dimObjects, device_memberShip, device_dimClusters, numObjs, numDims, numClusters, device_diagCovs);
cudaMemset(device_dMats, 0, numClusters * sizeof(float));
cuda_average_diagcovs<<<numDims, numClusters>>>(device_clusterSize, device_diagCovs, device_dMats);
cudaMemcpy(dMats, device_dMats, numClusters * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(dimDiagCovs[0], device_diagCovs, numClusters * numDims * sizeof(float), cudaMemcpyDeviceToHost);
for(int clusterIdx = 0;clusterIdx < numClusters; clusterIdx++) {
m_pGmmModel[0].pGauss[clusterIdx].dMat = (dMats[clusterIdx] - numDims * LOG2PI) * 0.5 + log(m_pGmmModel[0].pfWeight[clusterIdx]);
for(int dimIdx = 0; dimIdx < numDims; dimIdx ++) {
m_pGmmModel[0].pGauss[clusterIdx].pfDiagCov[dimIdx] = dimDiagCovs[0][dimIdx * numClusters + clusterIdx];
m_pGmmModel[0].pGauss[clusterIdx].pfMean[dimIdx] = dimClusters[0][dimIdx * numClusters + clusterIdx];
}
}
}
void KMean::KMeanIteration() {
KMeanCluster();
}
void KMean::KMeanMain(int KMeanIterNum) {
/// New Splited Mixture Gaussian Model
KMeanInitalize();
for(int i = 0;i < KMeanIterNum;i++) {
KMeanIteration();
}
KMeanFinalize();
}
|
4814a1878646052d5010cd03e70894dd48d570f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
/*
* This example uses the cuRAND host and device API to replace the system rand()
* call by pre-generating large chunks of random numbers before fetching one at
* at time. If there are no unused random numbers left, a new batch is generated
* synchronously.
*/
/*
* initialize_state initializes cuRAND device state
*/
__global__ void initialize_state(hiprandState_t *states)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(9384, tid, 0, states + tid);
}
/*
* refill_randoms uses the cuRAND device API to generate N random values using
* the states passed to the kernel.
*/
__global__ void refill_randoms(float *dRand, int N, hiprandState_t *states)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
hiprandState_t *state = states + tid;
for (i = tid; i < N; i += nthreads)
{
dRand[i] = hiprand_uniform(state);
}
}
/*
* An implementation of rand() that uses the cuRAND device API.
*/
float cuda_device_rand()
{
static hiprandState_t *states = NULL;
static float *dRand = NULL;
static float *hRand = NULL;
static int dRand_length = 1000000;
static int dRand_used = 1000000;
int threads_per_block = 256;
int blocks_per_grid = 30;
if (dRand == NULL)
{
/*
* If the cuRAND state hasn't been initialized yet, pre-allocate memory
* to store the generated random values in as well as the cuRAND device
* state objects.
*/
hipMalloc((void **)&dRand, sizeof(float) * dRand_length);
hipMalloc((void **)&states, sizeof(hiprandState_t) * threads_per_block * blocks_per_grid);
hRand = (float *)malloc(sizeof(float) * dRand_length);
// Initialize states on the device
initialize_state << <blocks_per_grid, threads_per_block >> >(states);
}
if (dRand_used == dRand_length)
{
/*
* If all pre-generated random numbers have been consumed, regenerate a
* new batch.
*/
hipLaunchKernelGGL(( refill_randoms) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, dRand, dRand_length, states);
hipMemcpy(hRand, dRand, sizeof(float) * dRand_length, hipMemcpyDeviceToHost);
dRand_used = 0;
}
// Return the next pre-generated random number
return hRand[dRand_used++];
}
/*
* An implementation of rand() that uses the cuRAND host API.
*/
float cuda_host_rand()
{
static float *dRand = NULL;
static float *hRand = NULL;
hiprandGenerator_t randGen;
static int dRand_length = 1000000;
static int dRand_used = 1000000;
if (dRand == NULL)
{
/*
* If the cuRAND state hasn't been initialized yet, construct a cuRAND
* host generator and pre-allocate memory to store the generated random
* values in.
*/
hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_DEFAULT);
hipMalloc((void **)&dRand, sizeof(float) * dRand_length);
hRand = (float *)malloc(sizeof(float) * dRand_length);
}
if (dRand_used == dRand_length)
{
/*
* If all pre-generated random numbers have been consumed, regenerate a
* new batch using hiprandGenerateUniform.
*/
hiprandGenerateUniform(randGen, dRand, dRand_length);
hipMemcpy(hRand, dRand, sizeof(float) * dRand_length, hipMemcpyDeviceToHost);
dRand_used = 0;
}
// Return the next pre-generated random number
return hRand[dRand_used++];
}
/*
* A reference implementation that uses system rand().
*/
float host_rand()
{
return (float)rand() / (float)RAND_MAX;
}
int main(int argc, char **argv)
{
int i;
int N = 8388608;
/*
* Allocate N random numbers from each of the random number generation
* functions implemented.
*/
for (i = 0; i < N; i++)
{
float h = host_rand();
float d = cuda_host_rand();
float dd = cuda_device_rand();
printf("%2.4f %2.4f %2.4f\n", h, d, dd);
getchar();
}
return 0;
} | 4814a1878646052d5010cd03e70894dd48d570f3.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
/*
* This example uses the cuRAND host and device API to replace the system rand()
* call by pre-generating large chunks of random numbers before fetching one at
* at time. If there are no unused random numbers left, a new batch is generated
* synchronously.
*/
/*
* initialize_state initializes cuRAND device state
*/
__global__ void initialize_state(curandState *states)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(9384, tid, 0, states + tid);
}
/*
* refill_randoms uses the cuRAND device API to generate N random values using
* the states passed to the kernel.
*/
__global__ void refill_randoms(float *dRand, int N, curandState *states)
{
int i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x;
curandState *state = states + tid;
for (i = tid; i < N; i += nthreads)
{
dRand[i] = curand_uniform(state);
}
}
/*
* An implementation of rand() that uses the cuRAND device API.
*/
float cuda_device_rand()
{
static curandState *states = NULL;
static float *dRand = NULL;
static float *hRand = NULL;
static int dRand_length = 1000000;
static int dRand_used = 1000000;
int threads_per_block = 256;
int blocks_per_grid = 30;
if (dRand == NULL)
{
/*
* If the cuRAND state hasn't been initialized yet, pre-allocate memory
* to store the generated random values in as well as the cuRAND device
* state objects.
*/
cudaMalloc((void **)&dRand, sizeof(float) * dRand_length);
cudaMalloc((void **)&states, sizeof(curandState) * threads_per_block * blocks_per_grid);
hRand = (float *)malloc(sizeof(float) * dRand_length);
// Initialize states on the device
initialize_state << <blocks_per_grid, threads_per_block >> >(states);
}
if (dRand_used == dRand_length)
{
/*
* If all pre-generated random numbers have been consumed, regenerate a
* new batch.
*/
refill_randoms <<<blocks_per_grid, threads_per_block >>>(dRand, dRand_length, states);
cudaMemcpy(hRand, dRand, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost);
dRand_used = 0;
}
// Return the next pre-generated random number
return hRand[dRand_used++];
}
/*
* An implementation of rand() that uses the cuRAND host API.
*/
float cuda_host_rand()
{
static float *dRand = NULL;
static float *hRand = NULL;
curandGenerator_t randGen;
static int dRand_length = 1000000;
static int dRand_used = 1000000;
if (dRand == NULL)
{
/*
* If the cuRAND state hasn't been initialized yet, construct a cuRAND
* host generator and pre-allocate memory to store the generated random
* values in.
*/
curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT);
cudaMalloc((void **)&dRand, sizeof(float) * dRand_length);
hRand = (float *)malloc(sizeof(float) * dRand_length);
}
if (dRand_used == dRand_length)
{
/*
* If all pre-generated random numbers have been consumed, regenerate a
* new batch using curandGenerateUniform.
*/
curandGenerateUniform(randGen, dRand, dRand_length);
cudaMemcpy(hRand, dRand, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost);
dRand_used = 0;
}
// Return the next pre-generated random number
return hRand[dRand_used++];
}
/*
* A reference implementation that uses system rand().
*/
float host_rand()
{
return (float)rand() / (float)RAND_MAX;
}
int main(int argc, char **argv)
{
int i;
int N = 8388608;
/*
* Allocate N random numbers from each of the random number generation
* functions implemented.
*/
for (i = 0; i < N; i++)
{
float h = host_rand();
float d = cuda_host_rand();
float dd = cuda_device_rand();
printf("%2.4f %2.4f %2.4f\n", h, d, dd);
getchar();
}
return 0;
} |
e29077f5828513c4a0d0295d46cb779baa317820.hip | // !!! This is a file automatically generated by hipify!!!
/**Nvidia GPU
* GPU SMStreaming Multiprocessorblockthread
* GPU
* global memoryconstant memorytexture memory
* shared memoryregisterslocal memory
* L1 L2
* CUDA
*/
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 5
// Define kernel function with global memory.
__global__ void gpu_global_memory(int *device_a)
{
device_a[threadIdx.x] = threadIdx.x;
}
/**
*
* 1
* 2
*/
// Define kernel function with local memory.
__global__ void gpu_local_memory(int device_in)
{
// kernel function
int t_local;
t_local = device_in * threadIdx.x;
printf("Value of Local variable in current thread is: %d \n", t_local);
}
// Define kernel function with shared memory.
// For each index i, stores into device_a[i] the running average of
// device_a[0..i].  Must be launched with one block of exactly 10 threads
// (the size of sh_arr).
__global__ void gpu_shared_memory(float *device_a)
{
    // Defining local variables which are private to each thread.
    int i, index = threadIdx.x;
    float average, sum = 0.0f;
    // Block-shared staging buffer; __shared__ places it in shared memory.
    __shared__ float sh_arr[10];
    sh_arr[index] = device_a[index];
    __syncthreads(); // This ensures all the writes to shared memory have completed
    // Sum elements 0..index (numerator of the running average).
    for (i = 0; i <= index; ++i)
    {
        sum += sh_arr[i];
    }
    average = sum / (index + 1.0f);
    device_a[index] = average;
    // BUGFIX: the original ended with `sh_arr[index] = average;`.  That
    // store was dead (shared memory is discarded at kernel exit) and it
    // raced with other threads still reading sh_arr[i] in the loop above
    // (no __syncthreads() separated the reads from the write); removed.
}
int main(int argc, char *argv[])
{
    int host_a[N];
    int *device_a;
    // Allocate device global memory (hipMalloc allocations live in global memory).
    hipMalloc((void**)&device_a, sizeof(int) * N);
    // Copy host memory to device global memory.
    // BUGFIX: the original passed `&device_a` -- the address of the host-side
    // pointer variable -- as the destination, which wrote sizeof(int)*N bytes
    // over the host stack instead of into device memory.  The device pointer
    // itself must be passed.
    hipMemcpy((void*)device_a, (void*)host_a, sizeof(int) * N, hipMemcpyHostToDevice);
    // Kernel call: one block of N threads, each writing its own index.
    hipLaunchKernelGGL(( gpu_global_memory) , dim3(1), dim3(N), 0, 0, device_a);
    // Copy device global memory back to host memory (plain array name, not
    // `&host_a`, for clarity -- both have the same address for an array).
    hipMemcpy((void*)host_a, (void*)device_a, sizeof(int) * N, hipMemcpyDeviceToHost);
    // Testing the global memory.
    printf("Array in global memory is: \n");
    for (unsigned int i = 0; i < N; ++i)
    {
        printf("At index: %d ---> %d\n", i, host_a[i]);
    }
    // Testing the local memory.
    printf("Use of Local Memory on GPU:\n");
    // Consistency: use the same hipLaunchKernelGGL form as the launch above
    // instead of the leftover CUDA triple-chevron syntax.
    hipLaunchKernelGGL(( gpu_local_memory), dim3(1), dim3(N), 0, 0, N);
    hipDeviceSynchronize();
    // Testing the shared memory.
    float h_a[10];
    float *d_a;
    for (int i = 0; i < 10; ++i)
    {
        h_a[i] = i;
    }
    // Allocate global memory on the device.
    hipMalloc((void **)&d_a, sizeof(float) * 10);
    // Copy data from host memory to device memory.
    hipMemcpy((void *)d_a, (void *)h_a, sizeof(float) * 10, hipMemcpyHostToDevice);
    // Call kernel function: one block of 10 threads, matching sh_arr[10].
    hipLaunchKernelGGL(( gpu_shared_memory), dim3(1), dim3(10), 0, 0, d_a);
    // Copy the modified array back to the host memory.
    hipMemcpy((void *)h_a, (void *)d_a, sizeof(float) * 10, hipMemcpyDeviceToHost);
    printf("Use of Shared Memory on GPU: \n");
    // Printing result on console.
    for (int i = 0; i < 10; ++i)
    {
        printf("The running average after %d element is %f \n", i, h_a[i]);
    }
    // BUGFIX: the original leaked both device allocations.
    hipFree(device_a);
    hipFree(d_a);
    return 0;
} | e29077f5828513c4a0d0295d46cb779baa317820.cu | /**Nvidia GPU 存储器架构
* 在 GPU 上代码的执行被划分为流多处理器SM,Streaming Multiprocessor;块block;线程thread。
* 每一个存储器空间都有其特定的特征和用途,不同的访问速度和生命周期范围,详情查看具体 GPU 存储器架构图。
* 全局内存global memory;常量内存constant memory;纹理内存texture memory;
* 共享内存shared memory;寄存器堆registers;本地内存local memory。
* 高速缓存存储器,L1 L2 缓存,提高访问速度。
* CUDA 存储器的快慢有两方面:延迟低;带宽大;一般都是指延迟低。
*/
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 5
// Define kernel function with global memory.
// Each thread writes its own thread index into device_a, which lives in
// global memory (it is allocated with cudaMalloc by the caller); expects a
// 1-D block with device_a sized to at least blockDim.x elements.
__global__ void gpu_global_memory(int *device_a)
{
	device_a[threadIdx.x] = threadIdx.x;
}
/**本地内存和寄存器堆对每一个线程都是唯一的。
* 寄存器溢出,有两种情况:
* 1、寄存器不够了;
* 2、某些情况下根本就不能放在寄存器中。
*/
// Define kernel function with local memory.
// Each thread computes and prints its own private product.
__global__ void gpu_local_memory(int device_in)
{
	// Local variables declared inside a kernel live in registers (or spill
	// to thread-private local memory); each thread has its own copy.
	int t_local;
	t_local = device_in * threadIdx.x;
	printf("Value of Local variable in current thread is: %d \n", t_local);
}
// Define kernel function with shared memory.
// For each index i, stores into device_a[i] the running average of
// device_a[0..i].  Must be launched with one block of exactly 10 threads
// (the size of sh_arr).
__global__ void gpu_shared_memory(float *device_a)
{
	// Defining local variables which are private to each thread.
	int i, index = threadIdx.x;
	float average, sum = 0.0f;
	// Variables qualified with __shared__ are stored in shared memory.
	__shared__ float sh_arr[10];
	sh_arr[index] = device_a[index];
	__syncthreads(); // This ensures all the writes to shared memory have completed
	// Running-average numerator: sum of elements 0..index.
	for (i = 0; i<= index; ++i)
	{
		sum += sh_arr[i];
	}
	average = sum / (index + 1.0f);
	device_a[index] = average;
	// BUGFIX: the original ended with `sh_arr[index] = average;`.  That
	// store was dead (shared memory is discarded at kernel exit) and it
	// raced with other threads still reading sh_arr[i] in the loop above
	// (no __syncthreads() separated the reads from the write); removed.
}
int main(int argc, char *argv[])
{
	int host_a[N];
	int *device_a;
	// Allocate device global memory (cudaMalloc allocations are global memory).
	cudaMalloc((void**)&device_a, sizeof(int) * N);
	// Copy host memory to device global memory.
	// BUGFIX: the original passed `&device_a` -- the address of the host-side
	// pointer variable -- as the destination, which wrote sizeof(int)*N bytes
	// over the host stack instead of into device memory.  The device pointer
	// itself must be passed.
	cudaMemcpy((void*)device_a, (void*)host_a, sizeof(int) * N, cudaMemcpyHostToDevice);
	// Kernel call: one block of N threads, each writing its own index.
	gpu_global_memory <<<1, N>>> (device_a);
	// Copy device global memory back to host memory (plain array name, not
	// `&host_a`, for clarity -- both have the same address for an array).
	cudaMemcpy((void*)host_a, (void*)device_a, sizeof(int) * N, cudaMemcpyDeviceToHost);
	//Testing the global memory.
	printf("Array in global memory is: \n");
	for (unsigned int i = 0; i < N; ++i)
	{
		printf("At index: %d ---> %d\n", i, host_a[i]);
	}
	// Testing the local memory.
	printf("Use of Local Memory on GPU:\n");
	gpu_local_memory << <1, N >> > (N);
	cudaDeviceSynchronize();
	// Testing the shared memory.
	float h_a[10];
	float *d_a;
	for (int i = 0; i < 10; ++i)
	{
		h_a[i] = i;
	}
	// allocate global memory on the device
	cudaMalloc((void **)&d_a, sizeof(float) * 10);
	// now copy data from host memory to device memory
	cudaMemcpy((void *)d_a, (void *)h_a, sizeof(float) * 10, cudaMemcpyHostToDevice);
	// Call kernel function: one block of 10 threads, matching sh_arr[10].
	gpu_shared_memory << <1, 10 >> >(d_a);
	// copy the modified array back to the host memory
	cudaMemcpy((void *)h_a, (void *)d_a, sizeof(float) * 10, cudaMemcpyDeviceToHost);
	printf("Use of Shared Memory on GPU: \n");
	//Printing result on console
	for (int i = 0; i < 10; ++i)
	{
		printf("The running average after %d element is %f \n", i, h_a[i]);
	}
	// BUGFIX: the original leaked both device allocations.
	cudaFree(device_a);
	cudaFree(d_a);
	return 0;
} |
77a378c48975ceabcb630169e68d930bd0fd144e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "book.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
// Per-element work kernel: averages a 3-tap window (wrapping modulo 256)
// of a and b, then stores the mean of the two window averages into c.
__global__ void kernel(int* a, int* b, int* c) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= N) return;                 // guard the grid tail
	int idx1 = (idx + 1) % 256;
	int idx2 = (idx + 2) % 256;
	float as = (a[idx] + a[idx1] + a[idx2]) / 3.f;
	float bs = (b[idx] + b[idx1] + b[idx2]) / 3.f;
	c[idx] = (as + bs) / 2;
}
int main(void) {
	hipDeviceProp_t prop;
	int whichDevice;
	HANDLE_ERROR(hipGetDevice(&whichDevice));
	HANDLE_ERROR(hipGetDeviceProperties(&prop, whichDevice));
	if (!prop.deviceOverlap) {
		printf("Device will not handle overlaps, so no speed up from streams\n");
	}
	else {
		printf("Deprecated. Use instead asyncEngineCount\n");
	}
	if (prop.asyncEngineCount) {
		printf("Number of asynchronous engines %d\n", prop.asyncEngineCount);
	}
	hipEvent_t start, stop;
	float elapsedTime;
	// Start the timer.
	HANDLE_ERROR(hipEventCreate(&start));
	HANDLE_ERROR(hipEventCreate(&stop));
	HANDLE_ERROR(hipEventRecord(start, 0));
	// Create the two streams used to overlap copies and compute.
	hipStream_t stream0, stream1;
	HANDLE_ERROR(hipStreamCreate(&stream0));
	HANDLE_ERROR(hipStreamCreate(&stream1));
	int* host_a, * host_b, * host_c;
	int* dev_a0, * dev_b0, * dev_c0; // chunk buffers for stream0
	int* dev_a1, * dev_b1, * dev_c1; // chunk buffers for stream1
	// Allocate one N-element chunk of GPU memory per buffer and stream.
	HANDLE_ERROR(hipMalloc((void**)&dev_a0, N * sizeof(int)));
	HANDLE_ERROR(hipMalloc((void**)&dev_b0, N * sizeof(int)));
	HANDLE_ERROR(hipMalloc((void**)&dev_c0, N * sizeof(int)));
	HANDLE_ERROR(hipMalloc((void**)&dev_a1, N * sizeof(int)));
	HANDLE_ERROR(hipMalloc((void**)&dev_b1, N * sizeof(int)));
	HANDLE_ERROR(hipMalloc((void**)&dev_c1, N * sizeof(int)));
	// Page-locked host memory is required for hipMemcpyAsync.
	HANDLE_ERROR(hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
	HANDLE_ERROR(hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
	HANDLE_ERROR(hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
	for (int i = 0; i < FULL_DATA_SIZE; ++i) {
		host_a[i] = rand();
		host_b[i] = rand();
	}
	// Walk the full data set two chunks at a time, one chunk per stream.
	for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
		// Stream 0 processes chunk [i, i+N).
		HANDLE_ERROR(hipMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0));
		HANDLE_ERROR(hipMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0));
		kernel << < N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
		HANDLE_ERROR(hipMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0));
		// BUGFIX: stream 1 must process the *second* chunk [i+N, i+2N).
		// The original reused `host_a + i` / `host_c + i`, so both streams
		// processed the same chunk and half of host_c was never computed.
		HANDLE_ERROR(hipMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1));
		HANDLE_ERROR(hipMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1));
		kernel << < N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
		HANDLE_ERROR(hipMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1));
	}
	// Drain both streams before stopping the timer and reading results.
	HANDLE_ERROR(hipStreamSynchronize(stream0));
	HANDLE_ERROR(hipStreamSynchronize(stream1));
	HANDLE_ERROR(hipEventRecord(stop, 0));
	HANDLE_ERROR(hipEventSynchronize(stop));
	HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
	printf("Time taken: %3.1f ms\n", elapsedTime);
	// Release pinned host memory, device buffers, events and streams.
	HANDLE_ERROR(hipHostFree(host_a));
	HANDLE_ERROR(hipHostFree(host_b));
	HANDLE_ERROR(hipHostFree(host_c));
	HANDLE_ERROR(hipFree(dev_a0));
	HANDLE_ERROR(hipFree(dev_b0));
	HANDLE_ERROR(hipFree(dev_c0));
	HANDLE_ERROR(hipFree(dev_a1));
	HANDLE_ERROR(hipFree(dev_b1));
	HANDLE_ERROR(hipFree(dev_c1));
	// BUGFIX: the original leaked both timing events.
	HANDLE_ERROR(hipEventDestroy(start));
	HANDLE_ERROR(hipEventDestroy(stop));
	HANDLE_ERROR(hipStreamDestroy(stream0));
	HANDLE_ERROR(hipStreamDestroy(stream1));
	return 0;
} | 77a378c48975ceabcb630169e68d930bd0fd144e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "book.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
// Per-element work kernel: averages a 3-tap window (wrapping modulo 256)
// of a and b, then stores the mean of the two window averages into c.
// Guards against the grid tail with the idx < N check.
__global__ void kernel(int* a, int* b, int* c) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < N) {
		int idx1 = (idx + 1) % 256;
		int idx2 = (idx + 2) % 256;
		float as = (a[idx] + a[idx1] + a[idx2]) / 3.f;
		float bs = (b[idx] + b[idx1] + b[idx2]) / 3.f;
		c[idx] = (as + bs) / 2;
	}
}
int main(void) {
	cudaDeviceProp prop;
	int whichDevice;
	HANDLE_ERROR(cudaGetDevice(&whichDevice));
	HANDLE_ERROR(cudaGetDeviceProperties(&prop, whichDevice));
	if (!prop.deviceOverlap) {
		printf("Device will not handle overlaps, so no speed up from streams\n");
	}
	else {
		printf("Deprecated. Use instead asyncEngineCount\n");
	}
	if (prop.asyncEngineCount) {
		printf("Number of asynchronous engines %d\n", prop.asyncEngineCount);
	}
	cudaEvent_t start, stop;
	float elapsedTime;
	// Start the timer.
	HANDLE_ERROR(cudaEventCreate(&start));
	HANDLE_ERROR(cudaEventCreate(&stop));
	HANDLE_ERROR(cudaEventRecord(start, 0));
	// Create the two streams used to overlap copies and compute.
	cudaStream_t stream0, stream1;
	HANDLE_ERROR(cudaStreamCreate(&stream0));
	HANDLE_ERROR(cudaStreamCreate(&stream1));
	int* host_a, * host_b, * host_c;
	int* dev_a0, * dev_b0, * dev_c0; // chunk buffers for stream0
	int* dev_a1, * dev_b1, * dev_c1; // chunk buffers for stream1
	// Allocate one N-element chunk of GPU memory per buffer and stream.
	HANDLE_ERROR(cudaMalloc((void**)&dev_a0, N * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b0, N * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_c0, N * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_a1, N * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_b1, N * sizeof(int)));
	HANDLE_ERROR(cudaMalloc((void**)&dev_c1, N * sizeof(int)));
	// Page-locked host memory is required for cudaMemcpyAsync.
	HANDLE_ERROR(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
	HANDLE_ERROR(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
	HANDLE_ERROR(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
	for (int i = 0; i < FULL_DATA_SIZE; ++i) {
		host_a[i] = rand();
		host_b[i] = rand();
	}
	// Walk the full data set two chunks at a time, one chunk per stream.
	for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
		// Stream 0 processes chunk [i, i+N).
		HANDLE_ERROR(cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0));
		HANDLE_ERROR(cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0));
		kernel << < N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
		HANDLE_ERROR(cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0));
		// BUGFIX: stream 1 must process the *second* chunk [i+N, i+2N).
		// The original reused `host_a + i` / `host_c + i`, so both streams
		// processed the same chunk and half of host_c was never computed.
		HANDLE_ERROR(cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1));
		HANDLE_ERROR(cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1));
		kernel << < N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
		HANDLE_ERROR(cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1));
	}
	// Drain both streams before stopping the timer and reading results.
	HANDLE_ERROR(cudaStreamSynchronize(stream0));
	HANDLE_ERROR(cudaStreamSynchronize(stream1));
	HANDLE_ERROR(cudaEventRecord(stop, 0));
	HANDLE_ERROR(cudaEventSynchronize(stop));
	HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("Time taken: %3.1f ms\n", elapsedTime);
	// Release pinned host memory, device buffers, events and streams.
	HANDLE_ERROR(cudaFreeHost(host_a));
	HANDLE_ERROR(cudaFreeHost(host_b));
	HANDLE_ERROR(cudaFreeHost(host_c));
	HANDLE_ERROR(cudaFree(dev_a0));
	HANDLE_ERROR(cudaFree(dev_b0));
	HANDLE_ERROR(cudaFree(dev_c0));
	HANDLE_ERROR(cudaFree(dev_a1));
	HANDLE_ERROR(cudaFree(dev_b1));
	HANDLE_ERROR(cudaFree(dev_c1));
	// BUGFIX: the original leaked both timing events.
	HANDLE_ERROR(cudaEventDestroy(start));
	HANDLE_ERROR(cudaEventDestroy(stop));
	HANDLE_ERROR(cudaStreamDestroy(stream0));
	HANDLE_ERROR(cudaStreamDestroy(stream1));
	return 0;
} |
4acb675ac567942fa46990afb141924db0ee6a77.hip | // !!! This is a file automatically generated by hipify!!!
// Simple SUDOKU probram in CUDA
// cmpe297_hw3_easysudoku.cu
/* *
HW3 by Jiongfeng Chen
Parallelized GPU version of Sudoku Puzzle Algorithm and Its OPTIMIZATION
OPTIMIZATION Thinking:
ADD possible value array to save results from the last run to reduce iteration times
So it only compare the possible values found from last run, and largely reduce the i
teration times of the loop in the following kernel code. It is extremely useful when
the IS_OPTION is complex and require much excution time.
temp = IS_OPTION(row, col, k); //old
temp = IS_OPTION(row, col, value_avail[mIter]); //optimized
Test Case:
input matrix:
const int input_sdk[9][9]
= {{0, 7, 0, 0, 6, 5, 0, 8, 0},
{6, 0, 0, 0, 3, 0, 4, 0, 0},
{0, 2, 0, 0, 4, 0, 7, 0, 0},
{8, 6, 0, 0, 0, 2, 5, 7, 0},
{0, 0, 7, 4, 0, 6, 1, 0, 0},
{0, 5, 2, 3, 0, 0, 0, 6, 4},
{0, 0, 8, 0, 2, 0, 0, 3, 0},
{0, 0, 5, 0, 8, 0, 0, 0, 1},
{0, 4, 0, 7, 1, 0, 0, 5, 0}};
Output:
0 *7* 0 | 0 *6* *5* | 0 *8* 0
*6* 0 0 | 0 *3* 0 | *4* 0 0
0 *2* 0 | 0 *4* 0 | *7* 0 0
---------------------------------------------------------------------
*8* *6* 0 | 0 0 *2* | *5* *7* 0
0 0 *7* | *4* 0 *6* | *1* 0 0
0 *5* *2* | *3* 0 0 | 0 *6* *4*
---------------------------------------------------------------------
0 0 *8* | 0 *2* 0 | 0 *3* 0
0 0 *5* | 0 *8* 0 | 0 0 *1*
0 *4* 0 | *7* *1* 0 | 0 *5* 0
*4* *7* *1* | *9* *6* *5* | *3* *8* *2*
*6* *8* *9* | *2* *3* *7* | *4* *1* *5*
*5* *2* *3* | *8* *4* *1* | *7* *9* *6*
---------------------------------------------------------------------
*8* *6* *4* | *1* *9* *2* | *5* *7* *3*
*3* *9* *7* | *4* *5* *6* | *1* *2* *8*
*1* *5* *2* | *3* *7* *8* | *9* *6* *4*
---------------------------------------------------------------------
*9* *1* *8* | *5* *2* *4* | *6* *3* *7*
*7* *3* *5* | *6* *8* *9* | *2* *4* *1*
*2* *4* *6* | *7* *1* *3* | *8* *5* *9*
Kernel Execution Time: 37960 cycles
Total cycles: 37960
run:nvcc -I/usr/local/cuda/include -I. -lineinfo -arch=sm_53 -g -c cmpe297_hw3_sudoku.cu -o cmpe297_hw3_sudoku.o
*/
#include<stdio.h>
#include<string.h>
#include <hip/hip_runtime.h>
const int big_2x[9][9] = {{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1}};
// input 9x9 sudoku :
// - 1~9 : valid values
// - 0 : no value is decided
const int input_sdk[9][9] = {{0, 7, 0, 0, 6, 5, 0, 8, 0},
{6, 0, 0, 0, 3, 0, 4, 0, 0},
{0, 2, 0, 0, 4, 0, 7, 0, 0},
{8, 6, 0, 0, 0, 2, 5, 7, 0},
{0, 0, 7, 4, 0, 6, 1, 0, 0},
{0, 5, 2, 3, 0, 0, 0, 6, 4},
{0, 0, 8, 0, 2, 0, 0, 3, 0},
{0, 0, 5, 0, 8, 0, 0, 0, 1},
{0, 4, 0, 7, 1, 0, 0, 5, 0}};
typedef struct {
int val[9][9]; // values that each entry can get
int num_options[9][9]; // number of values that each entry can get
int not_in_cell[9][9]; // values not in each 3x3 cell
int not_in_row[9][9]; // values not in each row
int not_in_col[9][9]; // values not in each column
} stContext;
stContext context;
void initialize_all();
void print_all();
#define WIDTH 9
#define IS_OPTION(row, col, k) \
((shared_not_in_row[row][k] == 1) && \
(shared_not_in_col[col][k] == 1) && \
(shared_not_in_cell[row/3+(col/3)*3][k] == 1))? 1 : 0;
// Device-side completion test (note: shadows the libc memcmp name in device
// code).  Returns 0 once every cell has exactly one remaining candidate
// value, 1 while any cell is still undecided.
__device__ int
memcmp(stContext *input, int width)
{
	for (int row = 0; row < width; ++row)
	{
		for (int col = 0; col < width; ++col)
		{
			if (input->num_options[row][col] != 1)
			{
				return 1; // at least one undecided cell remains
			}
		}
	}
	return 0; // puzzle fully determined
}
#define FINISHED() (memcmp(context, WIDTH) == 0? 1: 0)
// rule: numbers should be unique in each sub-array, each row, and each column
// Parallel Sudoku constraint-propagation kernel: one thread per board cell,
// launched as a single WIDTH x WIDTH block.  Each thread repeatedly narrows
// its cell's candidate set until every cell has exactly one option left.
// `runtime` receives each thread's elapsed clock64() cycle count.
// NOTE(review): a puzzle requiring guessing (not pure propagation) would
// never terminate this loop -- confirm inputs are propagation-solvable.
__global__ void k_Sudoku(stContext *context, int *runtime)
{
	const unsigned int col = threadIdx.x;
	const unsigned int row = threadIdx.y;
	//share memory to improve performance
	__shared__ int shared_val[WIDTH][WIDTH];
	__shared__ int shared_num_options[WIDTH][WIDTH];
	__shared__ int shared_not_in_cell[WIDTH][WIDTH];
	__shared__ int shared_not_in_row[WIDTH][WIDTH];
	__shared__ int shared_not_in_col[WIDTH][WIDTH];
	// Stage the solver state into shared memory, one element per thread.
	shared_val[row][col] = context->val[row][col];
	shared_num_options[row][col] = context->num_options[row][col];
	shared_not_in_cell[row][col] = context->not_in_cell[row][col];
	shared_not_in_row[row][col] = context->not_in_row[row][col];
	shared_not_in_col[row][col] = context->not_in_col[row][col];
	__syncthreads();
	//CLOCK: Measure the performance AFTER OPTIMIZATION
	int start_time = clock64();
	//printf("col %d row %d threads\n", col, row);
	// FINISHED() re-reads context->num_options from global memory, which is
	// why num_options is written back at the bottom of every iteration.
	while(!FINISHED())
	{
		//printf("again col %d row %d threads\n", col, row);
		if(shared_num_options[row][col] > 1)
		{
			// Find values that are not in the row, col, and the
			// 3x3 cell that (row, col) is belonged to.
			int value = 0, temp;
			shared_num_options[row][col] = 0;
			//OPTIMIZATION, ADD possible value to save
			//results from the last run to reduce iteration times
			int value_avail[9];
			for(int kIter = 0; kIter < 9; kIter++)
				value_avail[kIter] = 0;
			//BEFORE OPTIMIZATION
			/*for(int k = 0; k < 9; k++)
			{
				temp = IS_OPTION(row, col, k);
				if(temp == 1)
				{
					shared_num_options[row][col]++;
					value = k;
				}
			}
			*/
			//AFTER OPTIMIZATION, ADD possible value to save
			//results from the last run to reduce iteration times
			// NOTE(review): firstRun is re-initialised to 1 on every outer
			// iteration, so the `else` branch below is dead code; also
			// value_avail uses 0 as a terminator even though k==0 (digit 1)
			// is a legal candidate.  Both look like latent bugs in the
			// "optimisation"; code left untouched here.
			int firstRun=1;
			int nIter =0;
			if(firstRun==1){
			for(int k = 0; k < 9; k++)
			{
				temp = IS_OPTION(row, col, k);
				if(temp == 1)
				{
					shared_num_options[row][col]++;
					value = k;
					value_avail[nIter++] =value;
				}
			}
			firstRun=0;
			}
			else{
				int mIter=0;
				while(value_avail[mIter]!=0)
				{	//OPTIMIZATION
					//only compare the possible values found from last run
					temp = IS_OPTION(row, col, value_avail[mIter]);
					if(temp == 1)
					{
						shared_num_options[row][col]++;
						value = value_avail[mIter];
					}
					mIter++;
				}
				value_avail[mIter] = 0;
			}
			// If the above loop found only one value,
			// set the value to (row, col)
			if(shared_num_options[row][col] == 1)
			{
				shared_not_in_row[row][value] = 0;
				shared_not_in_col[col][value] = 0;
				shared_not_in_cell[(row)/3+((col)/3)*3][value] = 0;
				shared_val[row][col] = value+1;
			}
		}
		context->num_options[row][col] = shared_num_options[row][col];
		__syncthreads();
	}//end while, find all grids
	// Publish the final solver state back to global memory.
	context->val[row][col] = shared_val[row][col];
	context->num_options[row][col] = shared_num_options[row][col];
	context->not_in_cell[row][col] = shared_not_in_cell[row][col];
	context->not_in_row[row][col] = shared_not_in_row[row][col];
	context->not_in_col[row][col] = shared_not_in_col[row][col];
	__syncthreads();
	//CLOCK: Measure the performance AFTER OPTIMIZATION
	int stop_time = clock64();
	runtime[row*WIDTH+col] = (int)(stop_time - start_time);
}
int main(int argc, char **argv)
{
	hipError_t err;
	initialize_all();
	print_all();
	stContext *k_context; // device-side copy of the solver state
	// Per-thread kernel execution-cycle measurements.
	int* runtime;
	int* d_runtime;
	int runtime_size = WIDTH*WIDTH*sizeof(int);
	hipMalloc((void**)&d_runtime, runtime_size);
	runtime = (int *) malloc(runtime_size);
	memset(runtime, 0, runtime_size);
	// Size of the five WIDTHxWIDTH int arrays inside stContext.
	int size = 5*WIDTH*WIDTH*sizeof(int);
	err = hipMalloc((void**)&k_context, size);
	if (err != hipSuccess)
	{
		fprintf(stderr, "Failed to allocate device data (error code %s)!\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Copy the input puzzle state to the GPU.
	err = hipMemcpy(k_context, &context, size, hipMemcpyHostToDevice);
	if (err != hipSuccess)
	{
		fprintf(stderr, "Failed to copy data from host to device (error code %s)!\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// One thread per board entry, single block.
	dim3 dimBlock(WIDTH, WIDTH, 1);
	dim3 dimGrid(1, 1, 1);
	hipLaunchKernelGGL(( k_Sudoku), dim3(dimGrid),dim3(dimBlock), 0, 0, k_context, d_runtime);
	err = hipGetLastError();
	if (err != hipSuccess)
	{
		fprintf(stderr, "Kernel execution failed (error code %s)\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	hipDeviceSynchronize();
	// Copy the result state back from GPU device memory.
	err = hipMemcpy(&context, k_context, size, hipMemcpyDeviceToHost);
	if (err != hipSuccess)
	{
		fprintf(stderr, "Failed to copy data from device to host (error code %s)!\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	hipDeviceSynchronize();
	// Copy the per-thread execution times back to the host.
	hipMemcpy(runtime, d_runtime, runtime_size, hipMemcpyDeviceToHost);
	hipDeviceSynchronize();
	// Kernel time is the slowest thread's cycle count.
	int elapsed_time = 0;
	for(int i = 0; i < WIDTH*WIDTH; i++)
		if(elapsed_time < runtime[i])
			elapsed_time = runtime[i];
	// Print the result
	print_all();
	printf("Kernel Execution Time: %d cycles\n", elapsed_time);
	printf("Total cycles: %d \n", elapsed_time);
	// Free host memory
	free(runtime);
	// BUGFIX: the original assigned both hipFree() results to `err` back to
	// back before checking, so a failure from the first free was silently
	// discarded.  Check each free individually.
	err = hipFree(k_context);
	if (err != hipSuccess)
	{
		fprintf(stderr, "Failed to free gpu data (error code %s)!\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = hipFree(d_runtime);
	if (err != hipSuccess)
	{
		fprintf(stderr, "Failed to free gpu data (error code %s)!\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	getchar();
	return 0;
}
// Build the global solver state (`context`) from the input_sdk puzzle.
void initialize_all()
{
	// Initially every value is possible in every row, column and 3x3 cell.
	memcpy(context.not_in_cell, big_2x, sizeof(big_2x));
	memcpy(context.not_in_row, big_2x, sizeof(big_2x));
	memcpy(context.not_in_col, big_2x, sizeof(big_2x));
	for (int row = 0; row < 9; ++row)
	{
		for (int col = 0; col < 9; ++col)
		{
			const int given = input_sdk[row][col];
			if (given == 0)
			{
				// Unknown entry: all nine digits remain candidates.
				context.val[row][col] = 0;
				context.num_options[row][col] = 9;
			}
			else
			{
				// Fixed clue: record it and mark the digit as taken in
				// its row, column and 3x3 cell.
				context.val[row][col] = given;
				context.num_options[row][col] = 1;
				context.not_in_cell[row/3+(col/3)*3][given-1] = 0;
				context.not_in_col[col][given-1] = 0;
				context.not_in_row[row][given-1] = 0;
			}
		}
	}
}
// Render the current board to stdout; decided entries are wrapped in
// asterisks, undecided entries print as a bare 0, with | and - rules
// marking the 3x3 cell boundaries.
void print_all()
{
	for (int i = 0; i < 9; i++)
	{
		for (int j = 0; j < 9; j++)
		{
			if (context.val[i][j] == 0)
				fprintf(stdout, " %1d ", context.val[i][j]);
			else
				fprintf(stdout, " *%1d* ", context.val[i][j]);
			// Vertical separator after columns 2 and 5.
			if (j == 2 || j == 5)
				fprintf(stdout, "| ");
		}
		fprintf(stdout, "\n");
		// Horizontal separator after rows 2 and 5.
		if (i == 2 || i == 5)
		{
			for (int k = 0; k < 69; k++)
				fprintf(stdout, "-");
			fprintf(stdout, "\n");
		}
	}
	fprintf(stdout, "\n");
}
| 4acb675ac567942fa46990afb141924db0ee6a77.cu | // Simple SUDOKU probram in CUDA
// cmpe297_hw3_easysudoku.cu
/* *
HW3 by Jiongfeng Chen
Parallelized GPU version of Sudoku Puzzle Algorithm and Its OPTIMIZATION
OPTIMIZATION Thinking:
ADD possible value array to save sesults from the last run to reduce iteration times
So it only compare the possible values found from last run, and largely reduce the i
teration times of the loop in the following kernel code. It is extremely useful when
the IS_OPTION is complex and require much excution time.
temp = IS_OPTION(row, col, k); //old
temp = IS_OPTION(row, col, value_avail[mIter]); //optimized
Test Case:
input matrix:
const int input_sdk[9][9]
= {{0, 7, 0, 0, 6, 5, 0, 8, 0},
{6, 0, 0, 0, 3, 0, 4, 0, 0},
{0, 2, 0, 0, 4, 0, 7, 0, 0},
{8, 6, 0, 0, 0, 2, 5, 7, 0},
{0, 0, 7, 4, 0, 6, 1, 0, 0},
{0, 5, 2, 3, 0, 0, 0, 6, 4},
{0, 0, 8, 0, 2, 0, 0, 3, 0},
{0, 0, 5, 0, 8, 0, 0, 0, 1},
{0, 4, 0, 7, 1, 0, 0, 5, 0}};
Output:
0 *7* 0 | 0 *6* *5* | 0 *8* 0
*6* 0 0 | 0 *3* 0 | *4* 0 0
0 *2* 0 | 0 *4* 0 | *7* 0 0
---------------------------------------------------------------------
*8* *6* 0 | 0 0 *2* | *5* *7* 0
0 0 *7* | *4* 0 *6* | *1* 0 0
0 *5* *2* | *3* 0 0 | 0 *6* *4*
---------------------------------------------------------------------
0 0 *8* | 0 *2* 0 | 0 *3* 0
0 0 *5* | 0 *8* 0 | 0 0 *1*
0 *4* 0 | *7* *1* 0 | 0 *5* 0
*4* *7* *1* | *9* *6* *5* | *3* *8* *2*
*6* *8* *9* | *2* *3* *7* | *4* *1* *5*
*5* *2* *3* | *8* *4* *1* | *7* *9* *6*
---------------------------------------------------------------------
*8* *6* *4* | *1* *9* *2* | *5* *7* *3*
*3* *9* *7* | *4* *5* *6* | *1* *2* *8*
*1* *5* *2* | *3* *7* *8* | *9* *6* *4*
---------------------------------------------------------------------
*9* *1* *8* | *5* *2* *4* | *6* *3* *7*
*7* *3* *5* | *6* *8* *9* | *2* *4* *1*
*2* *4* *6* | *7* *1* *3* | *8* *5* *9*
Kernel Execution Time: 37960 cycles
Total cycles: 37960
run:nvcc -I/usr/local/cuda/include -I. -lineinfo -arch=sm_53 -g -c cmpe297_hw3_sudoku.cu -o cmpe297_hw3_sudoku.o
*/
#include<stdio.h>
#include<string.h>
#include <cuda_runtime.h>
const int big_2x[9][9] = {{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1}};
// input 9x9 sudoku :
// - 1~9 : valid values
// - 0 : no value is decided
const int input_sdk[9][9] = {{0, 7, 0, 0, 6, 5, 0, 8, 0},
{6, 0, 0, 0, 3, 0, 4, 0, 0},
{0, 2, 0, 0, 4, 0, 7, 0, 0},
{8, 6, 0, 0, 0, 2, 5, 7, 0},
{0, 0, 7, 4, 0, 6, 1, 0, 0},
{0, 5, 2, 3, 0, 0, 0, 6, 4},
{0, 0, 8, 0, 2, 0, 0, 3, 0},
{0, 0, 5, 0, 8, 0, 0, 0, 1},
{0, 4, 0, 7, 1, 0, 0, 5, 0}};
typedef struct {
int val[9][9]; // values that each entry can get
int num_options[9][9]; // number of values that each entry can get
int not_in_cell[9][9]; // values not in each 3x3 cell
int not_in_row[9][9]; // values not in each row
int not_in_col[9][9]; // values not in each column
} stContext;
stContext context;
void initialize_all();
void print_all();
#define WIDTH 9
#define IS_OPTION(row, col, k) \
((shared_not_in_row[row][k] == 1) && \
(shared_not_in_col[col][k] == 1) && \
(shared_not_in_cell[row/3+(col/3)*3][k] == 1))? 1 : 0;
// Device-side completion test (note: shadows the libc memcmp name in device
// code).  Returns 0 once every cell has exactly one remaining candidate
// value, 1 while any cell is still undecided.
__device__ int
memcmp(stContext *input, int width)
{
	for(int i = 0; i< width; i++)
		for(int j = 0; j< width; j++)
			if(input->num_options[i][j] != 1)
				return 1;
	return 0;
}
#define FINISHED() (memcmp(context, WIDTH) == 0? 1: 0)
// rule: numbers should be unique in each sub-array, each row, and each column
// Parallel Sudoku constraint-propagation kernel: one thread per board cell,
// launched as a single WIDTH x WIDTH block.  Each thread repeatedly narrows
// its cell's candidate set until every cell has exactly one option left.
// `runtime` receives each thread's elapsed clock64() cycle count.
// NOTE(review): a puzzle requiring guessing (not pure propagation) would
// never terminate this loop -- confirm inputs are propagation-solvable.
__global__ void k_Sudoku(stContext *context, int *runtime)
{
	const unsigned int col = threadIdx.x;
	const unsigned int row = threadIdx.y;
	//share memory to improve performance
	__shared__ int shared_val[WIDTH][WIDTH];
	__shared__ int shared_num_options[WIDTH][WIDTH];
	__shared__ int shared_not_in_cell[WIDTH][WIDTH];
	__shared__ int shared_not_in_row[WIDTH][WIDTH];
	__shared__ int shared_not_in_col[WIDTH][WIDTH];
	// Stage the solver state into shared memory, one element per thread.
	shared_val[row][col] = context->val[row][col];
	shared_num_options[row][col] = context->num_options[row][col];
	shared_not_in_cell[row][col] = context->not_in_cell[row][col];
	shared_not_in_row[row][col] = context->not_in_row[row][col];
	shared_not_in_col[row][col] = context->not_in_col[row][col];
	__syncthreads();
	//CLOCK: Measure the performance AFTER OPTIMIZATION
	int start_time = clock64();
	//printf("col %d row %d threads\n", col, row);
	// FINISHED() re-reads context->num_options from global memory, which is
	// why num_options is written back at the bottom of every iteration.
	while(!FINISHED())
	{
		//printf("again col %d row %d threads\n", col, row);
		if(shared_num_options[row][col] > 1)
		{
			// Find values that are not in the row, col, and the
			// 3x3 cell that (row, col) is belonged to.
			int value = 0, temp;
			shared_num_options[row][col] = 0;
			//OPTIMIZATION, ADD possible value to save
			//results from the last run to reduce iteration times
			int value_avail[9];
			for(int kIter = 0; kIter < 9; kIter++)
				value_avail[kIter] = 0;
			//BEFORE OPTIMIZATION
			/*for(int k = 0; k < 9; k++)
			{
				temp = IS_OPTION(row, col, k);
				if(temp == 1)
				{
					shared_num_options[row][col]++;
					value = k;
				}
			}
			*/
			//AFTER OPTIMIZATION, ADD possible value to save
			//results from the last run to reduce iteration times
			// NOTE(review): firstRun is re-initialised to 1 on every outer
			// iteration, so the `else` branch below is dead code; also
			// value_avail uses 0 as a terminator even though k==0 (digit 1)
			// is a legal candidate.  Both look like latent bugs in the
			// "optimisation"; code left untouched here.
			int firstRun=1;
			int nIter =0;
			if(firstRun==1){
			for(int k = 0; k < 9; k++)
			{
				temp = IS_OPTION(row, col, k);
				if(temp == 1)
				{
					shared_num_options[row][col]++;
					value = k;
					value_avail[nIter++] =value;
				}
			}
			firstRun=0;
			}
			else{
				int mIter=0;
				while(value_avail[mIter]!=0)
				{	//OPTIMIZATION
					//only compare the possible values found from last run
					temp = IS_OPTION(row, col, value_avail[mIter]);
					if(temp == 1)
					{
						shared_num_options[row][col]++;
						value = value_avail[mIter];
					}
					mIter++;
				}
				value_avail[mIter] = 0;
			}
			// If the above loop found only one value,
			// set the value to (row, col)
			if(shared_num_options[row][col] == 1)
			{
				shared_not_in_row[row][value] = 0;
				shared_not_in_col[col][value] = 0;
				shared_not_in_cell[(row)/3+((col)/3)*3][value] = 0;
				shared_val[row][col] = value+1;
			}
		}
		context->num_options[row][col] = shared_num_options[row][col];
		__syncthreads();
	}//end while, find all grids
	// Publish the final solver state back to global memory.
	context->val[row][col] = shared_val[row][col];
	context->num_options[row][col] = shared_num_options[row][col];
	context->not_in_cell[row][col] = shared_not_in_cell[row][col];
	context->not_in_row[row][col] = shared_not_in_row[row][col];
	context->not_in_col[row][col] = shared_not_in_col[row][col];
	__syncthreads();
	//CLOCK: Measure the performance AFTER OPTIMIZATION
	int stop_time = clock64();
	runtime[row*WIDTH+col] = (int)(stop_time - start_time);
}
int main(int argc, char **argv)
{
	cudaError_t err;
	initialize_all();
	print_all();
	stContext *k_context; // device-side copy of the solver state
	// Per-thread kernel execution-cycle measurements.
	int* runtime;
	int* d_runtime;
	int runtime_size = WIDTH*WIDTH*sizeof(int);
	cudaMalloc((void**)&d_runtime, runtime_size);
	runtime = (int *) malloc(runtime_size);
	memset(runtime, 0, runtime_size);
	// Size of the five WIDTHxWIDTH int arrays inside stContext.
	int size = 5*WIDTH*WIDTH*sizeof(int);
	err = cudaMalloc((void**)&k_context, size);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device data (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Copy the input puzzle state to the GPU.
	err = cudaMemcpy(k_context, &context, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy data from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// One thread per board entry, single block.
	dim3 dimBlock(WIDTH, WIDTH, 1);
	dim3 dimGrid(1, 1, 1);
	k_Sudoku<<<dimGrid,dimBlock>>>(k_context, d_runtime);
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Kernel execution failed (error code %s)\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Modernisation: cudaThreadSynchronize() is deprecated; its documented
	// replacement cudaDeviceSynchronize() has identical behaviour.
	cudaDeviceSynchronize();
	// Copy the result state back from the GPU device memory.
	err = cudaMemcpy(&context, k_context, size, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy data from device to host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	cudaDeviceSynchronize();
	// Copy the per-thread execution times back to the host.
	cudaMemcpy(runtime, d_runtime, runtime_size, cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	// Kernel time is the slowest thread's cycle count.
	int elapsed_time = 0;
	for(int i = 0; i < WIDTH*WIDTH; i++)
		if(elapsed_time < runtime[i])
			elapsed_time = runtime[i];
	// Print the result
	print_all();
	printf("Kernel Execution Time: %d cycles\n", elapsed_time);
	printf("Total cycles: %d \n", elapsed_time);
	// Free host memory
	free(runtime);
	// BUGFIX: the original assigned both cudaFree() results to `err` back to
	// back before checking, so a failure from the first free was silently
	// discarded.  Check each free individually.
	err = cudaFree(k_context);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to free gpu data (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = cudaFree(d_runtime);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to free gpu data (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	getchar();
	return 0;
}
// Build the global solver state (`context`) from the input_sdk puzzle:
// pre-set every row/column/cell candidate mask to "all possible", then
// apply the fixed clues.
void initialize_all()
{
	int i, j;
	// Initially every value is possible in every row, column and 3x3 cell.
	memcpy(context.not_in_cell,big_2x, sizeof(big_2x));
	memcpy(context.not_in_row,big_2x, sizeof(big_2x));
	memcpy(context.not_in_col,big_2x, sizeof(big_2x));
	for(i=0; i<9; i++){
		for(j=0; j<9; j++){
			if(input_sdk[i][j] == 0)
			{
				// Unknown entry: all nine digits remain candidates.
				context.val[i][j] = 0;
				context.num_options[i][j]=9;
			}
			else
			{
				// Fixed clue: record it and mark the digit as taken in
				// its row, column and 3x3 cell.
				context.val[i][j] = input_sdk[i][j];
				context.num_options[i][j] = 1;
				context.not_in_cell[i/3+(j/3)*3][input_sdk[i][j]-1] = 0;
				context.not_in_col[j][input_sdk[i][j]-1] = 0;
				context.not_in_row[i][input_sdk[i][j]-1] = 0;
			}
		}
	}
}
// Render the current board to stdout; decided entries are wrapped in
// asterisks, undecided entries print as a bare 0, with | and - rules
// marking the 3x3 cell boundaries.
void print_all()
{
	int i, j, k;
	for(i=0; i<9; i++){
		for(j=0; j<9; j++){
			if(context.val[i][j] == 0)
				fprintf(stdout, " %1d ", context.val[i][j]);
			else
				fprintf(stdout, " *%1d* ", context.val[i][j]);
			// Vertical separator after columns 2 and 5.
			if((j==2)||(j==5)){
				fprintf(stdout, "| ");
			}
		}
		fprintf(stdout, "\n");
		// Horizontal separator after rows 2 and 5.
		if((i==2)||(i==5)){
			for(k=0; k<69; k++){
				fprintf(stdout, "-");
			}
			fprintf(stdout, "\n");
		}
	}
	fprintf(stdout, "\n");
}
|
38961020a73611e2d173fe37be10698b3a95b3d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Pixel color with one byte per channel, stored in B,G,R,A field order.
struct ProgramGPUColorRGB
{
// Default constructor is callable from device code; members are left uninitialized.
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
// One thread per index slot: bumps the per-thread counter indices[threadIdx.x].
// Grid/block layout: only threadIdx.x is used, so a 1-D block is expected.
// The color/output parameters belong to the generated interface and are
// unused by this kernel body.
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
    __syncthreads();
    // Bounds guard: the original wrote indices[threadIdx.x] unconditionally,
    // which is out-of-bounds (undefined behavior) whenever the launch uses
    // more threads per block than indicesLen0 slots.
    if ((int)threadIdx.x < indicesLen0)
    {
        indices[(threadIdx.x)]++;
    }
}
| 38961020a73611e2d173fe37be10698b3a95b3d3.cu | struct ProgramGPUColorRGB
{
__device__ ProgramGPUColorRGB()
{
}
unsigned char Blue;
unsigned char Green;
unsigned char Red;
unsigned char Alpha;
};
// Insaniquarium_Deluxe_Bot.Program
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0);
// Insaniquarium_Deluxe_Bot.Program
// One thread per index slot: bumps the per-thread counter indices[threadIdx.x].
// Grid/block layout: only threadIdx.x is used, so a 1-D block is expected.
// The color/output parameters belong to the generated interface and are
// unused by this kernel body.
extern "C" __global__ void FindPixel( ProgramGPUColorRGB* rgbColors, int rgbColorsLen0, ProgramGPUColorRGB* colors, int colorsLen0, int* indices, int indicesLen0, float* output, int outputLen0)
{
    __syncthreads();
    // Bounds guard: the original wrote indices[threadIdx.x] unconditionally,
    // which is out-of-bounds (undefined behavior) whenever the launch uses
    // more threads per block than indicesLen0 slots.
    if ((int)threadIdx.x < indicesLen0)
    {
        indices[(threadIdx.x)]++;
    }
}
|
21d204d8e981a9106969541a870d4da1e3b5458a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define zgeru_bs 512 // 512 is max threads for 1.x cards
void magma_zgetf2_swap(
magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_zscal_zgeru(
magma_int_t m, magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
ZGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
// Host driver: unblocked right-looking LU with partial pivoting, one column
// per iteration (pivot search -> row swap -> merged scal/geru update).
extern "C" magma_int_t
magma_zgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
// dA(i,j) addresses element (i,j) of the column-major device matrix.
#define dA(i, j) (dA + (i) + (j)*ldda)
// Argument checks; n is capped at zgeru_bs because the merged update kernel
// stages one pivot row of up to zgeru_bs entries in shared memory.
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > zgeru_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
// Prefer shared memory for the pivot search that follows.
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_izamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
// Switch cache preference back to L1 for the swap/update kernels.
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_zgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_zscal_zgeru( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
// ===========================================================================
// TODO: use standard BLAS magma_zswap?
#define zswap_bs 64
/******************************************************************************/
// Exchange the id-th elements (with stride incx) of the two rows starting at
// offsets i and j inside x. One thread handles one element pair; launched
// with zswap_bs threads per block.
__global__
void kernel_zswap(int n, magmaDoubleComplex *x, int i, int j, int incx)
{
    const int id = blockIdx.x * zswap_bs + threadIdx.x;
    if (id >= n)
        return;

    magmaDoubleComplex *a = x + i + incx * id;
    magmaDoubleComplex *b = x + j + incx * id;
    magmaDoubleComplex held = *a;
    *a = *b;
    *b = held;
}
/******************************************************************************/
// Host wrapper: swap rows i and j of the n-column matrix x (stride incx
// between row elements), asynchronously on the queue's stream.
void magma_zgetf2_swap(
magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* zswap two row vectors: ith and jth */
// One thread per element pair; enough blocks of zswap_bs threads to cover n.
dim3 threads( zswap_bs );
dim3 grid( magma_ceildiv( n, zswap_bs ) );
hipLaunchKernelGGL(( kernel_zswap)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaDoubleComplex shared_data[];
/******************************************************************************/
// Merged zscal/zgeru kernel: stages the pivot row A(0, 0:n-1) in dynamic
// shared memory, scales column 0 by 1/A(0,0), and applies the rank-1 update
// to the n-1 trailing columns. Requires dynamic shared memory of
// n * sizeof(magmaDoubleComplex) bytes (see the host wrapper below).
__global__
void kernel_zscal_zgeru(int m, int n, magmaDoubleComplex *A, int lda)
{
// Each block stages its own copy of the pivot row in shared memory.
magmaDoubleComplex *shared_y = shared_data;
int tid = blockIdx.x * zgeru_bs + threadIdx.x;
magmaDoubleComplex reg = MAGMA_Z_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
// Pivot row must be fully staged before any thread reads it.
__syncthreads();
// tid > 0: row 0 is the pivot row itself and is left untouched.
if (tid < m && tid > 0) {
reg = A[tid];
// Scale column 0 entry by 1/pivot (multiply by reciprocal).
reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
// Rank-1 update: A(tid, i) -= shared_y[i] * scaled column entry.
A[tid + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
// Host wrapper for the merged zscal+zgeru kernel (see above).
void magma_zscal_zgeru(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges zscal and zgeru
1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
// One thread per row; caller (magma_zgetf2_gpu) guarantees n <= zgeru_bs so
// the pivot row fits into the dynamically sized shared-memory buffer.
dim3 threads( zgeru_bs );
dim3 grid( magma_ceildiv( m, zgeru_bs ) );
size_t shared_size = sizeof(magmaDoubleComplex)*(n);
hipLaunchKernelGGL(( kernel_zscal_zgeru)
, dim3(grid), dim3(threads), shared_size, queue->cuda_stream() ,
m, n, dA, ldda);
}
| 21d204d8e981a9106969541a870d4da1e3b5458a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define zgeru_bs 512 // 512 is max threads for 1.x cards
void magma_zgetf2_swap(
magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_zscal_zgeru(
magma_int_t m, magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
ZGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
// Host driver: unblocked right-looking LU with partial pivoting, one column
// per iteration (pivot search -> row swap -> merged scal/geru update).
extern "C" magma_int_t
magma_zgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
// dA(i,j) addresses element (i,j) of the column-major device matrix.
#define dA(i, j) (dA + (i) + (j)*ldda)
// Argument checks; n is capped at zgeru_bs because the merged update kernel
// stages one pivot row of up to zgeru_bs entries in shared memory.
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > zgeru_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
// Prefer shared memory for the pivot search that follows.
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_izamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
// Switch cache preference back to L1 for the swap/update kernels.
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_zgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_zscal_zgeru( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
// ===========================================================================
// TODO: use standard BLAS magma_zswap?
#define zswap_bs 64
/******************************************************************************/
// Exchange the id-th elements (with stride incx) of the two rows starting at
// offsets i and j inside x. One thread handles one element pair; launched
// with zswap_bs threads per block.
__global__
void kernel_zswap(int n, magmaDoubleComplex *x, int i, int j, int incx)
{
    const int id = blockIdx.x * zswap_bs + threadIdx.x;
    if (id >= n)
        return;

    magmaDoubleComplex *a = x + i + incx * id;
    magmaDoubleComplex *b = x + j + incx * id;
    magmaDoubleComplex held = *a;
    *a = *b;
    *b = held;
}
/******************************************************************************/
// Host wrapper: swap rows i and j of the n-column matrix x (stride incx
// between row elements), asynchronously on the queue's stream.
void magma_zgetf2_swap(
magma_int_t n, magmaDoubleComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* zswap two row vectors: ith and jth */
// One thread per element pair; enough blocks of zswap_bs threads to cover n.
dim3 threads( zswap_bs );
dim3 grid( magma_ceildiv( n, zswap_bs ) );
kernel_zswap
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaDoubleComplex shared_data[];
/******************************************************************************/
// Merged zscal/zgeru kernel: stages the pivot row A(0, 0:n-1) in dynamic
// shared memory, scales column 0 by 1/A(0,0), and applies the rank-1 update
// to the n-1 trailing columns. Requires dynamic shared memory of
// n * sizeof(magmaDoubleComplex) bytes (see the host wrapper below).
__global__
void kernel_zscal_zgeru(int m, int n, magmaDoubleComplex *A, int lda)
{
// Each block stages its own copy of the pivot row in shared memory.
magmaDoubleComplex *shared_y = shared_data;
int tid = blockIdx.x * zgeru_bs + threadIdx.x;
magmaDoubleComplex reg = MAGMA_Z_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
// Pivot row must be fully staged before any thread reads it.
__syncthreads();
// tid > 0: row 0 is the pivot row itself and is left untouched.
if (tid < m && tid > 0) {
reg = A[tid];
// Scale column 0 entry by 1/pivot (multiply by reciprocal).
reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
// Rank-1 update: A(tid, i) -= shared_y[i] * scaled column entry.
A[tid + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
// Host wrapper for the merged zscal+zgeru kernel (see above).
void magma_zscal_zgeru(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges zscal and zgeru
1) zscale the first column vector A(1:M-1,0) with 1/A(0,0);
2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
// One thread per row; caller (magma_zgetf2_gpu) guarantees n <= zgeru_bs so
// the pivot row fits into the dynamically sized shared-memory buffer.
dim3 threads( zgeru_bs );
dim3 grid( magma_ceildiv( m, zgeru_bs ) );
size_t shared_size = sizeof(magmaDoubleComplex)*(n);
kernel_zscal_zgeru
<<< grid, threads, shared_size, queue->cuda_stream() >>>
(m, n, dA, ldda);
}
|
1ce952d947c56e4cb85fc2af39892a17e8501dc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2019 Arne Petersen, Kiel University
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject
* to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
* LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
* NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "UnprojectFromDisparity_basic.hh"
#include "CudaMinifuncs.cuh"
#include "PIPInterOpCUDA/CUDAImageArray.hh"
#include "PIPInterOpCUDA/CUDAImageTexture.hh"
using namespace PIP;
// Kernel parameter block; uploaded once per call via hipMemcpyToSymbol to the
// __constant__ symbol globalParams before launching computeUnproject.
struct SLocalParams
{
// Description of target camera
PIP::MTCamProjection<float> projTarget;
// Description for MLA (radius etc.)
SPlenCamDescription descrMla;
// Bounding box upper left pixel in plenoptic image
vec2<int> vUpperLeft;
// Bounding box lower right pixel in plenoptic image
vec2<int> vLowerRight;
// Lower clipping for normalized disparities, out of bounds depths are discarded
float fMinNormedDisp;
// Upper clipping for normalized disparities, out of bounds depths are discarded
float fMaxNormedDisp;
};
__device__ __constant__ SLocalParams globalParams;
__device__ __constant__ int2 globalOffsetsGridIdcsHex[6];
__device__ __constant__ int2 globalOffsetsGridIdcsReg[4];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel: for each plenoptic pixel with a valid (clipped, non-zero) disparity,
// unproject to a 3-space point through its micro lens and the main lens,
// store point + color per source pixel, and splat the alpha-weighted color
// and depth into the virtual target camera's accumulators (normalized later
// by computeNormalizeDepthmap).
// t_intChannels selects the texture fetch type (1 = mono, 2 = mono+alpha,
// otherwise RGBA); t_eGridType selects the MLA grid layout.
// Reads launch parameters from the __constant__ globalParams.
template<const int t_intChannels, const EGridType t_eGridType>
__global__ void computeUnproject(float* outputPoints3D, float* outputPointColors,
float* outputDepthmap, float* outputSynthImage,
hipTextureObject_t texInputDisparities, hipTextureObject_t texInputPlenopticImage,
const int intArrLength)
{
// Get pixel position and test 'in image'
vec2<float> vPixelPos_px;
vPixelPos_px.Set(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y);
// reject out of bounds pixels
if ((vPixelPos_px.x < float(globalParams.vUpperLeft.x)) || (vPixelPos_px.y < float(globalParams.vUpperLeft.y))
|| (vPixelPos_px.x > float(globalParams.vLowerRight.x)) || (vPixelPos_px.y > float(globalParams.vLowerRight.y)))
return;
// Initial disparity normalized with lens diameter (inter-lens distance)
float fDisparity_baselines = tex2D<float>(texInputDisparities, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
// Zero-disparity is invalid estimation; values outside the configured
// [fMinNormedDisp, fMaxNormedDisp] range are discarded as well.
if ((fDisparity_baselines == 0.0f)
||(fDisparity_baselines < globalParams.fMinNormedDisp)
||(fDisparity_baselines > globalParams.fMaxNormedDisp))
{
return;
}
// Get index of source lens in grid
vec2<float> vGridIndex;
// comming from plenoptic image implies using mirco-image grid
vGridIndex = globalParams.descrMla.PixelToLensImageGrid<t_eGridType>(vPixelPos_px);
// round to integral lens index
vGridIndex = globalParams.descrMla.GridRound<t_eGridType>(vGridIndex);
// get pinhole properties of micro camera relative to main lens
PIP::MTCamProjection<float> projMicroLens = globalParams.descrMla.GetMicrocamProjection<t_eGridType>(vGridIndex);
// 3-space position relative to main lens in mm
vec3<float> vPos3D = projMicroLens.Unproject(vPixelPos_px,
globalParams.descrMla.fMicroLensPrincipalDist_px * globalParams.descrMla.fPixelsize_mm / fDisparity_baselines);
// project point through mainlens.
vPos3D = MapThinLens(globalParams.descrMla.fMainLensFLength_mm, vPos3D);
// Write output position; 4 floats per pixel, row-major over the bounding box.
int index = int(vPixelPos_px.y) * (globalParams.vLowerRight.x - globalParams.vUpperLeft.x + 1) * 4
+ int(vPixelPos_px.x) * 4;
// if (index >= 4*intArrLength)
// printf("index %d, px [%g;%g]\n", index, vPixelPos_px.x, vPixelPos_px.y);
// output data format: normalize float with lens diameter
outputPoints3D[index + 0] = vPos3D.x;
outputPoints3D[index + 1] = vPos3D.y;
outputPoints3D[index + 2] = vPos3D.z;
outputPoints3D[index + 3] = 1.0f / fDisparity_baselines;
// Fetch color depending on input image channels
float4 vColor;
if (t_intChannels == 1)
{
// Mono: replicate intensity to RGB, full alpha.
float fIntensity = tex2D<float>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
vColor.x = fIntensity;
vColor.y = fIntensity;
vColor.z = fIntensity;
vColor.w = 1.0f;
}
else if (t_intChannels == 2)
{
// Mono + alpha: replicate intensity to RGB, keep alpha channel.
float2 vIntensityAlpha = tex2D<float2>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
vColor.x = vIntensityAlpha.x;
vColor.y = vIntensityAlpha.x;
vColor.z = vIntensityAlpha.x;
vColor.w = vIntensityAlpha.y;
}
else
{
vColor = tex2D<float4>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
}
// Set color for 3-space point corresponding to pixel pos
outputPointColors[index + 0] = vColor.x;
outputPointColors[index + 1] = vColor.y;
outputPointColors[index + 2] = vColor.z;
outputPointColors[index + 3] = vColor.w;
// Project 3-space point to virtual camera and add weighted (by raw images alpha) color/depth to AiF/depth image sum
vec2<float> vTargetPixel = globalParams.projTarget.Project(vPos3D);
if ((vTargetPixel.x < 1)||(vTargetPixel.y < 1)
||(vTargetPixel.x > float(globalParams.projTarget.vecRes.x-2))
||(vTargetPixel.y > float(globalParams.projTarget.vecRes.y-2)))
{
// projected fusion pixel is out of bounds
return;
}
// Atomic accumulation: several source pixels may splat into the same target
// pixel; alpha-weighted sums are divided by the weight sum afterwards.
index = int(vTargetPixel.y) * globalParams.projTarget.vecRes.x * 4 + int(vTargetPixel.x) * 4;
atomicAdd(outputSynthImage + index + 0, vColor.w*vColor.x);
atomicAdd(outputSynthImage + index + 1, vColor.w*vColor.y);
atomicAdd(outputSynthImage + index + 2, vColor.w*vColor.z);
atomicAdd(outputSynthImage + index + 3, vColor.w);
atomicAdd(outputDepthmap + index/4, vColor.w*vPos3D.z);
return;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Divide the accumulated depth and RGB sums by the accumulated weight so the
// fused depth map / synthesized image hold weighted averages; pixels that
// received no contribution (weight == 0) are cleared to zero.
// Layout: inoutColorsAndWeightSum is RGBA (4 floats per pixel, weight in the
// alpha slot); inoutDepthSum is one float per pixel.
__global__ void computeNormalizeDepthmap(float* inoutDepthSum, float* inoutColorsAndWeightSum, int iWidth, int iHeight)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Skip threads outside the target image.
    if ((x < 0) || (y < 0) || (x >= iWidth) || (y >= iHeight))
    {
        return;
    }

    const int idxRGBA = (y * iWidth + x) * 4;
    const float fWeightSum = inoutColorsAndWeightSum[idxRGBA + 3];

    if (fWeightSum == 0.0f)
    {
        // No contribution landed here: clear depth and color.
        inoutDepthSum[idxRGBA / 4] = 0;
        inoutColorsAndWeightSum[idxRGBA + 0] = 0;
        inoutColorsAndWeightSum[idxRGBA + 1] = 0;
        inoutColorsAndWeightSum[idxRGBA + 2] = 0;
    }
    else
    {
        // Turn weighted sums into weighted averages.
        inoutDepthSum[idxRGBA / 4] /= fWeightSum;
        inoutColorsAndWeightSum[idxRGBA + 0] /= fWeightSum;
        inoutColorsAndWeightSum[idxRGBA + 1] /= fWeightSum;
        inoutColorsAndWeightSum[idxRGBA + 2] /= fWeightSum;
    }
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host orchestration: validate inputs, allocate/bind textures and output
// arrays, upload kernel parameters, launch computeUnproject (dispatched on
// channel count and MLA grid type) and computeNormalizeDepthmap, and report
// the elapsed GPU time.
void CCUDAUnprojectFromDisparity_basic::UnprojectDisparities( CVImage_sptr& spPoints3D, CVImage_sptr& spPointsColors,
CVImage_sptr& spDepthmap, CVImage_sptr& spSynthImage,
const CVImage_sptr& spDisparties, const CVImage_sptr& spPlenopticImage)
{
// Ensure MONO+ALPHA or COLOR+ALPHA input image and single channel float disparities
if (((spPlenopticImage->CvMat().channels() != 1) && (spPlenopticImage->CvMat().channels() != 2) && (spPlenopticImage->CvMat().channels() != 4))
|| (spDisparties->type() != CV_32FC1))
{
throw CRuntimeException("CCUDAUnprojectFromDisparity_basic::Unproject : Invalid input images given.", ERuntimeExcpetionType::ILLEGAL_ARGUMENT);
}
hipError_t e;
// Allocate and bind textures for input
CCUDAImageTexture texInputImage(spPlenopticImage);
CCUDAImageTexture texInputDisparities(spDisparties, false); // can't use normalized texture fetch for float
// Allocate destination image for 3D points
if (spPoints3D == nullptr)
{
spPoints3D = CVImage_sptr(new CVImage(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::Points3D));
}
else
{
spPoints3D->Reinit(SImageDataDescriptor(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::Points3D));
}
CCUDAImageArray<float> arrOutPoints3D(spPoints3D);
/// \todo check for illegal use of \ref hipMemset. Observed CUDA SPECIAL : if set to 0 memory is compromised ('random' values occur?)
// NOTE(review): byte-wise fill with 0xFF makes every float a NaN bit
// pattern — presumably intended as an "invalid point" sentinel; confirm.
hipMemset(arrOutPoints3D.GetDevicePointer(), 255, spPoints3D->bytecount());
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutPoints3D memset : \"")
+ std::string(hipGetErrorString(e)));
}
// Allocate destination image for 3D points colors
if (spPointsColors == nullptr)
{
spPointsColors = CVImage_sptr(new CVImage(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::RGBA));
}
else
{
spPointsColors->Reinit(SImageDataDescriptor(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::RGBA));
}
CCUDAImageArray<float> arrOutPointsColors(spPointsColors);
// NOTE(review): byte-wise memset fills each float with pattern 0x01010101
// (a tiny denormal), not 1.0f — presumably just "non-zero init"; verify.
hipMemset(arrOutPointsColors.GetDevicePointer(), 1, spPointsColors->bytecount());
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutPointsColors memset : \"")
+ std::string(hipGetErrorString(e)));
}
// Allocate depthmap and TF
if (spDepthmap == nullptr)
{
spDepthmap = CVImage_sptr(new CVImage(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC1, EImageType::GRAYDEPTH));
}
else
{
spDepthmap->Reinit(SImageDataDescriptor(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC1, EImageType::GRAYDEPTH));
}
CCUDAImageArray<float> arrOutDepthmap(spDepthmap);
hipMemset(arrOutDepthmap.GetDevicePointer(), 0, spDepthmap->bytecount());
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutDepthmap memset : \"")
+ std::string(hipGetErrorString(e)));
}
// Allocate depthmap and TF
if (spSynthImage == nullptr)
{
spSynthImage = CVImage_sptr(new CVImage(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC4, EImageType::RGBA));
}
else
{
spSynthImage->Reinit(SImageDataDescriptor(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC4, EImageType::RGBA));
}
CCUDAImageArray<float> arrOutSynthImage(spSynthImage);
hipMemset(arrOutSynthImage.GetDevicePointer(), 0, spSynthImage->bytecount());
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutTotalFocus memset : \"")
+ std::string(hipGetErrorString(e)));
}
// Create CUDA parameter struct and upload to symbol (read by computeUnproject
// via the __constant__ globalParams).
SLocalParams cudaParams;
cudaParams.descrMla = m_descrMLA;
cudaParams.projTarget = m_projTarget;
cudaParams.vUpperLeft.x = 0;
cudaParams.vUpperLeft.y = 0;
cudaParams.vLowerRight.x = spPlenopticImage->cols() - 1;
cudaParams.vLowerRight.y = spPlenopticImage->rows() - 1;
cudaParams.fMinNormedDisp = m_fMinNormedDisp;
cudaParams.fMaxNormedDisp = m_fMaxNormedDisp;
hipMemcpyToSymbol(globalParams, &cudaParams, sizeof(SLocalParams));
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Estimate : CUDA copy-to-symbol : \"") + std::string(hipGetErrorString(e)));
}
// create and start timer
// NOTE(review): start/stop events are never destroyed (hipEventDestroy) —
// this leaks one event pair per call; confirm and fix upstream.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Create 32x32 pixel blocks covering the plenoptic input image.
dim3 threadsPerBlock = dim3(32, 32);
dim3 blocks = dim3(spPlenopticImage->cols() / 32 + 1, spPlenopticImage->rows() / 32 + 1);
// Call kernel with appropriate channel count (template dispatch on channel
// count and MLA grid type; 1- and 4-channel inputs share the <4, ...> path).
if ((spPlenopticImage->CvMat().channels() == 2)&&(m_descrMLA.eGridType == EGridType::HEXAGONAL))
{
hipLaunchKernelGGL(( computeUnproject<2, EGridType::HEXAGONAL>), dim3(blocks), dim3(threadsPerBlock), 0, 0, arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if ((spPlenopticImage->CvMat().channels() == 2)&&(m_descrMLA.eGridType == EGridType::RECTANGULAR))
{
hipLaunchKernelGGL(( computeUnproject<2, EGridType::RECTANGULAR>), dim3(blocks), dim3(threadsPerBlock), 0, 0, arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if (m_descrMLA.eGridType == EGridType::HEXAGONAL)
{
hipLaunchKernelGGL(( computeUnproject<4, EGridType::HEXAGONAL>), dim3(blocks), dim3(threadsPerBlock), 0, 0, arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if (m_descrMLA.eGridType == EGridType::RECTANGULAR)
{
hipLaunchKernelGGL(( computeUnproject<4, EGridType::RECTANGULAR>), dim3(blocks), dim3(threadsPerBlock), 0, 0, arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
// Wait for kernels to finish and check for errors
hipDeviceSynchronize();
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA 'computeUnproject' launch error : \"") + std::string(hipGetErrorString(e)));
}
// Call kernel for normalization of sum of depths/colors
// NOTE(review): 'blocks' was sized for the plenoptic image; if the target
// resolution exceeds it, part of the depth map stays unnormalized — confirm.
hipLaunchKernelGGL(( computeNormalizeDepthmap), dim3(blocks), dim3(threadsPerBlock), 0, 0, arrOutDepthmap.GetDevicePointer(), arrOutSynthImage.GetDevicePointer(),
m_projTarget.vecRes.x, m_projTarget.vecRes.y);
// Wait for kernels to finish
//hipDeviceSynchronize();
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA 'computeNormalizeSums' launch error : \"") + std::string(hipGetErrorString(e)));
}
// Query runtime (hipEventSynchronize blocks until 'stop' has been reached).
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("computeUnproject : %g [ms]\n", milliseconds);
if ((e = hipGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA timing error : \"") + std::string(hipGetErrorString(e)));
}
}
| 1ce952d947c56e4cb85fc2af39892a17e8501dc6.cu | /**
* Copyright 2019 Arne Petersen, Kiel University
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject
* to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
* LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
* NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "UnprojectFromDisparity_basic.hh"
#include "CudaMinifuncs.cuh"
#include "PIPInterOpCUDA/CUDAImageArray.hh"
#include "PIPInterOpCUDA/CUDAImageTexture.hh"
using namespace PIP;
struct SLocalParams
{
// Description of target camera
PIP::MTCamProjection<float> projTarget;
// Description for MLA (radius etc.)
SPlenCamDescription descrMla;
// Bounding box upper left pixel in plenoptic image
vec2<int> vUpperLeft;
// Bounding box upper left pixel in plenoptic image
vec2<int> vLowerRight;
// Lower clipping for normalized disparities, out of bounds depths are discarded
float fMinNormedDisp;
// Upper clipping for normalized disparities, out of bounds depths are discarded
float fMaxNormedDisp;
};
__device__ __constant__ SLocalParams globalParams;
__device__ __constant__ int2 globalOffsetsGridIdcsHex[6];
__device__ __constant__ int2 globalOffsetsGridIdcsReg[4];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<const int t_intChannels, const EGridType t_eGridType>
__global__ void computeUnproject(float* outputPoints3D, float* outputPointColors,
float* outputDepthmap, float* outputSynthImage,
cudaTextureObject_t texInputDisparities, cudaTextureObject_t texInputPlenopticImage,
const int intArrLength)
{
// Get pixel position and test 'in image'
vec2<float> vPixelPos_px;
vPixelPos_px.Set(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y);
// reject out of bounds pixels
if ((vPixelPos_px.x < float(globalParams.vUpperLeft.x)) || (vPixelPos_px.y < float(globalParams.vUpperLeft.y))
|| (vPixelPos_px.x > float(globalParams.vLowerRight.x)) || (vPixelPos_px.y > float(globalParams.vLowerRight.y)))
return;
// Initial disparity normalized with lens diameter (inter-lens distance)
float fDisparity_baselines = tex2D<float>(texInputDisparities, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
// Zero-disparity is invalid estimation
if ((fDisparity_baselines == 0.0f)
||(fDisparity_baselines < globalParams.fMinNormedDisp)
||(fDisparity_baselines > globalParams.fMaxNormedDisp))
{
return;
}
// Get index of source lens in grid
vec2<float> vGridIndex;
// comming from plenoptic image implies using mirco-image grid
vGridIndex = globalParams.descrMla.PixelToLensImageGrid<t_eGridType>(vPixelPos_px);
// round to integral lens index
vGridIndex = globalParams.descrMla.GridRound<t_eGridType>(vGridIndex);
// get pinhole properties of micro camera relative to main lens
PIP::MTCamProjection<float> projMicroLens = globalParams.descrMla.GetMicrocamProjection<t_eGridType>(vGridIndex);
// 3-space position relative to main lens in mm
vec3<float> vPos3D = projMicroLens.Unproject(vPixelPos_px,
globalParams.descrMla.fMicroLensPrincipalDist_px * globalParams.descrMla.fPixelsize_mm / fDisparity_baselines);
// project point through mainlens.
vPos3D = MapThinLens(globalParams.descrMla.fMainLensFLength_mm, vPos3D);
// Write output position
int index = int(vPixelPos_px.y) * (globalParams.vLowerRight.x - globalParams.vUpperLeft.x + 1) * 4
+ int(vPixelPos_px.x) * 4;
// if (index >= 4*intArrLength)
// printf("index %d, px [%g;%g]\n", index, vPixelPos_px.x, vPixelPos_px.y);
// output data format: normalize float with lens diameter
outputPoints3D[index + 0] = vPos3D.x;
outputPoints3D[index + 1] = vPos3D.y;
outputPoints3D[index + 2] = vPos3D.z;
outputPoints3D[index + 3] = 1.0f / fDisparity_baselines;
// Fetch color depending on input image channels
float4 vColor;
if (t_intChannels == 1)
{
float fIntensity = tex2D<float>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
vColor.x = fIntensity;
vColor.y = fIntensity;
vColor.z = fIntensity;
vColor.w = 1.0f;
}
else if (t_intChannels == 2)
{
float2 vIntensityAlpha = tex2D<float2>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
vColor.x = vIntensityAlpha.x;
vColor.y = vIntensityAlpha.x;
vColor.z = vIntensityAlpha.x;
vColor.w = vIntensityAlpha.y;
}
else
{
vColor = tex2D<float4>(texInputPlenopticImage, vPixelPos_px.x + 0.5f, vPixelPos_px.y + 0.5f);
}
// Set color for 3-space point corresponding to pixel pos
outputPointColors[index + 0] = vColor.x;
outputPointColors[index + 1] = vColor.y;
outputPointColors[index + 2] = vColor.z;
outputPointColors[index + 3] = vColor.w;
// Project 3-space point to virtual camera and add weighted (by raw images alpha) color/depth to AiF/depth image sum
vec2<float> vTargetPixel = globalParams.projTarget.Project(vPos3D);
if ((vTargetPixel.x < 1)||(vTargetPixel.y < 1)
||(vTargetPixel.x > float(globalParams.projTarget.vecRes.x-2))
||(vTargetPixel.y > float(globalParams.projTarget.vecRes.y-2)))
{
// projected fusion pixel is out of bounds
return;
}
index = int(vTargetPixel.y) * globalParams.projTarget.vecRes.x * 4 + int(vTargetPixel.x) * 4;
atomicAdd(outputSynthImage + index + 0, vColor.w*vColor.x);
atomicAdd(outputSynthImage + index + 1, vColor.w*vColor.y);
atomicAdd(outputSynthImage + index + 2, vColor.w*vColor.z);
atomicAdd(outputSynthImage + index + 3, vColor.w);
atomicAdd(outputDepthmap + index/4, vColor.w*vPos3D.z);
return;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeNormalizeDepthmap(float* inoutDepthSum, float* inoutColorsAndWeightSum, int iWidth, int iHeight)
{
// Get pixel position and test 'in image'
vec2<int> vPixelPos_px;
vPixelPos_px.Set(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y);
// reject out of bounds pixels
if ((vPixelPos_px.x < 0) || (vPixelPos_px.y < 0) || (vPixelPos_px.x >= iWidth) || (vPixelPos_px.y >= iHeight))
{
return;
}
const int index = vPixelPos_px.y * iWidth * 4 + vPixelPos_px.x * 4;
const float fWeight = inoutColorsAndWeightSum[index + 3];
if (fWeight != 0)
{
// Normalize depth sum by weight sum
inoutDepthSum[index / 4] /= fWeight;
// Normalize color sum by weight sum
inoutColorsAndWeightSum[index + 0] /= fWeight;
inoutColorsAndWeightSum[index + 1] /= fWeight;
inoutColorsAndWeightSum[index + 2] /= fWeight;
}
else
{
inoutDepthSum[index / 4] = 0;
inoutColorsAndWeightSum[index + 0] = 0;
inoutColorsAndWeightSum[index + 1] = 0;
inoutColorsAndWeightSum[index + 2] = 0;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void CCUDAUnprojectFromDisparity_basic::UnprojectDisparities( CVImage_sptr& spPoints3D, CVImage_sptr& spPointsColors,
CVImage_sptr& spDepthmap, CVImage_sptr& spSynthImage,
const CVImage_sptr& spDisparties, const CVImage_sptr& spPlenopticImage)
{
// Ensure MONO+ALPHA or COLOR+ALPHA input image and single channel float disparities
if (((spPlenopticImage->CvMat().channels() != 1) && (spPlenopticImage->CvMat().channels() != 2) && (spPlenopticImage->CvMat().channels() != 4))
|| (spDisparties->type() != CV_32FC1))
{
throw CRuntimeException("CCUDAUnprojectFromDisparity_basic::Unproject : Invalid input images given.", ERuntimeExcpetionType::ILLEGAL_ARGUMENT);
}
cudaError_t e;
// Allocate and bind textures for input
CCUDAImageTexture texInputImage(spPlenopticImage);
CCUDAImageTexture texInputDisparities(spDisparties, false); // can't use normalized texture fetch for float
// Allocate destination image for 3D points
if (spPoints3D == nullptr)
{
spPoints3D = CVImage_sptr(new CVImage(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::Points3D));
}
else
{
spPoints3D->Reinit(SImageDataDescriptor(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::Points3D));
}
CCUDAImageArray<float> arrOutPoints3D(spPoints3D);
/// \todo check for illegal use of \ref cudaMemset. Observed CUDA SPECIAL : if set to 0 memory is compromised ('random' values occur?)
cudaMemset(arrOutPoints3D.GetDevicePointer(), 255, spPoints3D->bytecount());
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutPoints3D memset : \"")
+ std::string(cudaGetErrorString(e)));
}
// Allocate destination image for 3D points colors
if (spPointsColors == nullptr)
{
spPointsColors = CVImage_sptr(new CVImage(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::RGBA));
}
else
{
spPointsColors->Reinit(SImageDataDescriptor(spPlenopticImage->cols(), spPlenopticImage->rows(),
CV_32FC4, EImageType::RGBA));
}
CCUDAImageArray<float> arrOutPointsColors(spPointsColors);
cudaMemset(arrOutPointsColors.GetDevicePointer(), 1, spPointsColors->bytecount());
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutPointsColors memset : \"")
+ std::string(cudaGetErrorString(e)));
}
// Allocate depthmap and TF
if (spDepthmap == nullptr)
{
spDepthmap = CVImage_sptr(new CVImage(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC1, EImageType::GRAYDEPTH));
}
else
{
spDepthmap->Reinit(SImageDataDescriptor(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC1, EImageType::GRAYDEPTH));
}
CCUDAImageArray<float> arrOutDepthmap(spDepthmap);
cudaMemset(arrOutDepthmap.GetDevicePointer(), 0, spDepthmap->bytecount());
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutDepthmap memset : \"")
+ std::string(cudaGetErrorString(e)));
}
// Allocate depthmap and TF
if (spSynthImage == nullptr)
{
spSynthImage = CVImage_sptr(new CVImage(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC4, EImageType::RGBA));
}
else
{
spSynthImage->Reinit(SImageDataDescriptor(m_projTarget.vecRes.x, m_projTarget.vecRes.y,
CV_32FC4, EImageType::RGBA));
}
CCUDAImageArray<float> arrOutSynthImage(spSynthImage);
cudaMemset(arrOutSynthImage.GetDevicePointer(), 0, spSynthImage->bytecount());
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA arrOutTotalFocus memset : \"")
+ std::string(cudaGetErrorString(e)));
}
// Create CUDA parameter struct and upload to symbol
SLocalParams cudaParams;
cudaParams.descrMla = m_descrMLA;
cudaParams.projTarget = m_projTarget;
cudaParams.vUpperLeft.x = 0;
cudaParams.vUpperLeft.y = 0;
cudaParams.vLowerRight.x = spPlenopticImage->cols() - 1;
cudaParams.vLowerRight.y = spPlenopticImage->rows() - 1;
cudaParams.fMinNormedDisp = m_fMinNormedDisp;
cudaParams.fMaxNormedDisp = m_fMaxNormedDisp;
cudaMemcpyToSymbol(globalParams, &cudaParams, sizeof(SLocalParams));
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Estimate : CUDA copy-to-symbol : \"") + std::string(cudaGetErrorString(e)));
}
// create and start timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Create 32x32 pixel blocks
dim3 threadsPerBlock = dim3(32, 32);
dim3 blocks = dim3(spPlenopticImage->cols() / 32 + 1, spPlenopticImage->rows() / 32 + 1);
// Call kernel with appropriate channel count
if ((spPlenopticImage->CvMat().channels() == 2)&&(m_descrMLA.eGridType == EGridType::HEXAGONAL))
{
computeUnproject<2, EGridType::HEXAGONAL><<<blocks, threadsPerBlock>>>(arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if ((spPlenopticImage->CvMat().channels() == 2)&&(m_descrMLA.eGridType == EGridType::RECTANGULAR))
{
computeUnproject<2, EGridType::RECTANGULAR><<<blocks, threadsPerBlock>>>(arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if (m_descrMLA.eGridType == EGridType::HEXAGONAL)
{
computeUnproject<4, EGridType::HEXAGONAL><<<blocks, threadsPerBlock>>>(arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
else if (m_descrMLA.eGridType == EGridType::RECTANGULAR)
{
computeUnproject<4, EGridType::RECTANGULAR><<<blocks, threadsPerBlock>>>(arrOutPoints3D.GetDevicePointer(),
arrOutPointsColors.GetDevicePointer(),
arrOutDepthmap.GetDevicePointer(),
arrOutSynthImage.GetDevicePointer(),
texInputDisparities.GetTextureObject(),
texInputImage.GetTextureObject(),
spPlenopticImage->rows()*spPlenopticImage->cols());
}
// Wait for kernels to finish and check for errors
cudaDeviceSynchronize();
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA 'computeUnproject' launch error : \"") + std::string(cudaGetErrorString(e)));
}
// Call kernel for normalization of sum of depths/colors
computeNormalizeDepthmap<<<blocks, threadsPerBlock>>>(arrOutDepthmap.GetDevicePointer(), arrOutSynthImage.GetDevicePointer(),
m_projTarget.vecRes.x, m_projTarget.vecRes.y);
// Wait for kernels to finish
//cudaDeviceSynchronize();
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA 'computeNormalizeSums' launch error : \"") + std::string(cudaGetErrorString(e)));
}
// Query runtime
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("computeUnproject : %g [ms]\n", milliseconds);
if ((e = cudaGetLastError()) != 0)
{
throw CRuntimeException(std::string("CCUDAUnprojectFromDisparity_basic::Unproject : CUDA timing error : \"") + std::string(cudaGetErrorString(e)));
}
}
|
3da13914ae5ced5c3b2018862274418b5b1ab7fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
#include "cudaSourceScalarPotential.h"
#define BLOCKDIMX 18
#define BLOCKDIMY 18
__global__ void cukern_applyScalarPotential(double *rho, double *E, double *px, double *py, double *pz, double *phi, int3 arraysize);
/*mass.gputag, mom(1).gputag, mom(2).gputag, mom(3).gputag, ener.gputag, run.potentialField.gputag, 2*run.time.dTime);*/
__global__ void cukern_applyScalarPotential_2D(double *rho, double *E, double *px, double *py, double *phi, int3 arraysize);
__constant__ __device__ double devLambda[7];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 }
// { rho_c < rho < rho_g : [(rho-rho_c)/(rho_rho_g-rho_c)]^2 }
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to surpress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
// Density below which we force gravity effects to zero
#define RHOMIN devLambda[3]
#define RHOGRAV devLambda[4]
// 1 / (rho_g - rho_c)
#define G1 devLambda[5]
// rho_c / (rho_g - rho_c)
#define G2 devLambda[6]
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs!=10) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaApplyScalarPotential(rho, E, px, py, pz, phi, dt, d3x, rhomin, rho_fullg)\n");
if(CHECK_CUDA_ERROR("entering cudaSourceScalarPotential") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSourceScalarPotential."); }
// Get source array info and create destination arrays
MGArray fluid[6];
int worked = MGA_accessMatlabArrays(prhs, 0, 5, fluid);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access input arrays."); }
// Each partition uses the same common parameters
double dt = *mxGetPr(prhs[6]);
double *dx = mxGetPr(prhs[7]);
double rhoMinimum = *mxGetPr(prhs[8]); /* minimum rho, rho_c */
double rhoFull = *mxGetPr(prhs[9]); /* rho_g */
worked = sourcefunction_ScalarPotential(&fluid[0], dt, dx, rhoMinimum, rhoFull);
}
int sourcefunction_ScalarPotential(MGArray *fluid, double dt, double *dx, double minRho, double rhoFullGravity)
{
dim3 gridsize, blocksize;
int3 arraysize;
int i, sub[6];
int worked;
double lambda[8];
lambda[0] = dt/(2.0*dx[0]);
lambda[1] = dt/(2.0*dx[1]);
lambda[2] = dt/(2.0*dx[2]);
lambda[3] = minRho; /* minimum rho, rho_c */
lambda[4] = rhoFullGravity; /* rho_g */
lambda[5] = 1.0/(lambda[4] - lambda[3]); /* 1/(rho_g - rho_c) */
lambda[6] = lambda[3]*lambda[5];
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
hipMemcpyToSymbol(devLambda, lambda, 7*sizeof(double), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("hipMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
int isThreeD = (fluid->dim[2] > 1);
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = arraysize.x / (blocksize.x - 2); gridsize.x += ((blocksize.x-2) * gridsize.x < arraysize.x);
gridsize.y = arraysize.y / (blocksize.y - 2); gridsize.y += ((blocksize.y-2) * gridsize.y < arraysize.y);
gridsize.z = 1;
if(isThreeD) {
hipLaunchKernelGGL(( cukern_applyScalarPotential), dim3(gridsize), dim3(blocksize), 0, 0,
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[5].devicePtr[i], arraysize);
} else {
hipLaunchKernelGGL(( cukern_applyScalarPotential_2D), dim3(gridsize), dim3(blocksize), 0, 0,
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[5].devicePtr[i], arraysize);
}
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "scalar potential kernel");
if(worked != SUCCESSFUL) break;
}
return CHECK_IMOGEN_ERROR(worked);
}
/*
* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*/
__global__ void cukern_applyScalarPotential(double *rho, double *E, double *px, double *py, double *pz, double *phi, int3 arraysize)
{
int myLocAddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
int myX = threadIdx.x + (BLOCKDIMX-2)*blockIdx.x - 1;
int myY = threadIdx.y + (BLOCKDIMY-2)*blockIdx.y - 1;
if((myX > arraysize.x) || (myY > arraysize.y)) return;
bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (BLOCKDIMX-1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKDIMY-1));
IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y);
myX = (myX + arraysize.x) % arraysize.x;
myY = (myY + arraysize.y) % arraysize.y;
int globAddr = myX + arraysize.x*myY;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
__shared__ double phiA[BLOCKDIMX*BLOCKDIMY];
__shared__ double phiB[BLOCKDIMX*BLOCKDIMY];
__shared__ double phiC[BLOCKDIMX*BLOCKDIMY];
__shared__ double locrho[BLOCKDIMX*BLOCKDIMY];
__shared__ double ener[BLOCKDIMX*BLOCKDIMY];
double *U; double *V; double *W;
double *temp;
U = phiA; V = phiB; W = phiC;
// Preload lower and middle planes
U[myLocAddr] = phi[globAddr + arraysize.x*arraysize.y*(arraysize.z-1)];
V[myLocAddr] = phi[globAddr];
__syncthreads();
int z;
int deltaz = arraysize.x*arraysize.y;
for(z = 0; z < arraysize.z; z++) {
if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1);
ener[myLocAddr] = 0;
locrho[myLocAddr] = rho[globAddr]; // rho(z) -> rho
W[myLocAddr] = px[globAddr]; // load px(z) -> phiC
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
deltaphi = devLambda[0]*(V[myLocAddr+1]-V[myLocAddr-1]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
ener[myLocAddr] -= deltaphi*W[myLocAddr]; // ener -= dt * px * dphi/dx
px[globAddr] = W[myLocAddr] - deltaphi*locrho[myLocAddr]; // store px <- px - dt * rho dphi/dx;
}
W[myLocAddr] = py[globAddr]; // load py(z) -> phiC
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
deltaphi = devLambda[1]*(V[myLocAddr+BLOCKDIMX]-V[myLocAddr-BLOCKDIMX]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
ener[myLocAddr] -= deltaphi*W[myLocAddr]; // ener -= dt * py * dphi/dy
py[globAddr] = W[myLocAddr] - deltaphi*locrho[myLocAddr]; // store py <- py - rho dphi/dy;
}
W[myLocAddr] = phi[globAddr + deltaz]; // load phi(z+1) -> phiC
__syncthreads();
deltaphi = devLambda[2]*(W[myLocAddr] - U[myLocAddr]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
__syncthreads();
U[myLocAddr] = pz[globAddr]; // load pz(z) -> phiA
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
E[globAddr] += ener[myLocAddr] - deltaphi*U[myLocAddr]; // Store E[x] <- ener - dt *pz * dphi/dz
pz[globAddr] = U[myLocAddr] - deltaphi*locrho[myLocAddr]; // store pz <- pz - rho dphi/dz;
}
temp = U; U = V; V = W; W = temp; // cyclically shift them back
globAddr += arraysize.x * arraysize.y;
}
}
/*
* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*/
__global__ void cukern_applyScalarPotential_2D(double *rho, double *E, double *px, double *py, double *phi, int3 arraysize)
{
int myLocAddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
int myX = threadIdx.x + (BLOCKDIMX-2)*blockIdx.x - 1;
int myY = threadIdx.y + (BLOCKDIMY-2)*blockIdx.y - 1;
if((myX > arraysize.x) || (myY > arraysize.y)) return;
bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (BLOCKDIMX-1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKDIMY-1));
IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y);
myX = (myX + arraysize.x) % arraysize.x;
myY = (myY + arraysize.y) % arraysize.y;
int globAddr = myX + arraysize.x*myY;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double tmpMom;
__shared__ double phiLoc[BLOCKDIMX*BLOCKDIMY];
__shared__ double rhoLoc[BLOCKDIMX*BLOCKDIMY];
double enerLoc = 0.0;
rhoLoc[myLocAddr] = rho[globAddr]; // rho(z) -> rho
phiLoc[myLocAddr] = phi[globAddr];
__syncthreads(); // Make sure loaded phi is visible
// coupling is exactly zero if rho <= rhomin
if(IWrite && (rhoLoc[myLocAddr] > rhomin)) {
// compute dt * (dphi/dx)
deltaphi = devLambda[0]*(phiLoc[myLocAddr+1]-phiLoc[myLocAddr-1]);
// reduce coupling for low densities
if(rhoLoc[myLocAddr] < RHOGRAV) { deltaphi *= (rhoLoc[myLocAddr]*G1 - G2); }
// Load px
tmpMom = px[globAddr];
// Store delta-E due to change in x momentum: ener -= (dt * dphi/dx) * (px = rho vx) -= rho delta-phi
enerLoc -= deltaphi*tmpMom;
// Update X momentum
px[globAddr] = tmpMom - deltaphi*rhoLoc[myLocAddr]; // store px <- px - dt * rho dphi/dx;
// Calculate dt*(dphi/dy)
deltaphi = devLambda[1]*(phiLoc[myLocAddr+BLOCKDIMX]-phiLoc[myLocAddr-BLOCKDIMX]);
// reduce G for low density
if(rhoLoc[myLocAddr] < RHOGRAV) { deltaphi *= (rhoLoc[myLocAddr]*G1 - G2); }
// Load py
tmpMom = py[globAddr];
// Update global energy array with this & previous delta-E values
E[globAddr] += enerLoc - deltaphi*tmpMom; // ener -= dt * py * dphi/dy
// Update Y momentum array
py[globAddr] = tmpMom - deltaphi*rhoLoc[myLocAddr]; // store py <- py - rho dphi/dy;
}
}
| 3da13914ae5ced5c3b2018862274418b5b1ab7fd.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
#include "cudaSourceScalarPotential.h"
#define BLOCKDIMX 18
#define BLOCKDIMY 18
__global__ void cukern_applyScalarPotential(double *rho, double *E, double *px, double *py, double *pz, double *phi, int3 arraysize);
/*mass.gputag, mom(1).gputag, mom(2).gputag, mom(3).gputag, ener.gputag, run.potentialField.gputag, 2*run.time.dTime);*/
__global__ void cukern_applyScalarPotential_2D(double *rho, double *E, double *px, double *py, double *phi, int3 arraysize);
__constant__ __device__ double devLambda[7];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 }
// { rho_c < rho < rho_g : [(rho-rho_c)/(rho_rho_g-rho_c)]^2 }
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to surpress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
// Density below which we force gravity effects to zero
#define RHOMIN devLambda[3]
#define RHOGRAV devLambda[4]
// 1 / (rho_g - rho_c)
#define G1 devLambda[5]
// rho_c / (rho_g - rho_c)
#define G2 devLambda[6]
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs!=10) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaApplyScalarPotential(rho, E, px, py, pz, phi, dt, d3x, rhomin, rho_fullg)\n");
if(CHECK_CUDA_ERROR("entering cudaSourceScalarPotential") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSourceScalarPotential."); }
// Get source array info and create destination arrays
MGArray fluid[6];
int worked = MGA_accessMatlabArrays(prhs, 0, 5, fluid);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access input arrays."); }
// Each partition uses the same common parameters
double dt = *mxGetPr(prhs[6]);
double *dx = mxGetPr(prhs[7]);
double rhoMinimum = *mxGetPr(prhs[8]); /* minimum rho, rho_c */
double rhoFull = *mxGetPr(prhs[9]); /* rho_g */
worked = sourcefunction_ScalarPotential(&fluid[0], dt, dx, rhoMinimum, rhoFull);
}
int sourcefunction_ScalarPotential(MGArray *fluid, double dt, double *dx, double minRho, double rhoFullGravity)
{
dim3 gridsize, blocksize;
int3 arraysize;
int i, sub[6];
int worked;
double lambda[8];
lambda[0] = dt/(2.0*dx[0]);
lambda[1] = dt/(2.0*dx[1]);
lambda[2] = dt/(2.0*dx[2]);
lambda[3] = minRho; /* minimum rho, rho_c */
lambda[4] = rhoFullGravity; /* rho_g */
lambda[5] = 1.0/(lambda[4] - lambda[3]); /* 1/(rho_g - rho_c) */
lambda[6] = lambda[3]*lambda[5];
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
cudaMemcpyToSymbol(devLambda, lambda, 7*sizeof(double), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("cudaMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
int isThreeD = (fluid->dim[2] > 1);
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = arraysize.x / (blocksize.x - 2); gridsize.x += ((blocksize.x-2) * gridsize.x < arraysize.x);
gridsize.y = arraysize.y / (blocksize.y - 2); gridsize.y += ((blocksize.y-2) * gridsize.y < arraysize.y);
gridsize.z = 1;
if(isThreeD) {
cukern_applyScalarPotential<<<gridsize, blocksize>>>(
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[5].devicePtr[i], arraysize);
} else {
cukern_applyScalarPotential_2D<<<gridsize, blocksize>>>(
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[5].devicePtr[i], arraysize);
}
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "scalar potential kernel");
if(worked != SUCCESSFUL) break;
}
return CHECK_IMOGEN_ERROR(worked);
}
/*
* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*/
__global__ void cukern_applyScalarPotential(double *rho, double *E, double *px, double *py, double *pz, double *phi, int3 arraysize)
{
int myLocAddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
int myX = threadIdx.x + (BLOCKDIMX-2)*blockIdx.x - 1;
int myY = threadIdx.y + (BLOCKDIMY-2)*blockIdx.y - 1;
if((myX > arraysize.x) || (myY > arraysize.y)) return;
bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (BLOCKDIMX-1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKDIMY-1));
IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y);
myX = (myX + arraysize.x) % arraysize.x;
myY = (myY + arraysize.y) % arraysize.y;
int globAddr = myX + arraysize.x*myY;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
__shared__ double phiA[BLOCKDIMX*BLOCKDIMY];
__shared__ double phiB[BLOCKDIMX*BLOCKDIMY];
__shared__ double phiC[BLOCKDIMX*BLOCKDIMY];
__shared__ double locrho[BLOCKDIMX*BLOCKDIMY];
__shared__ double ener[BLOCKDIMX*BLOCKDIMY];
double *U; double *V; double *W;
double *temp;
U = phiA; V = phiB; W = phiC;
// Preload lower and middle planes
U[myLocAddr] = phi[globAddr + arraysize.x*arraysize.y*(arraysize.z-1)];
V[myLocAddr] = phi[globAddr];
__syncthreads();
int z;
int deltaz = arraysize.x*arraysize.y;
for(z = 0; z < arraysize.z; z++) {
if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1);
ener[myLocAddr] = 0;
locrho[myLocAddr] = rho[globAddr]; // rho(z) -> rho
W[myLocAddr] = px[globAddr]; // load px(z) -> phiC
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
deltaphi = devLambda[0]*(V[myLocAddr+1]-V[myLocAddr-1]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
ener[myLocAddr] -= deltaphi*W[myLocAddr]; // ener -= dt * px * dphi/dx
px[globAddr] = W[myLocAddr] - deltaphi*locrho[myLocAddr]; // store px <- px - dt * rho dphi/dx;
}
W[myLocAddr] = py[globAddr]; // load py(z) -> phiC
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
deltaphi = devLambda[1]*(V[myLocAddr+BLOCKDIMX]-V[myLocAddr-BLOCKDIMX]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
ener[myLocAddr] -= deltaphi*W[myLocAddr]; // ener -= dt * py * dphi/dy
py[globAddr] = W[myLocAddr] - deltaphi*locrho[myLocAddr]; // store py <- py - rho dphi/dy;
}
W[myLocAddr] = phi[globAddr + deltaz]; // load phi(z+1) -> phiC
__syncthreads();
deltaphi = devLambda[2]*(W[myLocAddr] - U[myLocAddr]);
if(locrho[myLocAddr] < RHOGRAV) { deltaphi *= (locrho[myLocAddr]*G1 - G2); } // reduce G for low density
__syncthreads();
U[myLocAddr] = pz[globAddr]; // load pz(z) -> phiA
__syncthreads();
if(IWrite && (locrho[myLocAddr] > rhomin)) {
E[globAddr] += ener[myLocAddr] - deltaphi*U[myLocAddr]; // Store E[x] <- ener - dt *pz * dphi/dz
pz[globAddr] = U[myLocAddr] - deltaphi*locrho[myLocAddr]; // store pz <- pz - rho dphi/dz;
}
temp = U; U = V; V = W; W = temp; // cyclically shift them back
globAddr += arraysize.x * arraysize.y;
}
}
/*
* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*/
__global__ void cukern_applyScalarPotential_2D(double *rho, double *E, double *px, double *py, double *phi, int3 arraysize)
{
int myLocAddr = threadIdx.x + BLOCKDIMX*threadIdx.y;
int myX = threadIdx.x + (BLOCKDIMX-2)*blockIdx.x - 1;
int myY = threadIdx.y + (BLOCKDIMY-2)*blockIdx.y - 1;
if((myX > arraysize.x) || (myY > arraysize.y)) return;
bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (BLOCKDIMX-1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKDIMY-1));
IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y);
myX = (myX + arraysize.x) % arraysize.x;
myY = (myY + arraysize.y) % arraysize.y;
int globAddr = myX + arraysize.x*myY;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double tmpMom;
__shared__ double phiLoc[BLOCKDIMX*BLOCKDIMY];
__shared__ double rhoLoc[BLOCKDIMX*BLOCKDIMY];
double enerLoc = 0.0;
rhoLoc[myLocAddr] = rho[globAddr]; // rho(z) -> rho
phiLoc[myLocAddr] = phi[globAddr];
__syncthreads(); // Make sure loaded phi is visible
// coupling is exactly zero if rho <= rhomin
if(IWrite && (rhoLoc[myLocAddr] > rhomin)) {
// compute dt * (dphi/dx)
deltaphi = devLambda[0]*(phiLoc[myLocAddr+1]-phiLoc[myLocAddr-1]);
// reduce coupling for low densities
if(rhoLoc[myLocAddr] < RHOGRAV) { deltaphi *= (rhoLoc[myLocAddr]*G1 - G2); }
// Load px
tmpMom = px[globAddr];
// Store delta-E due to change in x momentum: ener -= (dt * dphi/dx) * (px = rho vx) -= rho delta-phi
enerLoc -= deltaphi*tmpMom;
// Update X momentum
px[globAddr] = tmpMom - deltaphi*rhoLoc[myLocAddr]; // store px <- px - dt * rho dphi/dx;
// Calculate dt*(dphi/dy)
deltaphi = devLambda[1]*(phiLoc[myLocAddr+BLOCKDIMX]-phiLoc[myLocAddr-BLOCKDIMX]);
// reduce G for low density
if(rhoLoc[myLocAddr] < RHOGRAV) { deltaphi *= (rhoLoc[myLocAddr]*G1 - G2); }
// Load py
tmpMom = py[globAddr];
// Update global energy array with this & previous delta-E values
E[globAddr] += enerLoc - deltaphi*tmpMom; // ener -= dt * py * dphi/dy
// Update Y momentum array
py[globAddr] = tmpMom - deltaphi*rhoLoc[myLocAddr]; // store py <- py - rho dphi/dy;
}
}
|
d95e0c1d3da60a8cd62586361ad48402ada567c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_print( size_t const* p, int n)
{
printf("ulong: %d ",n);
for(int i=0; i<n; i++)
printf("%lu ",*(p+i));
} | d95e0c1d3da60a8cd62586361ad48402ada567c0.cu | #include "includes.h"
__global__ void kernel_print( size_t const* p, int n)
{
printf("ulong: %d ",n);
for(int i=0; i<n; i++)
printf("%lu ",*(p+i));
} |
0143719fb38ea067d2a614080b4f94b09a70dafa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file s6_gpu_standalone.cu
* SERENDIP6 - Stand-Alone GPU Implementation
*
* @author Jayanth Chennamangalam
* @date 2013.07.19
*/
#include "s6_gpu_standalone.tex.h"
/* ------------------------------------------------------------------------
 * Global program state, shared between main(), Init(), the I/O helpers and
 * the kernels below.  The program is single-threaded on the host, so plain
 * globals are used throughout.  _d suffix marks device pointers.
 * ---------------------------------------------------------------------- */
int g_iIsDataReadDone = FALSE;      /* set when the input buffer is exhausted */
int g_iIsProcDone = FALSE;          /* set when processing may stop */
int g_iMaxThreadsPerBlock = 0;      /* queried from the device in Init() */
char4* g_pc4InBuf = NULL;           /* whole input file, loaded to host RAM */
char4* g_pc4InBufRead = NULL;       /* host read cursor into g_pc4InBuf */
int g_iSizeFile = 0;
int g_iReadCount = 0;
char4* g_pc4Data_d = NULL; /* raw data starting address */
char4* g_pc4DataRead_d = NULL; /* raw data read pointer */
int g_iNFFT = DEF_LEN_SPEC;
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 g_dimBAccum(1, 1, 1);
dim3 g_dimGAccum(1, 1);
float4* g_pf4FFTIn_d = NULL;
float4* g_pf4FFTOut_d = NULL;
hipfftHandle g_stPlan = {0};
float4** g_ppf4SumStokes = NULL;    /* per-channel-block host accumulators */
float4** g_ppf4SumStokes_d = NULL;  /* per-channel-block device accumulators */
char g_acFileData[256] = {0};
int g_iNumSubBands = DEF_NUM_SUBBANDS;
long int g_lReqBytes = 0;
int g_iNumConcFFT = 4; /* number of channels that are to be FFT'd concurrently */
int g_iNumChanBlocks = 0;           /* g_iNumSubBands / g_iNumConcFFT */
int g_iAccID = 0;                   /* channel block currently accumulated */
int g_iReadID = 0;                  /* channel block to be read next */
/* 1-D texture through which CopyDataForFFT() reads the raw device data */
texture<char4, 1, hipReadModeNormalizedFloat> g_stTexRefData;
hipChannelFormatDesc g_stChanDescData;
#if PLOT
float* g_pfSumPowX = NULL;
float* g_pfSumPowY = NULL;
float* g_pfSumStokesRe = NULL;
float* g_pfSumStokesIm = NULL;
float* g_pfFreq = NULL;
float g_fFSamp = 1.0; /* 1 [frequency] */
#endif
#if BENCHMARKING
float g_fTimeCpIn = 0.0;
float g_fTotCpIn = 0.0;
int g_iCountCpIn = 0;
hipEvent_t g_cuStart;
hipEvent_t g_cuStop;
#endif
/* Program entry point: parses options, initialises the GPU via Init(), then
   loops over the input - for each channel block it converts raw samples to
   FFT input (CopyDataForFFT), runs the batched FFT (DoFFT), accumulates
   Stokes parameters (Accumulate), and periodically dumps/plots the
   accumulated spectra.  Returns EXIT_SUCCESS/EXIT_FAILURE. */
int main(int argc, char *argv[])
{
int iRet = EXIT_SUCCESS;
int iSpecCount = 0;                 /* spectra accumulated since last dump */
int iNumAcc = DEF_ACC;              /* spectra to accumulate before dumping */
hipError_t iCUDARet = hipSuccess;
#if BENCHMARKING
float fTimeCpInFFT = 0.0;
float fTotCpInFFT = 0.0;
int iCountCpInFFT = 0;
float fTimeFFT = 0.0;
float fTotFFT = 0.0;
int iCountFFT = 0;
float fTimeCpOut = 0.0;
float fTotCpOut = 0.0;
int iCountCpOut = 0;
float fTimeAccum = 0.0;
float fTotAccum = 0.0;
int iCountAccum = 0;
#else
struct timeval stStart = {0};
struct timeval stStop = {0};
#endif
#if OUTFILE
int iFileSpec = 0;
#endif
const char *pcProgName = NULL;
int iNextOpt = 0;
/* valid short options */
#if PLOT
const char* const pcOptsShort = "hb:n:a:s:";
#else
const char* const pcOptsShort = "hb:n:a:";
#endif
/* valid long options */
const struct option stOptsLong[] = {
{ "help", 0, NULL, 'h' },
{ "nsub", 1, NULL, 'b' },
{ "nfft", 1, NULL, 'n' },
{ "nacc", 1, NULL, 'a' },
#if PLOT
{ "fsamp", 1, NULL, 's' },
#endif
{ NULL, 0, NULL, 0 }
};
/* get the filename of the program from the argument list */
pcProgName = argv[0];
/* parse the input */
do
{
iNextOpt = getopt_long(argc, argv, pcOptsShort, stOptsLong, NULL);
switch (iNextOpt)
{
case 'h': /* -h or --help */
/* print usage info and terminate */
PrintUsage(pcProgName);
return EXIT_SUCCESS;
case 'b': /* -b or --nsub */
/* set option */
g_iNumSubBands = (int) atoi(optarg);
break;
case 'n': /* -n or --nfft */
/* set option */
g_iNFFT = (int) atoi(optarg);
break;
case 'a': /* -a or --nacc */
/* set option */
iNumAcc = (int) atoi(optarg);
break;
#if PLOT
case 's': /* -s or --fsamp */
/* set option */
g_fFSamp = (float) atof(optarg);
break;
#endif
case '?': /* user specified an invalid option */
/* print usage info and terminate with error */
(void) fprintf(stderr, "ERROR: Invalid option!\n");
PrintUsage(pcProgName);
return EXIT_FAILURE;
case -1: /* done with options */
break;
default: /* unexpected */
assert(0);
}
} while (iNextOpt != -1);
/* no arguments */
if (argc <= optind)
{
(void) fprintf(stderr, "ERROR: Data file not specified!\n");
PrintUsage(pcProgName);
return EXIT_FAILURE;
}
/* copy the data-file path; explicit NUL in case argv[optind] fills it */
(void) strncpy(g_acFileData, argv[optind], 256);
g_acFileData[255] = '\0';
#if BENCHMARKING
(void) printf("* Benchmarking run commencing...\n");
#endif
/* initialise */
iRet = Init();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! Init failed!\n");
CleanUp();
return EXIT_FAILURE;
}
#if OUTFILE
iFileSpec = open("spec.dat",
O_CREAT | O_TRUNC | O_WRONLY,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (iFileSpec < EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! Opening spectrum file failed!\n");
CleanUp();
return EXIT_FAILURE;
}
#endif
#if (!BENCHMARKING)
(void) gettimeofday(&stStart, NULL);
#endif
/* main processing loop: one iteration per channel block of data */
while (!g_iIsProcDone)
{
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart));
#endif
/* unpack raw device data into the float4 FFT input array */
hipLaunchKernelGGL(( CopyDataForFFT), dim3(g_dimGCopy), dim3(g_dimBCopy), 0, 0, g_pc4DataRead_d,
g_pf4FFTIn_d);
CUDASafeCallWithCleanUp(hipDeviceSynchronize());
iCUDARet = hipGetLastError();
if (iCUDARet != hipSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
hipGetErrorString(iCUDARet));
/* free resources */
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeCpInFFT,
g_cuStart,
g_cuStop));
fTotCpInFFT += fTimeCpInFFT;
++iCountCpInFFT;
#endif
/* do fft */
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart));
#endif
iRet = DoFFT();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! FFT failed!\n");
#if OUTFILE
(void) close(iFileSpec);
#endif
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeFFT,
g_cuStart,
g_cuStop));
fTotFFT += fTimeFFT;
++iCountFFT;
#endif
/* accumulate power x, power y, stokes, if the blanking bit is
not set */
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart));
#endif
hipLaunchKernelGGL(( Accumulate), dim3(g_dimGAccum), dim3(g_dimBAccum), 0, 0, g_pf4FFTOut_d,
g_ppf4SumStokes_d[g_iAccID]);
CUDASafeCallWithCleanUp(hipDeviceSynchronize());
iCUDARet = hipGetLastError();
if (iCUDARet != hipSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
hipGetErrorString(iCUDARet));
/* free resources */
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeAccum,
g_cuStart,
g_cuStop));
fTotAccum += fTimeAccum;
++iCountAccum;
#endif
/* count a new spectrum once per pass over all channel blocks */
if (0 == g_iAccID)
{
++iSpecCount;
}
if (iSpecCount == iNumAcc)
{
/* dump to buffer */
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart));
#endif
CUDASafeCallWithCleanUp(hipMemcpy(g_ppf4SumStokes[g_iAccID],
g_ppf4SumStokes_d[g_iAccID],
(g_iNumConcFFT
* g_iNFFT
* sizeof(float4)),
hipMemcpyDeviceToHost));
#if PLOT
/* NOTE: Plot() will modify data! */
Plot(g_iAccID);
(void) usleep(500000);
#endif
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(hipEventElapsedTime(&fTimeCpOut,
g_cuStart,
g_cuStop));
fTotCpOut += fTimeCpOut;
++iCountCpOut;
#endif
#if OUTFILE
(void) write(iFileSpec,
g_ppf4SumStokes[g_iAccID],
g_iNumConcFFT * g_iNFFT * sizeof(float4));
#endif
/* reset time */
if ((g_iNumChanBlocks - 1) == g_iAccID)
{
iSpecCount = 0;
}
/* zero accumulators */
CUDASafeCallWithCleanUp(hipMemset(g_ppf4SumStokes_d[g_iAccID],
'\0',
(g_iNumConcFFT
* g_iNFFT
* sizeof(float4))));
}
if (!g_iIsDataReadDone)
{
/* read data from input buffer */
iRet = ReadData();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Data reading failed!\n");
break;
}
g_iAccID = (g_iAccID + 1) % g_iNumChanBlocks;
}
else /* no more data to be read */
{
g_iIsProcDone = TRUE;
}
}
#if (!BENCHMARKING)
(void) gettimeofday(&stStop, NULL);
(void) printf("Time taken (barring Init()): %gs\n",
((stStop.tv_sec + (stStop.tv_usec * USEC2SEC))
- (stStart.tv_sec + (stStart.tv_usec * USEC2SEC))))
;
#endif
#if OUTFILE
(void) close(iFileSpec);
#endif
CleanUp();
#if BENCHMARKING
PrintBenchmarks(fTotCpInFFT,
iCountCpInFFT,
fTotFFT,
iCountFFT,
fTotAccum,
iCountAccum,
fTotCpOut,
iCountCpOut);
CUDASafeCallWithCleanUp(hipEventDestroy(g_cuStart));
CUDASafeCallWithCleanUp(hipEventDestroy(g_cuStop));
(void) printf("* Events destroyed.\n");
(void) printf("* Benchmarking run completed.\n");
#endif
return EXIT_SUCCESS;
}
/* function that creates the FFT plan, allocates memory, initialises counters,
etc. */
/* One-time initialisation: registers signal handlers, selects the single
 * CUDA device, verifies that the requested buffers fit in device memory,
 * allocates host and device buffers, loads the input file, binds the
 * raw-data texture and creates the batched FFT plan.  Reads the g_* globals
 * configured by command-line parsing in main().
 * Returns EXIT_SUCCESS/EXIT_FAILURE; on failure the caller runs CleanUp(),
 * which releases whatever was allocated up to the failure point. */
int Init()
{
    int iDevCount = 0;
    hipDeviceProp_t stDevProp = {0};
    int iRet = EXIT_SUCCESS;
    hipfftResult iCUFFTRet = HIPFFT_SUCCESS;
    size_t lTotCUDAMalloc = 0;
    int i = 0;

    iRet = RegisterSignalHandlers();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n");
        return EXIT_FAILURE;
    }

    /* since CUDASafeCallWithCleanUp() calls hipGetErrorString(),
       it should not be used here - will cause crash if no CUDA device is
       found */
    (void) hipGetDeviceCount(&iDevCount);
    if (0 == iDevCount)
    {
        (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
        return EXIT_FAILURE;
    }
    else if (iDevCount > 1)
    {
        /* TODO: figure this out */
        (void) fprintf(stderr,
                       "ERROR: More than one CUDA-capable device "
                       "found! Don't know how to proceed!\n");
        return EXIT_FAILURE;
    }

    /* TODO: make it automagic */
    CUDASafeCallWithCleanUp(hipSetDevice(0));

#if BENCHMARKING
    CUDASafeCallWithCleanUp(hipEventCreate(&g_cuStart));
    CUDASafeCallWithCleanUp(hipEventCreate(&g_cuStop));
    (void) printf("* Events created.\n");
#endif

    CUDASafeCallWithCleanUp(hipGetDeviceProperties(&stDevProp, 0));
    g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;

    /* number of channel blocks, each holding g_iNumConcFFT sub-bands */
    g_iNumChanBlocks = g_iNumSubBands / g_iNumConcFFT;

    /* up-front accounting of device memory: raw input, FFT input, FFT
       output, and one channel block's Stokes accumulator */
    lTotCUDAMalloc += g_iNumConcFFT * g_iNFFT * sizeof(char4);
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    if (lTotCUDAMalloc > stDevProp.totalGlobalMem)
    {
        (void) fprintf(stderr,
                       "ERROR: Total memory requested on GPU is %g of a "
                       "possible %g MB. Memory request break-up:\n"
                       " Input data buffer: %g MB\n"
                       " FFT in array: %g MB\n"
                       " FFT out array: %g MB\n"
                       " Stokes output array: %g MB\n",
                       ((float) lTotCUDAMalloc) / (1024 * 1024),
                       ((float) stDevProp.totalGlobalMem) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(char4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024));
        return EXIT_FAILURE;
    }
#ifdef DEBUG
    else
    {
        (void) printf("INFO: Total memory requested on GPU is %g of a "
                      "possible %g MB. Memory request break-up:\n"
                      " Input data buffer: %g MB\n"
                      " FFT in array: %g MB\n"
                      " FFT out array: %g MB\n"
                      " Stokes output array: %g MB\n",
                      ((float) lTotCUDAMalloc) / (1024 * 1024),
                      ((float) stDevProp.totalGlobalMem) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(char4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024));
    }
#endif

    /* allocate memory for data array - 32MB is the block size for the VEGAS
       input buffer */
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pc4Data_d, g_iNumConcFFT * g_iNFFT * sizeof(char4)));
    g_pc4DataRead_d = g_pc4Data_d;

    /* load data into memory */
    iRet = LoadDataToMem();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR! Loading to memory failed!\n");
        return EXIT_FAILURE;
    }

    /* calculate kernel parameters: one thread per element, capped at the
       device's per-block limit */
    if (g_iNFFT < g_iMaxThreadsPerBlock)
    {
        g_dimBCopy.x = g_iNFFT;
        g_dimBAccum.x = g_iNFFT;
    }
    else
    {
        g_dimBCopy.x = g_iMaxThreadsPerBlock;
        g_dimBAccum.x = g_iMaxThreadsPerBlock;
    }
    g_dimGCopy.x = (g_iNumConcFFT * g_iNFFT) / g_iMaxThreadsPerBlock;
    g_dimGAccum.x = (g_iNumConcFFT * g_iNFFT) / g_iMaxThreadsPerBlock;

    /* prime the device with the first channel block */
    iRet = ReadData();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Reading data failed!\n");
        return EXIT_FAILURE;
    }

    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTIn_d,
                                      g_iNumConcFFT
                                      * g_iNFFT
                                      * sizeof(float4)));
    CUDASafeCallWithCleanUp(hipMalloc((void **) &g_pf4FFTOut_d,
                                      g_iNumConcFFT
                                      * g_iNFFT
                                      * sizeof(float4)));

    /* BUGFIX: the element type of this array is float4* (pointer to float4),
       so each slot is sizeof(float4 *), not sizeof(float4 **); the two are
       the same size on common ABIs, but the original type was wrong */
    g_ppf4SumStokes = (float4 **) malloc(g_iNumChanBlocks * sizeof(float4 *));
    if (NULL == g_ppf4SumStokes)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }
    for (i = 0; i < g_iNumChanBlocks; ++i)
    {
        g_ppf4SumStokes[i] = (float4 *) malloc(g_iNumConcFFT
                                               * g_iNFFT
                                               * sizeof(float4));
        if (NULL == g_ppf4SumStokes[i])
        {
            (void) fprintf(stderr,
                           "ERROR: Memory allocation failed! %s.\n",
                           strerror(errno));
            return EXIT_FAILURE;
        }
    }

    /* BUGFIX: same element-size correction as above */
    g_ppf4SumStokes_d = (float4 **) malloc(g_iNumChanBlocks * sizeof(float4 *));
    if (NULL == g_ppf4SumStokes_d)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }
    for (i = 0; i < g_iNumChanBlocks; ++i)
    {
        CUDASafeCallWithCleanUp(hipMalloc((void **) &g_ppf4SumStokes_d[i],
                                          g_iNumConcFFT
                                          * g_iNFFT
                                          * sizeof(float4)));
        CUDASafeCallWithCleanUp(hipMemset(g_ppf4SumStokes_d[i],
                                          '\0',
                                          g_iNumConcFFT
                                          * g_iNFFT
                                          * sizeof(float4)));
    }

    /* NOTE(review): the channel descriptor is created for a single signed
       char, while the texture reference g_stTexRefData is declared over
       char4 - confirm this is intentional (a char4 descriptor would be the
       natural match for the texture's element type) */
    g_stChanDescData = hipCreateChannelDesc<signed char>();
    CUDASafeCallWithCleanUp(hipBindTexture(0,
                                           &g_stTexRefData,
                                           g_pc4Data_d,
                                           &g_stChanDescData,
                                           g_iNumConcFFT * g_iNFFT * sizeof(char4)));

    /* create plan */
    iCUFFTRet = hipfftPlanMany(&g_stPlan,
                               FFTPLAN_RANK,
                               &g_iNFFT,
                               &g_iNFFT,
                               FFTPLAN_ISTRIDE,
                               FFTPLAN_IDIST,
                               &g_iNFFT,
                               FFTPLAN_OSTRIDE,
                               FFTPLAN_ODIST,
                               HIPFFT_C2C,
                               FFTPLAN_BATCH);
    if (iCUFFTRet != HIPFFT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Plan creation failed!\n");
        return EXIT_FAILURE;
    }

#if PLOT
    iRet = InitPlot();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Plotting initialisation failed!\n");
        return EXIT_FAILURE;
    }
#endif

    return EXIT_SUCCESS;
}
/* function that reads data from the data file and loads it into memory during
initialisation */
/* Loads the entire input data file (g_acFileData) into a newly-allocated
 * host buffer (g_pc4InBuf) and points the read cursor (g_pc4InBufRead) at
 * its start.  Sets g_iSizeFile to the file size from stat().
 * Returns EXIT_SUCCESS/EXIT_FAILURE; on failure, memory already allocated
 * here is released by CleanUp(), which the caller invokes. */
int LoadDataToMem()
{
    struct stat stFileStats = {0};
    int iRet = EXIT_SUCCESS;
    int iFileData = 0;

    /* determine the file size so the whole file can be read in one go */
    iRet = stat(g_acFileData, &stFileStats);
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Failed to stat %s: %s!\n",
                       g_acFileData,
                       strerror(errno));
        return EXIT_FAILURE;
    }

    g_iSizeFile = stFileStats.st_size;
    g_pc4InBuf = (char4*) malloc(g_iSizeFile);
    if (NULL == g_pc4InBuf)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }

    iFileData = open(g_acFileData, O_RDONLY);
    if (iFileData < EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR! Opening data file %s failed! %s.\n",
                       g_acFileData,
                       strerror(errno));
        return EXIT_FAILURE;
    }

    iRet = read(iFileData, g_pc4InBuf, g_iSizeFile);
    if (iRet < EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Data reading failed! %s.\n",
                       strerror(errno));
        (void) close(iFileData);
        return EXIT_FAILURE;
    }
    else if (iRet != stFileStats.st_size)
    {
        /* BUGFIX: a short read used to be (mis)reported as "File read
           done!"; report it for what it is - POSIX read() may legitimately
           return fewer bytes than requested */
        (void) fprintf(stderr,
                       "WARNING: Partial read of %s: expected %d bytes, "
                       "read %d bytes!\n",
                       g_acFileData,
                       g_iSizeFile,
                       iRet);
    }
    (void) close(iFileData);

    /* set the read pointer to the beginning of the data array */
    g_pc4InBufRead = g_pc4InBuf;

    return EXIT_SUCCESS;
}
/* function that reads data from input buffer */
/* Copies the next channel block of raw data from the host staging buffer to
   the device (strided, picking g_iNumConcFFT of g_iNumSubBands sub-bands per
   time sample), advances the host read cursor, and sets g_iIsDataReadDone
   when the remaining data cannot fill another block.
   Currently always returns EXIT_SUCCESS. */
int ReadData()
{
//printf("%d, %ld\n", g_iAccID, (g_pc4InBufRead-g_pc4InBuf)*sizeof(char4));
/* write new data to the write buffer */
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStart));
#endif
/* strided copy of g_iNumConcFFT channels (* 2 polarizations) */
CUDASafeCallWithCleanUp(hipMemcpy2D(g_pc4Data_d,
g_iNumConcFFT * sizeof(char4), /* dest. pitch */
g_pc4InBufRead,
g_iNumSubBands * sizeof(char4), /* src. pitch */
g_iNumConcFFT * sizeof(char4),
g_iNFFT,
hipMemcpyHostToDevice));
#if BENCHMARKING
CUDASafeCallWithCleanUp(hipEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(hipEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(hipEventElapsedTime(&g_fTimeCpIn,
g_cuStart,
g_cuStop));
g_fTotCpIn += g_fTimeCpIn;
++g_iCountCpIn;
#endif
/* update the read pointer to where data needs to be read in from, in the
next read */
if (g_iNumConcFFT == g_iNumSubBands)
{
/* all sub-bands handled at once: advance one whole time block */
g_pc4InBufRead += (g_iNumSubBands * g_iNFFT);
}
else
{
if ((g_iNumChanBlocks - 1) == g_iReadID)
{
/* last channel block of this time block: wrap to the first channel
block of the next time block */
g_pc4InBufRead += (g_iNumConcFFT % g_iNumSubBands);
g_pc4InBufRead += (g_iNumSubBands * (g_iNFFT - 1));
}
else
{
/* step to the next channel block within the same time block */
g_pc4InBufRead += g_iNumConcFFT;
}
}
/* whenever there is a read, reset the read pointer to the beginning */
g_pc4DataRead_d = g_pc4Data_d;
++g_iReadCount;
/* a buggy way to check for end of data */
if ((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead)
<= (g_iNumSubBands * g_iNFFT * sizeof(char4)))
{
(void) printf("Data read done! Read count = %d\n", g_iReadCount);
g_iIsDataReadDone = TRUE;
}
g_iReadID = (g_iReadID + 1) % g_iNumChanBlocks;
return EXIT_SUCCESS;
}
/* Kernel: converts one channel block of raw 8-bit data to float4 FFT input.
 * The data is fetched through the 1-D texture bound to the raw device buffer
 * (g_stTexRefData, declared with hipReadModeNormalizedFloat, so the char
 * components are normalised to floats by the texture unit).
 * Expected launch: g_dimGCopy x g_dimBCopy, one thread per element of
 * g_iNumConcFFT * g_iNFFT (grid sized so no bounds check is needed).
 * pc4Data is unused on the texture path but kept for the direct-copy
 * fallback below (and to preserve the launch signature). */
__global__ void CopyDataForFFT(char4 *pc4Data,
                               float4 *pf4FFTIn)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    /*
    pf4FFTIn[i].x = (float) pc4Data[i].x;
    pf4FFTIn[i].y = (float) pc4Data[i].y;
    pf4FFTIn[i].z = (float) pc4Data[i].z;
    pf4FFTIn[i].w = (float) pc4Data[i].w;
    */
    /* BUGFIX: this used to write to *pf4FFTIn (element 0), so every thread
       raced on a single element and the rest of the FFT input was never
       filled; write to element i, as the direct copy above always did */
    pf4FFTIn[i] = tex1Dfetch(g_stTexRefData, i);
    return;
}
/* function that performs the FFT */
/* Executes the forward C2C FFT plan created in Init():
   g_pf4FFTIn_d -> g_pf4FFTOut_d.  Each float4 element is reinterpreted as
   two hipfftComplex values (hence the casts), so the batched plan transforms
   both interleaved streams.  Returns EXIT_SUCCESS/EXIT_FAILURE. */
int DoFFT()
{
hipfftResult iCUFFTRet = HIPFFT_SUCCESS;
/* execute plan */
iCUFFTRet = hipfftExecC2C(g_stPlan,
(hipfftComplex*) g_pf4FFTIn_d,
(hipfftComplex*) g_pf4FFTOut_d,
HIPFFT_FORWARD);
if (iCUFFTRet != HIPFFT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! FFT failed!\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/* Kernel: folds one FFT output spectrum into the running Stokes accumulator.
 * Each float4 of pf4FFTOut holds (Re(X), Im(X), Re(Y), Im(Y)) for one
 * channel; each float4 of pf4SumStokes accumulates
 * (|X|^2, |Y|^2, Re(XY*), Im(XY*)).
 * Expected launch: g_dimGAccum x g_dimBAccum, one thread per channel
 * (grid sized so no bounds check is needed). */
__global__ void Accumulate(float4 *pf4FFTOut,
                           float4 *pf4SumStokes)
{
    const int iIdx = (blockIdx.x * blockDim.x) + threadIdx.x;

    /* unpack the complex X and Y polarisation samples for this channel */
    const float4 f4Spec = pf4FFTOut[iIdx];
    const float fReX = f4Spec.x;
    const float fImX = f4Spec.y;
    const float fReY = f4Spec.z;
    const float fImY = f4Spec.w;

    float4 f4Acc = pf4SumStokes[iIdx];
    f4Acc.x += (fReX * fReX) + (fImX * fImX);   /* Re(X)^2 + Im(X)^2 */
    f4Acc.y += (fReY * fReY) + (fImY * fImY);   /* Re(Y)^2 + Im(Y)^2 */
    f4Acc.z += (fReX * fReY) + (fImX * fImY);   /* Re(XY*) */
    f4Acc.w += (fImX * fReY) - (fReX * fImY);   /* Im(XY*) */
    pf4SumStokes[iIdx] = f4Acc;
}
/* function that frees resources */
/* Releases every resource the program may hold: host buffers, device
   buffers, the FFT plan and (when plotting) the PGPLOT state.  Each pointer
   is NULLed after release, so the function is safe to call more than once
   and from any failure path. */
void CleanUp()
{
int i = 0;
/* free resources */
if (g_pc4InBuf != NULL)
{
free(g_pc4InBuf);
g_pc4InBuf = NULL;
}
if (g_pc4Data_d != NULL)
{
(void) hipFree(g_pc4Data_d);
g_pc4Data_d = NULL;
}
if (g_pf4FFTIn_d != NULL)
{
(void) hipFree(g_pf4FFTIn_d);
g_pf4FFTIn_d = NULL;
}
if (g_pf4FFTOut_d != NULL)
{
(void) hipFree(g_pf4FFTOut_d);
g_pf4FFTOut_d = NULL;
}
if (g_ppf4SumStokes != NULL)
{
/* free each per-channel-block host accumulator, then the pointer array */
for (i = 0; i < g_iNumChanBlocks; ++i)
{
if (g_ppf4SumStokes[i] != NULL)
{
free(g_ppf4SumStokes[i]);
g_ppf4SumStokes[i] = NULL;
}
}
free(g_ppf4SumStokes);
g_ppf4SumStokes = NULL;
}
if (g_ppf4SumStokes_d != NULL)
{
/* free each per-channel-block device accumulator, then the pointer array */
for (i = 0; i < g_iNumChanBlocks; ++i)
{
if (g_ppf4SumStokes_d[i] != NULL)
{
(void) hipFree(g_ppf4SumStokes_d[i]);
g_ppf4SumStokes_d[i] = NULL;
}
}
free(g_ppf4SumStokes_d);
g_ppf4SumStokes_d = NULL;
}
/* destroy plan */
/* TODO: check for plan */
(void) hipfftDestroy(g_stPlan);
#if PLOT
if (g_pfSumPowX != NULL)
{
free(g_pfSumPowX);
g_pfSumPowX = NULL;
}
if (g_pfSumPowY != NULL)
{
free(g_pfSumPowY);
g_pfSumPowY = NULL;
}
if (g_pfSumStokesRe != NULL)
{
free(g_pfSumStokesRe);
g_pfSumStokesRe = NULL;
}
if (g_pfSumStokesIm != NULL)
{
free(g_pfSumStokesIm);
g_pfSumStokesIm = NULL;
}
if (g_pfFreq != NULL)
{
free(g_pfFreq);
g_pfFreq = NULL;
}
cpgclos();
#endif
return;
}
#if BENCHMARKING
/* function to print benchmarking statistics */
/* Prints per-stage timing totals, percentages of the overall time, and
   per-call averages for the five benchmarked stages (H2D copy, FFT-input
   unpack, FFT, accumulation, D2H copy).  The H2D copy totals come from the
   g_fTotCpIn/g_iCountCpIn globals; the rest are passed in from main().
   NOTE(review): the averages divide by the call counts - a zero count (no
   iterations executed) would divide by zero; confirm the benchmarking path
   guarantees at least one call per stage. */
void PrintBenchmarks(float fTotCpInFFT,
int iCountCpInFFT,
float fTotFFT,
int iCountFFT,
float fTotAccum,
int iCountAccum,
float fTotCpOut,
int iCountCpOut)
{
float fTotal = 0.0;
/* overall time across all benchmarked stages */
fTotal = g_fTotCpIn
+ fTotCpInFFT
+ fTotFFT
+ fTotAccum
+ fTotCpOut;
(void) printf(" Total elapsed time for\n");
(void) printf(" %6d calls to hipMemcpy(Host2Device) : "
"%5.3fms, %2d%%; Average = %5.3fms\n",
g_iCountCpIn,
g_fTotCpIn,
(int) ((g_fTotCpIn / fTotal) * 100),
g_fTotCpIn / g_iCountCpIn);
(void) printf(" %6d calls to CopyDataForFFT() : "
"%5.3fms, %2d%%; Average = %5.3fms\n",
iCountCpInFFT,
fTotCpInFFT,
(int) ((fTotCpInFFT / fTotal) * 100),
fTotCpInFFT / iCountCpInFFT);
(void) printf(" %6d calls to DoFFT() : "
"%5.3fms, %2d%%; Average = %5.3fms\n",
iCountFFT,
fTotFFT,
(int) ((fTotFFT / fTotal) * 100),
fTotFFT / iCountFFT);
(void) printf(" %6d calls to Accumulate() : "
"%5.3fms, %2d%%; Average = %5.3fms\n",
iCountAccum,
fTotAccum,
(int) ((fTotAccum / fTotal) * 100),
fTotAccum / iCountAccum);
(void) printf(" %6d calls to hipMemcpy(Device2Host) : "
"%5.3fms, %2d%%; Average = %5.3fms\n",
iCountCpOut,
fTotCpOut,
(int) ((fTotCpOut / fTotal) * 100),
fTotCpOut / iCountCpOut);
return;
}
#endif
#if PLOT
/* Opens the PGPLOT device, splits it into g_iNumConcFFT x 4 panels (one
   column per concurrent channel, one row per Stokes product), allocates the
   per-spectrum plot buffers and fills the frequency axis.
   Returns EXIT_SUCCESS/EXIT_FAILURE; buffers are released by CleanUp(). */
int InitPlot()
{
int iRet = EXIT_SUCCESS;
int i = 0;
iRet = cpgopen(PG_DEV);
/* cpgopen() returns a positive device ID on success */
if (iRet <= 0)
{
(void) fprintf(stderr,
"ERROR: Opening graphics device %s failed!\n",
PG_DEV);
return EXIT_FAILURE;
}
cpgsch(3);
cpgsubp(g_iNumConcFFT, 4);
g_pfSumPowX = (float*) malloc(g_iNFFT * sizeof(float));
if (NULL == g_pfSumPowX)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
g_pfSumPowY = (float*) malloc(g_iNFFT * sizeof(float));
if (NULL == g_pfSumPowY)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
g_pfSumStokesRe = (float*) malloc(g_iNFFT * sizeof(float));
if (NULL == g_pfSumStokesRe)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
g_pfSumStokesIm = (float*) malloc(g_iNFFT * sizeof(float));
if (NULL == g_pfSumStokesIm)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
g_pfFreq = (float*) malloc(g_iNFFT * sizeof(float));
if (NULL == g_pfFreq)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
/* load the frequency axis */
for (i = 0; i < g_iNFFT; ++i)
{
g_pfFreq[i] = ((float) i * g_fFSamp) / g_iNFFT;
}
return EXIT_SUCCESS;
}
/* Plots the accumulated spectra of one channel block: for each of the
   g_iNumConcFFT channels, draws four panels - X power (dB, shifted so the
   peak sits at 0), Y power (dB, likewise), Re(XY*) and Im(XY*).
   NOTE: modifies the host accumulator it reads (the dB-shift subtracts the
   per-panel maximum in place in the scratch arrays, and zero power is
   special-cased to avoid log10f(0)).
   NOTE(review): the parameter is named g_iAccID and shadows the global of
   the same name - it behaves as an ordinary parameter here, but renaming it
   (e.g. iAccID) would avoid confusion. */
void Plot(int g_iAccID)
{
float fMinFreq = g_pfFreq[0];
float fMaxFreq = g_pfFreq[g_iNFFT-1];
float fMinY = FLT_MAX;
float fMaxY = -(FLT_MAX);
int i = 0;
int j = 0;
int k = 0;
/* one pass per concurrent channel (one panel column each) */
for (k = 0; k < g_iNumConcFFT; ++k)
{
/* de-interleave channel k of the accumulator into the scratch arrays */
for (i = k, j = 0;
i < (g_iNumConcFFT * g_iNFFT);
i += g_iNumConcFFT, ++j)
{
if (0.0 == g_ppf4SumStokes[g_iAccID][i].x)
{
g_pfSumPowX[j] = 0.0;
}
else
{
g_pfSumPowX[j] = 10 * log10f(g_ppf4SumStokes[g_iAccID][i].x);
//g_pfSumPowX[j] = g_ppf4SumStokes[g_iAccID][i].x;
}
if (0.0 == g_ppf4SumStokes[g_iAccID][i].y)
{
g_pfSumPowY[j] = 0.0;
}
else
{
g_pfSumPowY[j] = 10 * log10f(g_ppf4SumStokes[g_iAccID][i].y);
//g_pfSumPowY[j] = g_ppf4SumStokes[g_iAccID][i].y;
}
g_pfSumStokesRe[j] = g_ppf4SumStokes[g_iAccID][i].z;
g_pfSumStokesIm[j] = g_ppf4SumStokes[g_iAccID][i].w;
}
/* plot accumulated X-pol. power */
fMinY = FLT_MAX;
fMaxY = -(FLT_MAX);
for (i = 0; i < g_iNFFT; ++i)
{
if (g_pfSumPowX[i] > fMaxY)
{
fMaxY = g_pfSumPowX[i];
}
if (g_pfSumPowX[i] < fMinY)
{
fMinY = g_pfSumPowX[i];
}
}
/* to avoid min == max */
fMaxY += 1.0;
fMinY -= 1.0;
/* shift the trace so its maximum sits at 0 dB */
for (i = 0; i < g_iNFFT; ++i)
{
g_pfSumPowX[i] -= fMaxY;
}
fMinY -= fMaxY;
fMaxY = 0;
cpgpanl(k + 1, 1);
cpgeras();
cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
//cpglab("Bin Number", "", "SumPowX");
cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
cpgsci(PG_CI_PLOT);
cpgline(g_iNFFT, g_pfFreq, g_pfSumPowX);
cpgsci(PG_CI_DEF);
/* plot accumulated Y-pol. power */
fMinY = FLT_MAX;
fMaxY = -(FLT_MAX);
for (i = 0; i < g_iNFFT; ++i)
{
if (g_pfSumPowY[i] > fMaxY)
{
fMaxY = g_pfSumPowY[i];
}
if (g_pfSumPowY[i] < fMinY)
{
fMinY = g_pfSumPowY[i];
}
}
/* to avoid min == max */
fMaxY += 1.0;
fMinY -= 1.0;
/* shift the trace so its maximum sits at 0 dB */
for (i = 0; i < g_iNFFT; ++i)
{
g_pfSumPowY[i] -= fMaxY;
}
fMinY -= fMaxY;
fMaxY = 0;
cpgpanl(k + 1, 2);
cpgeras();
cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
//cpglab("Bin Number", "", "SumPowY");
cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
cpgsci(PG_CI_PLOT);
cpgline(g_iNFFT, g_pfFreq, g_pfSumPowY);
cpgsci(PG_CI_DEF);
/* plot accumulated real(XY*) */
fMinY = FLT_MAX;
fMaxY = -(FLT_MAX);
for (i = 0; i < g_iNFFT; ++i)
{
if (g_pfSumStokesRe[i] > fMaxY)
{
fMaxY = g_pfSumStokesRe[i];
}
if (g_pfSumStokesRe[i] < fMinY)
{
fMinY = g_pfSumStokesRe[i];
}
}
/* to avoid min == max */
fMaxY += 1.0;
fMinY -= 1.0;
cpgpanl(k + 1, 3);
cpgeras();
cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
//cpglab("Bin Number", "", "SumStokesRe");
cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
cpgsci(PG_CI_PLOT);
cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesRe);
cpgsci(PG_CI_DEF);
/* plot accumulated imag(XY*) */
fMinY = FLT_MAX;
fMaxY = -(FLT_MAX);
for (i = 0; i < g_iNFFT; ++i)
{
if (g_pfSumStokesIm[i] > fMaxY)
{
fMaxY = g_pfSumStokesIm[i];
}
if (g_pfSumStokesIm[i] < fMinY)
{
fMinY = g_pfSumStokesIm[i];
}
}
/* to avoid min == max */
fMaxY += 1.0;
fMinY -= 1.0;
cpgpanl(k + 1, 4);
cpgeras();
cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
//cpglab("Bin Number", "", "SumStokesIm");
cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
cpgsci(PG_CI_PLOT);
cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesIm);
cpgsci(PG_CI_DEF);
}
return;
}
#endif
/*
* Registers handlers for SIGTERM and CTRL+C
*/
/* Installs HandleStopSignals() as the handler for SIGINT (CTRL+C) and
   SIGTERM so the process cleans up before exiting.
   Returns EXIT_SUCCESS/EXIT_FAILURE.
   NOTE(review): stSigHandler is zero-initialised rather than set up with
   sigemptyset()/sa_flags - works in practice, but sigemptyset() is the
   portable way to clear sa_mask. */
int RegisterSignalHandlers()
{
struct sigaction stSigHandler = {{0}};
int iRet = EXIT_SUCCESS;
/* register the CTRL+C-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGINT, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGINT);
return EXIT_FAILURE;
}
/* register the SIGTERM-handling function */
stSigHandler.sa_handler = HandleStopSignals;
iRet = sigaction(SIGTERM, &stSigHandler, NULL);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Handler registration failed for signal %d!\n",
SIGTERM);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/*
* Catches SIGTERM and CTRL+C and cleans up before exiting
*/
/* Signal handler for SIGINT/SIGTERM: releases all resources and exits with
   success.  iSigNo (the delivered signal) is unused.
   NOTE(review): CleanUp() calls functions that are not async-signal-safe
   (free, hipFree, ...) - acceptable for this standalone tool, but not
   strictly conforming signal-handler behaviour. */
void HandleStopSignals(int iSigNo)
{
/* clean up */
CleanUp();
/* exit */
exit(EXIT_SUCCESS);
/* never reached */
return;
}
/* Backend for the CUDASafeCallWithCleanUp() macro: if iRet indicates an
   error, prints the failing call site and error string, invokes the supplied
   clean-up callback, and terminates the process with EXIT_FAILURE.
   iRet     : return code of the CUDA/HIP call being checked
   pcFile   : source file of the call site (__FILE__)
   iLine    : line number of the call site (__LINE__)
   pCleanUp : resource-release function to run before exiting */
void __CUDASafeCallWithCleanUp(hipError_t iRet,
const char* pcFile,
const int iLine,
void (*pCleanUp)(void))
{
if (iRet != hipSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
pcFile,
iLine,
hipGetErrorString(iRet));
/* free resources */
(*pCleanUp)();
exit(EXIT_FAILURE);
}
return;
}
/*
* Prints usage information
*/
/* Prints command-line usage information to stdout.
   pcProgName: name the program was invoked as (argv[0]). */
void PrintUsage(const char *pcProgName)
{
    (void) printf("Usage: %s [options] <data-file>\n",
                  pcProgName);
    (void) printf(" -h --help ");
    (void) printf("Display this usage information\n");
    /* BUGFIX: -b takes a required argument (see pcOptsShort "b:" and
       stOptsLong), so show <value> like the other value options do */
    (void) printf(" -b --nsub <value> ");
    (void) printf("Number of sub-bands in the data\n");
    (void) printf(" -n --nfft <value> ");
    (void) printf("Number of points in FFT\n");
    (void) printf(" -a --nacc <value> ");
    (void) printf("Number of spectra to add\n");
#if PLOT
    (void) printf(" -s --fsamp <value> ");
    (void) printf("Sampling frequency\n");
#endif
    return;
}
| 0143719fb38ea067d2a614080b4f94b09a70dafa.cu | /**
* @file s6_gpu_standalone.cu
* SERENDIP6 - Stand-Alone GPU Implementation
*
* @author Jayanth Chennamangalam
* @date 2013.07.19
*/
#include "s6_gpu_standalone.tex.h"
int g_iIsDataReadDone = FALSE;
int g_iIsProcDone = FALSE;
int g_iMaxThreadsPerBlock = 0;
char4* g_pc4InBuf = NULL;
char4* g_pc4InBufRead = NULL;
int g_iSizeFile = 0;
int g_iReadCount = 0;
char4* g_pc4Data_d = NULL; /* raw data starting address */
char4* g_pc4DataRead_d = NULL; /* raw data read pointer */
int g_iNFFT = DEF_LEN_SPEC;
dim3 g_dimBCopy(1, 1, 1);
dim3 g_dimGCopy(1, 1);
dim3 g_dimBAccum(1, 1, 1);
dim3 g_dimGAccum(1, 1);
float4* g_pf4FFTIn_d = NULL;
float4* g_pf4FFTOut_d = NULL;
cufftHandle g_stPlan = {0};
float4** g_ppf4SumStokes = NULL;
float4** g_ppf4SumStokes_d = NULL;
char g_acFileData[256] = {0};
int g_iNumSubBands = DEF_NUM_SUBBANDS;
long int g_lReqBytes = 0;
int g_iNumConcFFT = 4; /* number of channels that are to be FFT'd concurrently */
int g_iNumChanBlocks = 0;
int g_iAccID = 0;
int g_iReadID = 0;
texture<char4, 1, cudaReadModeNormalizedFloat> g_stTexRefData;
cudaChannelFormatDesc g_stChanDescData;
#if PLOT
float* g_pfSumPowX = NULL;
float* g_pfSumPowY = NULL;
float* g_pfSumStokesRe = NULL;
float* g_pfSumStokesIm = NULL;
float* g_pfFreq = NULL;
float g_fFSamp = 1.0; /* 1 [frequency] */
#endif
#if BENCHMARKING
float g_fTimeCpIn = 0.0;
float g_fTotCpIn = 0.0;
int g_iCountCpIn = 0;
cudaEvent_t g_cuStart;
cudaEvent_t g_cuStop;
#endif
int main(int argc, char *argv[])
{
int iRet = EXIT_SUCCESS;
int iSpecCount = 0;
int iNumAcc = DEF_ACC;
cudaError_t iCUDARet = cudaSuccess;
#if BENCHMARKING
float fTimeCpInFFT = 0.0;
float fTotCpInFFT = 0.0;
int iCountCpInFFT = 0;
float fTimeFFT = 0.0;
float fTotFFT = 0.0;
int iCountFFT = 0;
float fTimeCpOut = 0.0;
float fTotCpOut = 0.0;
int iCountCpOut = 0;
float fTimeAccum = 0.0;
float fTotAccum = 0.0;
int iCountAccum = 0;
#else
struct timeval stStart = {0};
struct timeval stStop = {0};
#endif
#if OUTFILE
int iFileSpec = 0;
#endif
const char *pcProgName = NULL;
int iNextOpt = 0;
/* valid short options */
#if PLOT
const char* const pcOptsShort = "hb:n:a:s:";
#else
const char* const pcOptsShort = "hb:n:a:";
#endif
/* valid long options */
const struct option stOptsLong[] = {
{ "help", 0, NULL, 'h' },
{ "nsub", 1, NULL, 'b' },
{ "nfft", 1, NULL, 'n' },
{ "nacc", 1, NULL, 'a' },
#if PLOT
{ "fsamp", 1, NULL, 's' },
#endif
{ NULL, 0, NULL, 0 }
};
/* get the filename of the program from the argument list */
pcProgName = argv[0];
/* parse the input */
do
{
iNextOpt = getopt_long(argc, argv, pcOptsShort, stOptsLong, NULL);
switch (iNextOpt)
{
case 'h': /* -h or --help */
/* print usage info and terminate */
PrintUsage(pcProgName);
return EXIT_SUCCESS;
case 'b': /* -b or --nsub */
/* set option */
g_iNumSubBands = (int) atoi(optarg);
break;
case 'n': /* -n or --nfft */
/* set option */
g_iNFFT = (int) atoi(optarg);
break;
case 'a': /* -a or --nacc */
/* set option */
iNumAcc = (int) atoi(optarg);
break;
#if PLOT
case 's': /* -s or --fsamp */
/* set option */
g_fFSamp = (float) atof(optarg);
break;
#endif
case '?': /* user specified an invalid option */
/* print usage info and terminate with error */
(void) fprintf(stderr, "ERROR: Invalid option!\n");
PrintUsage(pcProgName);
return EXIT_FAILURE;
case -1: /* done with options */
break;
default: /* unexpected */
assert(0);
}
} while (iNextOpt != -1);
/* no arguments */
if (argc <= optind)
{
(void) fprintf(stderr, "ERROR: Data file not specified!\n");
PrintUsage(pcProgName);
return EXIT_FAILURE;
}
(void) strncpy(g_acFileData, argv[optind], 256);
g_acFileData[255] = '\0';
#if BENCHMARKING
(void) printf("* Benchmarking run commencing...\n");
#endif
/* initialise */
iRet = Init();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! Init failed!\n");
CleanUp();
return EXIT_FAILURE;
}
#if OUTFILE
iFileSpec = open("spec.dat",
O_CREAT | O_TRUNC | O_WRONLY,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (iFileSpec < EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! Opening spectrum file failed!\n");
CleanUp();
return EXIT_FAILURE;
}
#endif
#if (!BENCHMARKING)
(void) gettimeofday(&stStart, NULL);
#endif
while (!g_iIsProcDone)
{
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart));
#endif
CopyDataForFFT<<<g_dimGCopy, g_dimBCopy>>>(g_pc4DataRead_d,
g_pf4FFTIn_d);
CUDASafeCallWithCleanUp(cudaThreadSynchronize());
iCUDARet = cudaGetLastError();
if (iCUDARet != cudaSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
cudaGetErrorString(iCUDARet));
/* free resources */
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeCpInFFT,
g_cuStart,
g_cuStop));
fTotCpInFFT += fTimeCpInFFT;
++iCountCpInFFT;
#endif
/* do fft */
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart));
#endif
iRet = DoFFT();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR! FFT failed!\n");
#if OUTFILE
(void) close(iFileSpec);
#endif
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeFFT,
g_cuStart,
g_cuStop));
fTotFFT += fTimeFFT;
++iCountFFT;
#endif
/* accumulate power x, power y, stokes, if the blanking bit is
not set */
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart));
#endif
Accumulate<<<g_dimGAccum, g_dimBAccum>>>(g_pf4FFTOut_d,
g_ppf4SumStokes_d[g_iAccID]);
CUDASafeCallWithCleanUp(cudaThreadSynchronize());
iCUDARet = cudaGetLastError();
if (iCUDARet != cudaSuccess)
{
(void) fprintf(stderr,
"ERROR: File <%s>, Line %d: %s\n",
__FILE__,
__LINE__,
cudaGetErrorString(iCUDARet));
/* free resources */
CleanUp();
return EXIT_FAILURE;
}
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeAccum,
g_cuStart,
g_cuStop));
fTotAccum += fTimeAccum;
++iCountAccum;
#endif
if (0 == g_iAccID)
{
++iSpecCount;
}
if (iSpecCount == iNumAcc)
{
/* dump to buffer */
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart));
#endif
CUDASafeCallWithCleanUp(cudaMemcpy(g_ppf4SumStokes[g_iAccID],
g_ppf4SumStokes_d[g_iAccID],
(g_iNumConcFFT
* g_iNFFT
* sizeof(float4)),
cudaMemcpyDeviceToHost));
#if PLOT
/* NOTE: Plot() will modify data! */
Plot(g_iAccID);
(void) usleep(500000);
#endif
#if BENCHMARKING
CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0));
CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop));
CUDASafeCallWithCleanUp(cudaEventElapsedTime(&fTimeCpOut,
g_cuStart,
g_cuStop));
fTotCpOut += fTimeCpOut;
++iCountCpOut;
#endif
#if OUTFILE
(void) write(iFileSpec,
g_ppf4SumStokes[g_iAccID],
g_iNumConcFFT * g_iNFFT * sizeof(float4));
#endif
/* reset time */
if ((g_iNumChanBlocks - 1) == g_iAccID)
{
iSpecCount = 0;
}
/* zero accumulators */
CUDASafeCallWithCleanUp(cudaMemset(g_ppf4SumStokes_d[g_iAccID],
'\0',
(g_iNumConcFFT
* g_iNFFT
* sizeof(float4))));
}
if (!g_iIsDataReadDone)
{
/* read data from input buffer */
iRet = ReadData();
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr, "ERROR: Data reading failed!\n");
break;
}
g_iAccID = (g_iAccID + 1) % g_iNumChanBlocks;
}
else /* no more data to be read */
{
g_iIsProcDone = TRUE;
}
}
#if (!BENCHMARKING)
(void) gettimeofday(&stStop, NULL);
(void) printf("Time taken (barring Init()): %gs\n",
((stStop.tv_sec + (stStop.tv_usec * USEC2SEC))
- (stStart.tv_sec + (stStart.tv_usec * USEC2SEC))));
#endif
#if OUTFILE
(void) close(iFileSpec);
#endif
CleanUp();
#if BENCHMARKING
PrintBenchmarks(fTotCpInFFT,
iCountCpInFFT,
fTotFFT,
iCountFFT,
fTotAccum,
iCountAccum,
fTotCpOut,
iCountCpOut);
CUDASafeCallWithCleanUp(cudaEventDestroy(g_cuStart));
CUDASafeCallWithCleanUp(cudaEventDestroy(g_cuStop));
(void) printf("* Events destroyed.\n");
(void) printf("* Benchmarking run completed.\n");
#endif
return EXIT_SUCCESS;
}
/* function that creates the FFT plan, allocates memory, initialises counters,
etc. */
/* Creates the cuFFT plan, allocates host and device buffers, binds the input
   texture, and initialises kernel launch dimensions.  Returns EXIT_SUCCESS or
   EXIT_FAILURE; on failure the caller is expected to invoke CleanUp(). */
int Init()
{
    int iDevCount = 0;
    cudaDeviceProp stDevProp = {0};
    int iRet = EXIT_SUCCESS;
    cufftResult iCUFFTRet = CUFFT_SUCCESS;
    size_t lTotCUDAMalloc = 0;
    int i = 0;
    /* install SIGINT/SIGTERM handlers first, so a long initialisation can be
       interrupted cleanly */
    iRet = RegisterSignalHandlers();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Signal-handler registration failed!\n");
        return EXIT_FAILURE;
    }
    /* since CUDASafeCallWithCleanUp() calls cudaGetErrorString(),
       it should not be used here - will cause crash if no CUDA device is
       found */
    (void) cudaGetDeviceCount(&iDevCount);
    if (0 == iDevCount)
    {
        (void) fprintf(stderr, "ERROR: No CUDA-capable device found!\n");
        return EXIT_FAILURE;
    }
    else if (iDevCount > 1)
    {
        /* TODO: figure this out */
        (void) fprintf(stderr,
                       "ERROR: More than one CUDA-capable device "
                       "found! Don't know how to proceed!\n");
        return EXIT_FAILURE;
    }
    /* TODO: make it automagic */
    CUDASafeCallWithCleanUp(cudaSetDevice(0));
#if BENCHMARKING
    CUDASafeCallWithCleanUp(cudaEventCreate(&g_cuStart));
    CUDASafeCallWithCleanUp(cudaEventCreate(&g_cuStop));
    (void) printf("* Events created.\n");
#endif
    CUDASafeCallWithCleanUp(cudaGetDeviceProperties(&stDevProp, 0));
    g_iMaxThreadsPerBlock = stDevProp.maxThreadsPerBlock;
    /* the sub-bands are processed g_iNumConcFFT channels at a time */
    g_iNumChanBlocks = g_iNumSubBands / g_iNumConcFFT;
    /* estimate the total device-memory footprint up front: raw input data,
       FFT input, FFT output, and one Stokes accumulator
       (NOTE(review): only ONE accumulator is counted here although
       g_iNumChanBlocks of them are allocated below - confirm) */
    lTotCUDAMalloc += g_iNumConcFFT * g_iNFFT * sizeof(char4);
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    lTotCUDAMalloc += (g_iNumConcFFT * g_iNFFT * sizeof(float4));
    if (lTotCUDAMalloc > stDevProp.totalGlobalMem)
    {
        (void) fprintf(stderr,
                       "ERROR: Total memory requested on GPU is %g of a "
                       "possible %g MB. Memory request break-up:\n"
                       "    Input data buffer:   %g MB\n"
                       "    FFT in array:        %g MB\n"
                       "    FFT out array:       %g MB\n"
                       "    Stokes output array: %g MB\n",
                       ((float) lTotCUDAMalloc) / (1024 * 1024),
                       ((float) stDevProp.totalGlobalMem) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(char4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                       ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024));
        return EXIT_FAILURE;
    }
#ifdef DEBUG
    else
    {
        (void) printf("INFO: Total memory requested on GPU is %g of a "
                      "possible %g MB. Memory request break-up:\n"
                      "    Input data buffer:   %g MB\n"
                      "    FFT in array:        %g MB\n"
                      "    FFT out array:       %g MB\n"
                      "    Stokes output array: %g MB\n",
                      ((float) lTotCUDAMalloc) / (1024 * 1024),
                      ((float) stDevProp.totalGlobalMem) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(char4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024),
                      ((float) g_iNumConcFFT * g_iNFFT * sizeof(float4)) / (1024 * 1024));
    }
#endif
    /* allocate memory for data array - 32MB is the block size for the VEGAS
       input buffer */
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pc4Data_d, g_iNumConcFFT * g_iNFFT * sizeof(char4)));
    g_pc4DataRead_d = g_pc4Data_d;
    /* load data into memory */
    iRet = LoadDataToMem();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR! Loading to memory failed!\n");
        return EXIT_FAILURE;
    }
    /* calculate kernel parameters: one thread per sample, capped at the
       device limit */
    if (g_iNFFT < g_iMaxThreadsPerBlock)
    {
        g_dimBCopy.x = g_iNFFT;
        g_dimBAccum.x = g_iNFFT;
    }
    else
    {
        g_dimBCopy.x = g_iMaxThreadsPerBlock;
        g_dimBAccum.x = g_iMaxThreadsPerBlock;
    }
    g_dimGCopy.x = (g_iNumConcFFT * g_iNFFT) / g_iMaxThreadsPerBlock;
    g_dimGAccum.x = (g_iNumConcFFT * g_iNFFT) / g_iMaxThreadsPerBlock;
    /* prime the device with the first chunk of data */
    iRet = ReadData();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Reading data failed!\n");
        return EXIT_FAILURE;
    }
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTIn_d,
                                       g_iNumConcFFT
                                       * g_iNFFT
                                       * sizeof(float4)));
    CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_pf4FFTOut_d,
                                       g_iNumConcFFT
                                       * g_iNFFT
                                       * sizeof(float4)));
    /* BUG FIX: allocate an array of float4* elements - the original used
       sizeof(float4 **); the sizes coincide on common platforms, but the
       element type is float4* */
    g_ppf4SumStokes = (float4 **) malloc(g_iNumChanBlocks * sizeof(float4 *));
    if (NULL == g_ppf4SumStokes)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }
    for (i = 0; i < g_iNumChanBlocks; ++i)
    {
        g_ppf4SumStokes[i] = (float4 *) malloc(g_iNumConcFFT
                                               * g_iNFFT
                                               * sizeof(float4));
        if (NULL == g_ppf4SumStokes[i])
        {
            (void) fprintf(stderr,
                           "ERROR: Memory allocation failed! %s.\n",
                           strerror(errno));
            return EXIT_FAILURE;
        }
    }
    /* BUG FIX: same sizeof(float4 *) correction as above */
    g_ppf4SumStokes_d = (float4 **) malloc(g_iNumChanBlocks * sizeof(float4 *));
    if (NULL == g_ppf4SumStokes_d)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }
    for (i = 0; i < g_iNumChanBlocks; ++i)
    {
        CUDASafeCallWithCleanUp(cudaMalloc((void **) &g_ppf4SumStokes_d[i],
                                           g_iNumConcFFT
                                           * g_iNFFT
                                           * sizeof(float4)));
        /* zero the device accumulator */
        CUDASafeCallWithCleanUp(cudaMemset(g_ppf4SumStokes_d[i],
                                           '\0',
                                           g_iNumConcFFT
                                           * g_iNFFT
                                           * sizeof(float4)));
    }
    /* bind the raw-data buffer to the texture reference used by
       CopyDataForFFT().  NOTE(review): the channel descriptor is for
       signed char while the buffer size is in char4 units - confirm the
       declaration of g_stTexRefData matches */
    g_stChanDescData = cudaCreateChannelDesc<signed char>();
    CUDASafeCallWithCleanUp(cudaBindTexture(0,
                                            &g_stTexRefData,
                                            g_pc4Data_d,
                                            &g_stChanDescData,
                                            g_iNumConcFFT * g_iNFFT * sizeof(char4)));
    /* create the batched C2C FFT plan */
    iCUFFTRet = cufftPlanMany(&g_stPlan,
                              FFTPLAN_RANK,
                              &g_iNFFT,
                              &g_iNFFT,
                              FFTPLAN_ISTRIDE,
                              FFTPLAN_IDIST,
                              &g_iNFFT,
                              FFTPLAN_OSTRIDE,
                              FFTPLAN_ODIST,
                              CUFFT_C2C,
                              FFTPLAN_BATCH);
    if (iCUFFTRet != CUFFT_SUCCESS)
    {
        (void) fprintf(stderr, "ERROR: Plan creation failed!\n");
        return EXIT_FAILURE;
    }
#if PLOT
    iRet = InitPlot();
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Plotting initialisation failed!\n");
        return EXIT_FAILURE;
    }
#endif
    return EXIT_SUCCESS;
}
/* function that reads data from the data file and loads it into memory during
initialisation */
/* Reads the entire input data file (g_acFileData) into a heap buffer
   (g_pc4InBuf, freed by CleanUp()) and points g_pc4InBufRead at its start.
   Returns EXIT_SUCCESS or EXIT_FAILURE. */
int LoadDataToMem()
{
    struct stat stFileStats = {0};
    int iRet = EXIT_SUCCESS;
    int iFileData = 0;
    /* find out how large the file is */
    iRet = stat(g_acFileData, &stFileStats);
    if (iRet != EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Failed to stat %s: %s!\n",
                       g_acFileData,
                       strerror(errno));
        return EXIT_FAILURE;
    }
    g_iSizeFile = stFileStats.st_size;
    /* buffer that holds the whole file; ownership passes to CleanUp() */
    g_pc4InBuf = (char4*) malloc(g_iSizeFile);
    if (NULL == g_pc4InBuf)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
        return EXIT_FAILURE;
    }
    iFileData = open(g_acFileData, O_RDONLY);
    if (iFileData < EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR! Opening data file %s failed! %s.\n",
                       g_acFileData,
                       strerror(errno));
        return EXIT_FAILURE;
    }
    /* NOTE(review): a single read() of the whole file; for files larger
       than INT_MAX bytes this truncates - confirm expected file sizes */
    iRet = read(iFileData, g_pc4InBuf, g_iSizeFile);
    if (iRet < EXIT_SUCCESS)
    {
        (void) fprintf(stderr,
                       "ERROR: Data reading failed! %s.\n",
                       strerror(errno));
        (void) close(iFileData);
        return EXIT_FAILURE;
    }
    else if (iRet != stFileStats.st_size)
    {
        /* BUG FIX: the original printed "File read done!" exactly when the
           read was SHORT; report the truncation instead */
        (void) fprintf(stderr,
                       "WARNING: Short read: %d of %ld bytes!\n",
                       iRet,
                       (long) stFileStats.st_size);
    }
    (void) close(iFileData);
    /* set the read pointer to the beginning of the data array */
    g_pc4InBufRead = g_pc4InBuf;
    return EXIT_SUCCESS;
}
/* function that reads data from input buffer */
/* Copies the next chunk of samples from the host staging buffer to the
   device (strided so that only g_iNumConcFFT of the g_iNumSubBands channels
   land contiguously), advances the host read pointer, and raises
   g_iIsDataReadDone when less than one full chunk remains. */
int ReadData()
{
    //printf("%d, %ld\n", g_iAccID, (g_pc4InBufRead-g_pc4InBuf)*sizeof(char4));
    /* write new data to the write buffer */
#if BENCHMARKING
    CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStart, 0));
    CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStart));
#endif
    /* strided copy of g_iNumConcFFT channels (* 2 polarizations) */
    CUDASafeCallWithCleanUp(cudaMemcpy2D(g_pc4Data_d,
                                         g_iNumConcFFT * sizeof(char4), /* dest. pitch */
                                         g_pc4InBufRead,
                                         g_iNumSubBands * sizeof(char4), /* src. pitch */
                                         g_iNumConcFFT * sizeof(char4),
                                         g_iNFFT,
                                         cudaMemcpyHostToDevice));
#if BENCHMARKING
    CUDASafeCallWithCleanUp(cudaEventRecord(g_cuStop, 0));
    CUDASafeCallWithCleanUp(cudaEventSynchronize(g_cuStop));
    CUDASafeCallWithCleanUp(cudaEventElapsedTime(&g_fTimeCpIn,
                                                 g_cuStart,
                                                 g_cuStop));
    g_fTotCpIn += g_fTimeCpIn;
    ++g_iCountCpIn;
#endif
    /* update the read pointer to where data needs to be read in from, in the
       next read */
    if (g_iNumConcFFT == g_iNumSubBands)
    {
        /* all sub-bands processed concurrently: skip a whole time block */
        g_pc4InBufRead += (g_iNumSubBands * g_iNFFT);
    }
    else
    {
        if ((g_iNumChanBlocks - 1) == g_iReadID)
        {
            /* last channel block of this time chunk: wrap to the first
               block of the next chunk.
               NOTE(review): when g_iNumConcFFT < g_iNumSubBands the
               expression (g_iNumConcFFT % g_iNumSubBands) is just
               g_iNumConcFFT - confirm the intended arithmetic */
            g_pc4InBufRead += (g_iNumConcFFT % g_iNumSubBands);
            g_pc4InBufRead += (g_iNumSubBands * (g_iNFFT - 1));
        }
        else
        {
            /* advance to the next group of g_iNumConcFFT channels */
            g_pc4InBufRead += g_iNumConcFFT;
        }
    }
    /* whenever there is a read, reset the read pointer to the beginning */
    g_pc4DataRead_d = g_pc4Data_d;
    ++g_iReadCount;
    /* a buggy way to check for end of data */
    if ((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead)
        <= (g_iNumSubBands * g_iNFFT * sizeof(char4)))
    {
        (void) printf("Data read done! Read count = %d\n", g_iReadCount);
        g_iIsDataReadDone = TRUE;
    }
    g_iReadID = (g_iReadID + 1) % g_iNumChanBlocks;
    return EXIT_SUCCESS;
}
/* Kernel: widens the 8-bit complex samples to float4 for cuFFT, reading
   through the texture reference bound in Init().  Launched with
   g_dimGCopy x g_dimBCopy, one thread per sample; the grid exactly covers
   g_iNumConcFFT * g_iNFFT elements, so no bounds check is needed. */
__global__ void CopyDataForFFT(char4 *pc4Data,
                               float4 *pf4FFTIn)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    /*
    pf4FFTIn[i].x = (float) pc4Data[i].x;
    pf4FFTIn[i].y = (float) pc4Data[i].y;
    pf4FFTIn[i].z = (float) pc4Data[i].z;
    pf4FFTIn[i].w = (float) pc4Data[i].w;
    */
    /* BUG FIX: the original wrote through *pf4FFTIn, so every thread
       clobbered element 0 and the rest of the FFT input stayed
       uninitialised; index with i, as the commented-out direct copy does */
    pf4FFTIn[i] = tex1Dfetch(g_stTexRefData, i);
    return;
}
/* function that performs the FFT */
int DoFFT()
{
    /* run the batched forward C2C transform created in Init():
       g_pf4FFTIn_d -> g_pf4FFTOut_d */
    cufftResult iStatus = cufftExecC2C(g_stPlan,
                                       (cufftComplex*) g_pf4FFTIn_d,
                                       (cufftComplex*) g_pf4FFTOut_d,
                                       CUFFT_FORWARD);
    if (CUFFT_SUCCESS != iStatus)
    {
        (void) fprintf(stderr, "ERROR! FFT failed!\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
/* Kernel: folds one FFT output spectrum into the running Stokes
   accumulator.  Each float4 holds (Re(X), Im(X), Re(Y), Im(Y)) on input and
   (|X|^2, |Y|^2, Re(XY*), Im(XY*)) sums on output.  One thread per channel;
   the launch configuration covers the buffer exactly, so no bounds check. */
__global__ void Accumulate(float4 *pf4FFTOut,
                           float4 *pf4SumStokes)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    float4 f4Volt = pf4FFTOut[i];
    float4 f4Accum = pf4SumStokes[i];
    /* total power, X polarisation: Re(X)^2 + Im(X)^2 */
    f4Accum.x += (f4Volt.x * f4Volt.x)
                 + (f4Volt.y * f4Volt.y);
    /* total power, Y polarisation: Re(Y)^2 + Im(Y)^2 */
    f4Accum.y += (f4Volt.z * f4Volt.z)
                 + (f4Volt.w * f4Volt.w);
    /* cross term Re(XY*) */
    f4Accum.z += (f4Volt.x * f4Volt.z)
                 + (f4Volt.y * f4Volt.w);
    /* cross term Im(XY*) */
    f4Accum.w += (f4Volt.y * f4Volt.z)
                 - (f4Volt.x * f4Volt.w);
    pf4SumStokes[i] = f4Accum;
    return;
}
/* function that frees resources */
void CleanUp()
{
    int iBlk = 0;
    /* host-side copy of the input file */
    if (g_pc4InBuf != NULL)
    {
        free(g_pc4InBuf);
        g_pc4InBuf = NULL;
    }
    /* device-side raw-data buffer */
    if (g_pc4Data_d != NULL)
    {
        (void) cudaFree(g_pc4Data_d);
        g_pc4Data_d = NULL;
    }
    /* device-side FFT input buffer */
    if (g_pf4FFTIn_d != NULL)
    {
        (void) cudaFree(g_pf4FFTIn_d);
        g_pf4FFTIn_d = NULL;
    }
    /* device-side FFT output buffer */
    if (g_pf4FFTOut_d != NULL)
    {
        (void) cudaFree(g_pf4FFTOut_d);
        g_pf4FFTOut_d = NULL;
    }
    /* per-channel-block host accumulators, then the pointer array itself */
    if (g_ppf4SumStokes != NULL)
    {
        for (iBlk = 0; iBlk < g_iNumChanBlocks; ++iBlk)
        {
            if (g_ppf4SumStokes[iBlk] != NULL)
            {
                free(g_ppf4SumStokes[iBlk]);
                g_ppf4SumStokes[iBlk] = NULL;
            }
        }
        free(g_ppf4SumStokes);
        g_ppf4SumStokes = NULL;
    }
    /* per-channel-block device accumulators, then the pointer array itself */
    if (g_ppf4SumStokes_d != NULL)
    {
        for (iBlk = 0; iBlk < g_iNumChanBlocks; ++iBlk)
        {
            if (g_ppf4SumStokes_d[iBlk] != NULL)
            {
                (void) cudaFree(g_ppf4SumStokes_d[iBlk]);
                g_ppf4SumStokes_d[iBlk] = NULL;
            }
        }
        free(g_ppf4SumStokes_d);
        g_ppf4SumStokes_d = NULL;
    }
    /* destroy plan */
    /* TODO: check for plan */
    (void) cufftDestroy(g_stPlan);
#if PLOT
    /* plotting scratch buffers and the PGPLOT device */
    if (g_pfSumPowX != NULL)
    {
        free(g_pfSumPowX);
        g_pfSumPowX = NULL;
    }
    if (g_pfSumPowY != NULL)
    {
        free(g_pfSumPowY);
        g_pfSumPowY = NULL;
    }
    if (g_pfSumStokesRe != NULL)
    {
        free(g_pfSumStokesRe);
        g_pfSumStokesRe = NULL;
    }
    if (g_pfSumStokesIm != NULL)
    {
        free(g_pfSumStokesIm);
        g_pfSumStokesIm = NULL;
    }
    if (g_pfFreq != NULL)
    {
        free(g_pfFreq);
        g_pfFreq = NULL;
    }
    cpgclos();
#endif
    return;
}
#if BENCHMARKING
/* function to print benchmarking statistics */
/* Prints a break-down of where the GPU time went: total and average elapsed
   time per stage, plus each stage's share of the grand total.  The H2D-copy
   figures come from the globals g_fTotCpIn/g_iCountCpIn; the rest are
   passed in by the caller.
   NOTE(review): divides by the call counts and by fTotal without checking
   for zero - fine as long as at least one processing iteration ran. */
void PrintBenchmarks(float fTotCpInFFT,
                     int iCountCpInFFT,
                     float fTotFFT,
                     int iCountFFT,
                     float fTotAccum,
                     int iCountAccum,
                     float fTotCpOut,
                     int iCountCpOut)
{
    float fTotal = 0.0;
    /* grand total across all five instrumented stages */
    fTotal = g_fTotCpIn
             + fTotCpInFFT
             + fTotFFT
             + fTotAccum
             + fTotCpOut;
    (void) printf("    Total elapsed time for\n");
    (void) printf("        %6d calls to cudaMemcpy(Host2Device)  : "
                  "%5.3fms, %2d%%; Average = %5.3fms\n",
                  g_iCountCpIn,
                  g_fTotCpIn,
                  (int) ((g_fTotCpIn / fTotal) * 100),
                  g_fTotCpIn / g_iCountCpIn);
    (void) printf("        %6d calls to CopyDataForFFT()         : "
                  "%5.3fms, %2d%%; Average = %5.3fms\n",
                  iCountCpInFFT,
                  fTotCpInFFT,
                  (int) ((fTotCpInFFT / fTotal) * 100),
                  fTotCpInFFT / iCountCpInFFT);
    (void) printf("        %6d calls to DoFFT()                  : "
                  "%5.3fms, %2d%%; Average = %5.3fms\n",
                  iCountFFT,
                  fTotFFT,
                  (int) ((fTotFFT / fTotal) * 100),
                  fTotFFT / iCountFFT);
    (void) printf("        %6d calls to Accumulate()             : "
                  "%5.3fms, %2d%%; Average = %5.3fms\n",
                  iCountAccum,
                  fTotAccum,
                  (int) ((fTotAccum / fTotal) * 100),
                  fTotAccum / iCountAccum);
    (void) printf("        %6d calls to cudaMemcpy(Device2Host)  : "
                  "%5.3fms, %2d%%; Average = %5.3fms\n",
                  iCountCpOut,
                  fTotCpOut,
                  (int) ((fTotCpOut / fTotal) * 100),
                  fTotCpOut / iCountCpOut);
    return;
}
#endif
#if PLOT
/* Allocates one g_iNFFT-long float array for plotting; prints the standard
   allocation-failure message and returns NULL on failure. */
static float* AllocPlotArray(void)
{
    float *pfArray = (float *) malloc(g_iNFFT * sizeof(float));
    if (NULL == pfArray)
    {
        (void) fprintf(stderr,
                       "ERROR: Memory allocation failed! %s.\n",
                       strerror(errno));
    }
    return pfArray;
}

/* Opens the PGPLOT device, allocates the per-trace scratch buffers, and
   builds the frequency axis.  Returns EXIT_SUCCESS or EXIT_FAILURE; partial
   allocations are released by CleanUp(). */
int InitPlot()
{
    int iStatus = EXIT_SUCCESS;
    int iBin = 0;
    iStatus = cpgopen(PG_DEV);
    if (iStatus <= 0)
    {
        (void) fprintf(stderr,
                       "ERROR: Opening graphics device %s failed!\n",
                       PG_DEV);
        return EXIT_FAILURE;
    }
    cpgsch(3);
    /* one column of panels per concurrent FFT, four rows of Stokes traces */
    cpgsubp(g_iNumConcFFT, 4);
    g_pfSumPowX = AllocPlotArray();
    if (NULL == g_pfSumPowX)
    {
        return EXIT_FAILURE;
    }
    g_pfSumPowY = AllocPlotArray();
    if (NULL == g_pfSumPowY)
    {
        return EXIT_FAILURE;
    }
    g_pfSumStokesRe = AllocPlotArray();
    if (NULL == g_pfSumStokesRe)
    {
        return EXIT_FAILURE;
    }
    g_pfSumStokesIm = AllocPlotArray();
    if (NULL == g_pfSumStokesIm)
    {
        return EXIT_FAILURE;
    }
    g_pfFreq = AllocPlotArray();
    if (NULL == g_pfFreq)
    {
        return EXIT_FAILURE;
    }
    /* load the frequency axis: bin i maps to i * (Fs / NFFT) */
    for (iBin = 0; iBin < g_iNFFT; ++iBin)
    {
        g_pfFreq[iBin] = ((float) iBin * g_fFSamp) / g_iNFFT;
    }
    return EXIT_SUCCESS;
}
/* Draws the four accumulated Stokes traces (X power, Y power, Re(XY*),
   Im(XY*)) for every concurrent channel, one column of panels per channel.
   Mutates g_pfSumPowX/g_pfSumPowY in place (dB conversion and peak
   re-referencing), hence the caller-side "Plot() will modify data!" note.
   NOTE(review): the parameter shadows the file-scope global of the same
   name - presumably intentional, but worth renaming. */
void Plot(int g_iAccID)
{
    float fMinFreq = g_pfFreq[0];
    float fMaxFreq = g_pfFreq[g_iNFFT-1];
    float fMinY = FLT_MAX;
    float fMaxY = -(FLT_MAX);
    int i = 0;
    int j = 0;
    int k = 0;
    for (k = 0; k < g_iNumConcFFT; ++k)
    {
        /* de-interleave channel k: spectra are stored channel-interleaved,
           samples of one channel sit g_iNumConcFFT apart */
        for (i = k, j = 0;
             i < (g_iNumConcFFT * g_iNFFT);
             i += g_iNumConcFFT, ++j)
        {
            if (0.0 == g_ppf4SumStokes[g_iAccID][i].x)
            {
                /* avoid log10f(0) */
                g_pfSumPowX[j] = 0.0;
            }
            else
            {
                /* convert power to dB */
                g_pfSumPowX[j] = 10 * log10f(g_ppf4SumStokes[g_iAccID][i].x);
                //g_pfSumPowX[j] = g_ppf4SumStokes[g_iAccID][i].x;
            }
            if (0.0 == g_ppf4SumStokes[g_iAccID][i].y)
            {
                g_pfSumPowY[j] = 0.0;
            }
            else
            {
                g_pfSumPowY[j] = 10 * log10f(g_ppf4SumStokes[g_iAccID][i].y);
                //g_pfSumPowY[j] = g_ppf4SumStokes[g_iAccID][i].y;
            }
            g_pfSumStokesRe[j] = g_ppf4SumStokes[g_iAccID][i].z;
            g_pfSumStokesIm[j] = g_ppf4SumStokes[g_iAccID][i].w;
        }
        /* plot accumulated X-pol. power */
        fMinY = FLT_MAX;
        fMaxY = -(FLT_MAX);
        for (i = 0; i < g_iNFFT; ++i)
        {
            if (g_pfSumPowX[i] > fMaxY)
            {
                fMaxY = g_pfSumPowX[i];
            }
            if (g_pfSumPowX[i] < fMinY)
            {
                fMinY = g_pfSumPowX[i];
            }
        }
        /* to avoid min == max */
        fMaxY += 1.0;
        fMinY -= 1.0;
        /* re-reference the trace so its peak sits at 0 dB */
        for (i = 0; i < g_iNFFT; ++i)
        {
            g_pfSumPowX[i] -= fMaxY;
        }
        fMinY -= fMaxY;
        fMaxY = 0;
        cpgpanl(k + 1, 1);
        cpgeras();
        cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
        cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
        //cpglab("Bin Number", "", "SumPowX");
        cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
        cpgsci(PG_CI_PLOT);
        cpgline(g_iNFFT, g_pfFreq, g_pfSumPowX);
        cpgsci(PG_CI_DEF);
        /* plot accumulated Y-pol. power */
        fMinY = FLT_MAX;
        fMaxY = -(FLT_MAX);
        for (i = 0; i < g_iNFFT; ++i)
        {
            if (g_pfSumPowY[i] > fMaxY)
            {
                fMaxY = g_pfSumPowY[i];
            }
            if (g_pfSumPowY[i] < fMinY)
            {
                fMinY = g_pfSumPowY[i];
            }
        }
        /* to avoid min == max */
        fMaxY += 1.0;
        fMinY -= 1.0;
        /* same peak re-referencing as the X trace */
        for (i = 0; i < g_iNFFT; ++i)
        {
            g_pfSumPowY[i] -= fMaxY;
        }
        fMinY -= fMaxY;
        fMaxY = 0;
        cpgpanl(k + 1, 2);
        cpgeras();
        cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
        cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
        //cpglab("Bin Number", "", "SumPowY");
        cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
        cpgsci(PG_CI_PLOT);
        cpgline(g_iNFFT, g_pfFreq, g_pfSumPowY);
        cpgsci(PG_CI_DEF);
        /* plot accumulated real(XY*) - linear scale, no re-referencing */
        fMinY = FLT_MAX;
        fMaxY = -(FLT_MAX);
        for (i = 0; i < g_iNFFT; ++i)
        {
            if (g_pfSumStokesRe[i] > fMaxY)
            {
                fMaxY = g_pfSumStokesRe[i];
            }
            if (g_pfSumStokesRe[i] < fMinY)
            {
                fMinY = g_pfSumStokesRe[i];
            }
        }
        /* to avoid min == max */
        fMaxY += 1.0;
        fMinY -= 1.0;
        cpgpanl(k + 1, 3);
        cpgeras();
        cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
        cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
        //cpglab("Bin Number", "", "SumStokesRe");
        cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
        cpgsci(PG_CI_PLOT);
        cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesRe);
        cpgsci(PG_CI_DEF);
        /* plot accumulated imag(XY*) - linear scale, no re-referencing */
        fMinY = FLT_MAX;
        fMaxY = -(FLT_MAX);
        for (i = 0; i < g_iNFFT; ++i)
        {
            if (g_pfSumStokesIm[i] > fMaxY)
            {
                fMaxY = g_pfSumStokesIm[i];
            }
            if (g_pfSumStokesIm[i] < fMinY)
            {
                fMinY = g_pfSumStokesIm[i];
            }
        }
        /* to avoid min == max */
        fMaxY += 1.0;
        fMinY -= 1.0;
        cpgpanl(k + 1, 4);
        cpgeras();
        cpgsvp(PG_VP_ML, PG_VP_MR, PG_VP_MB, PG_VP_MT);
        cpgswin(fMinFreq, fMaxFreq, fMinY, fMaxY);
        //cpglab("Bin Number", "", "SumStokesIm");
        cpgbox("BCNST", 0.0, 0, "BCNST", 0.0, 0);
        cpgsci(PG_CI_PLOT);
        cpgline(g_iNFFT, g_pfFreq, g_pfSumStokesIm);
        cpgsci(PG_CI_DEF);
    }
    return;
}
#endif
/*
* Registers handlers for SIGTERM and CTRL+C
*/
int RegisterSignalHandlers()
{
    struct sigaction stSigHandler = {{0}};
    const int aiStopSignals[] = {SIGINT, SIGTERM};
    size_t uSig = 0;
    /* SIGINT (CTRL+C) and SIGTERM share the same clean-up-and-exit
       handler, so register them in one pass */
    stSigHandler.sa_handler = HandleStopSignals;
    for (uSig = 0;
         uSig < (sizeof(aiStopSignals) / sizeof(aiStopSignals[0]));
         ++uSig)
    {
        if (sigaction(aiStopSignals[uSig], &stSigHandler, NULL) != EXIT_SUCCESS)
        {
            (void) fprintf(stderr,
                           "ERROR: Handler registration failed for signal %d!\n",
                           aiStopSignals[uSig]);
            return EXIT_FAILURE;
        }
    }
    return EXIT_SUCCESS;
}
/*
* Catches SIGTERM and CTRL+C and cleans up before exiting
*/
void HandleStopSignals(int iSigNo)
{
    /* SIGINT and SIGTERM are handled identically, so the signal number is
       not inspected */
    (void) iSigNo;
    /* release all resources, then terminate */
    CleanUp();
    exit(EXIT_SUCCESS);
}
/* Backing implementation of the CUDASafeCallWithCleanUp() macro: on any
   CUDA error, prints file/line/description, runs the supplied clean-up
   callback, and terminates the process. */
void __CUDASafeCallWithCleanUp(cudaError_t iRet,
                               const char* pcFile,
                               const int iLine,
                               void (*pCleanUp)(void))
{
    if (cudaSuccess == iRet)
    {
        return;
    }
    (void) fprintf(stderr,
                   "ERROR: File <%s>, Line %d: %s\n",
                   pcFile,
                   iLine,
                   cudaGetErrorString(iRet));
    /* free resources */
    (*pCleanUp)();
    exit(EXIT_FAILURE);
}
/*
* Prints usage information
*/
/* Prints command-line usage to stdout; one printf pair per option so the
   descriptions line up in a column. */
void PrintUsage(const char *pcProgName)
{
    (void) printf("Usage: %s [options] <data-file>\n",
                  pcProgName);
    (void) printf("    -h  --help                           ");
    (void) printf("Display this usage information\n");
    /* BUG FIX: -b takes an argument (number of sub-bands), so show
       <value> in the help text like -n and -a do */
    (void) printf("    -b  --nsub <value>               ");
    (void) printf("Number of sub-bands in the data\n");
    (void) printf("    -n  --nfft <value>               ");
    (void) printf("Number of points in FFT\n");
    (void) printf("    -a  --nacc <value>               ");
    (void) printf("Number of spectra to add\n");
#if PLOT
    (void) printf("    -s  --fsamp <value>              ");
    (void) printf("Sampling frequency\n");
#endif
    return;
}
|
530177ed69a54dff4a31a1e210a36df49ff8464b.hip | // !!! This is a file automatically generated by hipify!!!
#if 0
#include "Renderer.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "util.h"
struct Presets;
#define PI 3.14159265359;
#define kC_1 3.7418e-16
#define kC_2 1.4388e-2
__constant__ int dev_xsize;
__constant__ int dev_ysize;
__constant__ int dev_gridx;
__constant__ int dev_gridy;
__constant__ int dev_gridz;
__constant__ double dev_oa;
__constant__ double dev_os;
__constant__ double dev_ot;
__constant__ double dev_w;
__constant__ double dev_h;
__constant__ double dev_near_plane_distance;
__constant__ double dev_wds;
__constant__ double dev_C;
__constant__ double dev_CIE_X[kCIELEN];
__constant__ double dev_CIE_Y[kCIELEN];
__constant__ double dev_CIE_Z[kCIELEN];
__constant__ double dev_fast_trans[16];
__constant__ double dev_fast_itrans[16];
const double CIE_X[] = {
1.299000e-04, 2.321000e-04, 4.149000e-04, 7.416000e-04, 1.368000e-03,
2.236000e-03, 4.243000e-03, 7.650000e-03, 1.431000e-02, 2.319000e-02,
4.351000e-02, 7.763000e-02, 1.343800e-01, 2.147700e-01, 2.839000e-01,
3.285000e-01, 3.482800e-01, 3.480600e-01, 3.362000e-01, 3.187000e-01,
2.908000e-01, 2.511000e-01, 1.953600e-01, 1.421000e-01, 9.564000e-02,
5.795001e-02, 3.201000e-02, 1.470000e-02, 4.900000e-03, 2.400000e-03,
9.300000e-03, 2.910000e-02, 6.327000e-02, 1.096000e-01, 1.655000e-01,
2.257499e-01, 2.904000e-01, 3.597000e-01, 4.334499e-01, 5.120501e-01,
5.945000e-01, 6.784000e-01, 7.621000e-01, 8.425000e-01, 9.163000e-01,
9.786000e-01, 1.026300e+00, 1.056700e+00, 1.062200e+00, 1.045600e+00,
1.002600e+00, 9.384000e-01, 8.544499e-01, 7.514000e-01, 6.424000e-01,
5.419000e-01, 4.479000e-01, 3.608000e-01, 2.835000e-01, 2.187000e-01,
1.649000e-01, 1.212000e-01, 8.740000e-02, 6.360000e-02, 4.677000e-02,
3.290000e-02, 2.270000e-02, 1.584000e-02, 1.135916e-02, 8.110916e-03,
5.790346e-03, 4.106457e-03, 2.899327e-03, 2.049190e-03, 1.439971e-03,
9.999493e-04, 6.900786e-04, 4.760213e-04, 3.323011e-04, 2.348261e-04,
1.661505e-04, 1.174130e-04, 8.307527e-05, 5.870652e-05, 4.150994e-05,
2.935326e-05, 2.067383e-05, 1.455977e-05, 1.025398e-05, 7.221456e-06,
5.085868e-06, 3.581652e-06, 2.522525e-06, 1.776509e-06, 1.251141e-06
};
const double CIE_Y[] = { 3.917000e-06, 6.965000e-06, 1.239000e-05, 2.202000e-05, 3.900000e-05,
6.400000e-05, 1.200000e-04, 2.170000e-04, 3.960000e-04, 6.400000e-04,
1.210000e-03, 2.180000e-03, 4.000000e-03, 7.300000e-03, 1.160000e-02,
1.684000e-02, 2.300000e-02, 2.980000e-02, 3.800000e-02, 4.800000e-02,
6.000000e-02, 7.390000e-02, 9.098000e-02, 1.126000e-01, 1.390200e-01,
1.693000e-01, 2.080200e-01, 2.586000e-01, 3.230000e-01, 4.073000e-01,
5.030000e-01, 6.082000e-01, 7.100000e-01, 7.932000e-01, 8.620000e-01,
9.148501e-01, 9.540000e-01, 9.803000e-01, 9.949501e-01, 1.000000e+00,
9.950000e-01, 9.786000e-01, 9.520000e-01, 9.154000e-01, 8.700000e-01,
8.163000e-01, 7.570000e-01, 6.949000e-01, 6.310000e-01, 5.668000e-01,
5.030000e-01, 4.412000e-01, 3.810000e-01, 3.210000e-01, 2.650000e-01,
2.170000e-01, 1.750000e-01, 1.382000e-01, 1.070000e-01, 8.160000e-02,
6.100000e-02, 4.458000e-02, 3.200000e-02, 2.320000e-02, 1.700000e-02,
1.192000e-02, 8.210000e-03, 5.723000e-03, 4.102000e-03, 2.929000e-03,
2.091000e-03, 1.484000e-03, 1.047000e-03, 7.400000e-04, 5.200000e-04,
3.611000e-04, 2.492000e-04, 1.719000e-04, 1.200000e-04, 8.480000e-05,
6.000000e-05, 4.240000e-05, 3.000000e-05, 2.120000e-05, 1.499000e-05,
1.060000e-05, 7.465700e-06, 5.257800e-06, 3.702900e-06, 2.607800e-06,
1.836600e-06, 1.293400e-06, 9.109300e-07, 6.415300e-07, 4.518100e-07 };
const double CIE_Z[] = { 6.061000e-04, 1.086000e-03, 1.946000e-03, 3.486000e-03, 6.450001e-03,
1.054999e-02, 2.005001e-02, 3.621000e-02, 6.785001e-02, 1.102000e-01,
2.074000e-01, 3.713000e-01, 6.456000e-01, 1.039050e+00, 1.385600e+00,
1.622960e+00, 1.747060e+00, 1.782600e+00, 1.772110e+00, 1.744100e+00,
1.669200e+00, 1.528100e+00, 1.287640e+00, 1.041900e+00, 8.129501e-01,
6.162000e-01, 4.651800e-01, 3.533000e-01, 2.720000e-01, 2.123000e-01,
1.582000e-01, 1.117000e-01, 7.824999e-02, 5.725001e-02, 4.216000e-02,
2.984000e-02, 2.030000e-02, 1.340000e-02, 8.749999e-03, 5.749999e-03,
3.900000e-03, 2.749999e-03, 2.100000e-03, 1.800000e-03, 1.650001e-03,
1.400000e-03, 1.100000e-03, 1.000000e-03, 8.000000e-04, 6.000000e-04,
3.400000e-04, 2.400000e-04, 1.900000e-04, 1.000000e-04, 4.999999e-05,
3.000000e-05, 2.000000e-05, 1.000000e-05, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00 };
__device__ void indexToLocal(int i, int j, int k, double &l_x, double &l_y, double &l_z) {
    // Map integer grid indices onto normalised local coordinates, so that
    // index 0 -> 0.0 and index (griddim - 1) -> 1.0.
    const double nx = double(dev_gridx - 1);
    const double ny = double(dev_gridy - 1);
    const double nz = double(dev_gridz - 1);
    l_x = double(i) / nx;
    l_y = double(j) / ny;
    l_z = double(k) / nz;
}
__device__ void localToWorld(double *trans, double l_x, double l_y, double l_z, double &w_x, double &w_y, double &w_z) {
    // Apply the top three rows of the row-major 4x4 affine transform to the
    // local-space point (the homogeneous w == 1 term is implicit).
    const double *row0 = trans + 0;
    const double *row1 = trans + 4;
    const double *row2 = trans + 8;
    w_x = l_x*row0[0] + l_y*row0[1] + l_z*row0[2] + row0[3];
    w_y = l_x*row1[0] + l_y*row1[1] + l_z*row1[2] + row1[3];
    w_z = l_x*row2[0] + l_y*row2[1] + l_z*row2[2] + row2[3];
}
__device__ void indexToWorld(double *trans, int i, int j, int k, double &w_x, double &w_y, double &w_z) {
    // Grid index -> normalised local coordinates -> world space.
    double lx, ly, lz;
    indexToLocal(i, j, k, lx, ly, lz);
    localToWorld(trans, lx, ly, lz, w_x, w_y, w_z);
}
__device__ void worldToLocal(double *itrans, const double w_x, const double w_y, const double w_z, double &l_x, double &l_y, double &l_z) {
    // Inverse of localToWorld(): apply the top three rows of the row-major
    // inverse transform to a world-space point.
    const double *row0 = itrans + 0;
    const double *row1 = itrans + 4;
    const double *row2 = itrans + 8;
    l_x = w_x*row0[0] + w_y*row0[1] + w_z*row0[2] + row0[3];
    l_y = w_x*row1[0] + w_y*row1[1] + w_z*row1[2] + row1[3];
    l_z = w_x*row2[0] + w_y*row2[1] + w_z*row2[2] + row2[3];
}
__device__ void localToUpperLeftIndex(const double l_x, const double l_y, const double l_z, int &i, int &j, int &k) {
    // Scale the local coordinate back to index space and floor it to get the
    // lowest-index ("upper-left") corner of the containing cell.
    const double sx = l_x*double(dev_gridx - 1);
    const double sy = l_y*double(dev_gridy - 1);
    const double sz = l_z*double(dev_gridz - 1);
    i = (int)floor(sx);
    j = (int)floor(sy);
    k = (int)floor(sz);
}
__device__ void worldToUpperLeftIndex(double *itrans, const double w_x, const double w_y, const double w_z, int &i, int &j, int &k) {
    // World space -> local space -> containing cell's lowest-index corner.
    double lx, ly, lz;
    worldToLocal(itrans, w_x, w_y, w_z, lx, ly, lz);
    localToUpperLeftIndex(lx, ly, lz, i, j, k);
}
__device__ bool localIsValid(double l_x, double l_y, double l_z) {
    // True when the point lies inside the half-open unit cube [0, 1)^3.
    if (l_x < 0.0 || l_x >= 1.0)
        return false;
    if (l_y < 0.0 || l_y >= 1.0)
        return false;
    return l_z >= 0.0 && l_z < 1.0;
}
__device__ bool worldIsValid(double *itrans, const double w_x, const double w_y, const double w_z) {
    // Transform to local space and test against the unit cube.
    double lx, ly, lz;
    worldToLocal(itrans, w_x, w_y, w_z, lx, ly, lz);
    return localIsValid(lx, ly, lz);
}
// Trilinearly interpolates the scalar field g at an arbitrary world-space
// point: finds the containing cell, computes the fractional position within
// it from the cell corners' world positions, and delegates to
// linearInterpolate() (defined elsewhere).
// NOTE(review): assumes the point lies inside the grid - no clamping of
// (i, j, k) is done here; confirm callers guarantee this (e.g. via
// worldIsValid()).
__device__ double valueAtWorld(double *g, double *itrans, double* trans, double w_x, double w_y, double w_z) {
    int i, j, k;
    double w_x0, w_y0, w_z0;
    double w_x1, w_y1, w_z1;
    double x, y, z;
    // Convert world coordinates to cell coordinates
    worldToUpperLeftIndex(itrans, w_x, w_y, w_z, i, j, k);
    indexToWorld(trans, i, j, k, w_x0, w_y0, w_z0);
    indexToWorld(trans, i + 1, j + 1, k + 1, w_x1, w_y1, w_z1);
    // Fractional position of the query point within the cell, per axis
    x = (w_x - w_x0) / (w_x1 - w_x0);
    y = (w_y - w_y0) / (w_y1 - w_y0);
    z = (w_z - w_z0) / (w_z1 - w_z0);
    return linearInterpolate(g, i, j, k, x, y, z, dev_gridx, dev_gridy, dev_gridz);
}
// minbox is the corner of AABB with minimal coordinates - left bottom, maxbox is maximal corner
//code originally from http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
// Slab-method ray/AABB intersection: returns true when the ray hits the box,
// with *tmin/*tmax set to the entry/exit parameters along lineDirection.
// NOTE(review): a zero direction component gives an infinite dirfrac, which
// the slab method tolerates, but origin-on-slab plus zero direction yields
// 0 * inf = NaN; the commented-out clamping below was presumably meant to
// guard this - confirm whether callers can produce such rays.
__device__ bool rayBoxIntersection(Vector3 minbox, Vector3 maxbox, const Vector3 &lineOrigin, const Vector3 &lineDirection, double *tmin, double *tmax) {
    //avoid division with zero
    Vector3 dir = lineDirection;
    /*dir.x = sgn(dir.x)*::max(fabs(dir.x), DBL_MIN);
    dir.y = sgn(dir.y)*::max(fabs(dir.y), DBL_MIN);
    dir.z = sgn(dir.z)*::max(fabs(dir.z), DBL_MIN);*/
    Vector3 dirfrac;
    dirfrac.x = 1.0f / dir.x;
    dirfrac.y = 1.0f / dir.y;
    dirfrac.z = 1.0f / dir.z;
    // ray parameters at which each pair of slab planes is crossed
    double xmin = (minbox.x - lineOrigin.x)*dirfrac.x;
    double xmax = (maxbox.x - lineOrigin.x)*dirfrac.x;
    double ymin = (minbox.y - lineOrigin.y)*dirfrac.y;
    double ymax = (maxbox.y - lineOrigin.y)*dirfrac.y;
    double zmin = (minbox.z - lineOrigin.z)*dirfrac.z;
    double zmax = (maxbox.z - lineOrigin.z)*dirfrac.z;
    // order each pair so min <= max (handles negative direction components)
    binsort(&xmax, &xmin);
    binsort(&ymax, &ymin);
    binsort(&zmax, &zmin);
    *tmin = d_max(d_max(xmin, ymin), zmin);
    *tmax = d_min(d_min(xmax, ymax), zmax);
    // if tmax < 0, ray (line) is intersecting AABB, but whole AABB is behind us (if tmin is < 0 i think we start inside the box //axel)
    if (*tmax < 0)
        return false;
    // if tmin > tmax, ray doesn't intersect AABB
    if (*tmin > *tmax)
        return false;
    return true;
}
__device__ Vector3 XYZtoLMS(const Vector3 &xyz) {
    // CIE XYZ -> LMS cone response via the CAT02 chromatic-adaptation
    // matrix (an alternative D65-normalised matrix was removed here).
    Vector3 lms(0.0, 0.0, 0.0);
    lms.x = 0.7328*xyz.x + 0.4296*xyz.y - 0.1624*xyz.z;
    lms.y = -0.7036*xyz.x + 1.6975*xyz.y + 0.0061*xyz.z;
    lms.z = 0.0030*xyz.x + 0.0136*xyz.y + 0.9834*xyz.z;
    return lms;
}
__device__ Vector3 XYZtoRGB(const Vector3 &xyz) {
    // CIE XYZ -> linear sRGB (no gamma applied here).
    return Vector3(xyz.x * 3.2410 + xyz.y * -1.5374 + xyz.z * -0.4986,
                   xyz.x * -0.9692 + xyz.y * 1.8760 + xyz.z * 0.0416,
                   xyz.x * 0.0556 + xyz.y * -0.2040 + xyz.z * 1.0570);
}
__device__ Vector3 LMStoXYZ(const Vector3 &lms) {
    // LMS cone response -> CIE XYZ using the inverse CAT02 matrix
    // (inverse of the matrix in XYZtoLMS; a D65 variant was removed here).
    Vector3 xyz(0.0, 0.0, 0.0);
    xyz.x = 1.09612 *lms.x - 0.278869*lms.y + 0.182745 *lms.z;
    xyz.y = 0.454369 *lms.x + 0.473533 *lms.y + 0.0720978 *lms.z;
    xyz.z = -0.00962761 *lms.x - 0.00569803 *lms.y + 1.01533 *lms.z;
    return xyz;
}
__device__ double radiance(double lambda, double T) {
    // Blackbody spectral radiance for wavelength lambda at temperature T,
    // built from the radiation constants kC_1 and kC_2.
    // NOTE(review): this is Planck's-law shaped with an extra factor of 2
    // relative to the usual C_1 form - confirm the intended normalisation.
    const double denom = pow(lambda, 5.0) * (exp(kC_2 / (lambda*T)) - 1.0);
    return (2.0*kC_2*0.0 + 2.0*kC_1) / denom;
}
// Spectral ray-marching kernel: one thread per pixel, grid-striding over the
// whole dev_xsize x dev_ysize image. For each pixel it intersects the view
// ray with the volume's AABB, seeds each wavelength bin with the emission
// reaching the ray's exit point, marches back toward the camera accumulating
// blackbody emission (attenuated by dev_C per step), converts the spectrum to
// XYZ via the CIE tables, applies a CAT02-based chromatic adaption controlled
// by CHROMA, and writes linear RGB to dev_image.
// Precondition: SPEC_TOTAL_SAMPLES <= 5 (capacity of the per-thread
// accumulator; the count is clamped defensively below).
// Note: dev_l is accepted but not read by this kernel.
__global__ void colorKernel(int SPEC_TOTAL_SAMPLES, bool QUALITY_ROOM, double LeScale, int SPEC_SAMPLE_STEP, int CHROMA,
    double *dev_le, double* dev_l, double *dev_le_mean, double *dev_temperature_grid, float *dev_image, double *dev_xm, double *dev_ym, double *dev_zm,
    Vector3 *dev_eyepos, Vector3 *dev_forward, Vector3 *dev_right, Vector3 *dev_up, Vector3 *dev_minCoord, Vector3 *dev_maxCoord) {
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int xStride = blockDim.x * gridDim.x;
    const int yStride = blockDim.y * gridDim.y;
    const double du = 2.0 / double(dev_xsize - 1);
    const double dv = 2.0 / double(dev_ysize - 1);
    const int kMaxSamples = 5;  // fixed capacity of local_L below
    // Clamp so an oversized sample count cannot overflow local_L (was unchecked).
    const int nSamples = (SPEC_TOTAL_SAMPLES < kMaxSamples) ? SPEC_TOTAL_SAMPLES : kMaxSamples;
    // BUG FIX: the old loop advanced x AND y together each iteration, so when
    // the image was larger than the launch grid only a diagonal subset of the
    // remaining pixels was ever written. Proper nested 2D grid-stride loops:
    for (int y = y0; y < dev_ysize; y += yStride) {
        for (int x = x0; x < dev_xsize; x += xStride) {
            const double u = -1.0 + double(x) * du;
            const double v = -1.0 + double(y) * dv;
            // pixel position on the near plane and the normalized view ray
            Vector3 nearPlanePos = *dev_eyepos + *dev_forward * dev_near_plane_distance + (*dev_right) * u * (dev_w / 2) + (*dev_up) * v * (dev_h / 2);
            Vector3 direction = nearPlanePos - *dev_eyepos;
            direction.normalize();
            const int index_p = y * dev_xsize + x;
            double tmin, tmax;
            double local_L[5];  // per-wavelength radiance accumulator (kMaxSamples entries)
            // tmax <= 0 means the whole box is behind the camera -> treat as a miss
            if (rayBoxIntersection(*dev_minCoord, *dev_maxCoord, nearPlanePos, direction, &tmin, &tmax) && tmax > 0.0) {
                Vector3 endPoint = nearPlanePos + direction * tmax;
                // start at the first box intersection when the camera is outside the box
                Vector3 startPoint = (tmin > 0.0) ? nearPlanePos + direction * tmin : nearPlanePos;
                // seed each wavelength bin with the emission reaching endPoint
                for (int i = 0; i < nSamples; i++) {
                    local_L[i] = 0.0;
                    if (QUALITY_ROOM) {
                        // full gather over every grid cell, inverse-square falloff
                        for (int a = 0; a < dev_gridx; a++) {
                            for (int b = 0; b < dev_gridy; b++) {
                                for (int c = 0; c < dev_gridz; c++) {
                                    int index = (a * dev_gridy * dev_gridz +
                                        b * dev_gridz +
                                        c)*SPEC_TOTAL_SAMPLES + i;
                                    double xw2, yw2, zw2;
                                    indexToWorld(dev_fast_trans, a, b, c, xw2, yw2, zw2);
                                    Vector3 diff = Vector3(xw2, yw2, zw2) - endPoint;
                                    double dist = diff.norm();
                                    local_L[i] += pow(dist, -2.0)*dev_le[index] * LeScale;
                                }
                            }
                        }
                    }
                    else if (dev_le_mean[i] > 0.0) {
                        // fast path: treat the emission as a point source at its
                        // per-wavelength centroid (dev_xm/ym/zm)
                        double xw2, yw2, zw2;
                        indexToWorld(dev_fast_trans, dev_xm[i], dev_ym[i], dev_zm[i], xw2, yw2, zw2);
                        Vector3 diff = Vector3(xw2, yw2, zw2) - endPoint;
                        double dist = diff.norm();
                        // no diffuse term: the surface normal is not available here
                        local_L[i] += pow(dist, -2.0)*dev_le_mean[i] * LeScale;
                    }
                }
                // march from the back of the volume toward the camera; prior
                // light is attenuated by dev_C (per-step transparency) each step
                double length = (endPoint - startPoint).norm();
                int intsteps = int(length / dev_wds);
                // BUG FIX: pos was read uninitialized below when intsteps == 0
                Vector3 pos = startPoint;
                for (int z = 0; z < intsteps; z += 1) {
                    pos = endPoint - direction*double(z)*dev_wds;
                    if (worldIsValid(dev_fast_itrans, pos.x, pos.y, pos.z)) // pos inside the grid?
                    {
                        double T = valueAtWorld(dev_temperature_grid, dev_fast_itrans, dev_fast_trans, pos.x, pos.y, pos.z);
                        // accumulate blackbody emission for each wavelength sample
                        for (int i = 0; i < nSamples; i += 1) {
                            const double lambda = (360.0 + double(i*SPEC_SAMPLE_STEP) * 5)*1e-9;
                            local_L[i] = dev_C * local_L[i] + dev_oa * radiance(lambda, T) * dev_wds;
                        }
                    }
                }
                // attenuate over the unsampled stretch between the last sample
                // position and the near plane (Beer-Lambert with dev_ot)
                double restLength = (pos - nearPlanePos).norm();
                double Crest = exp(-dev_ot * restLength);
                // integrate the spectrum against the CIE color-matching tables
                Vector3 XYZ = Vector3(0.0);
                for (int i = 0; i < nSamples; i += 1) {
                    local_L[i] = Crest*local_L[i];
                    int j = SPEC_SAMPLE_STEP*i;
                    XYZ.x += local_L[i] * dev_CIE_X[j];
                    XYZ.y += local_L[i] * dev_CIE_Y[j];
                    XYZ.z += local_L[i] * dev_CIE_Z[j];
                }
                XYZ *= 5e-9*double(SPEC_SAMPLE_STEP);  // wavelength bin width [m]
                // chromatic adaption; a high CHROMA decreases the light's intensity
                Vector3 LMS = XYZtoLMS(XYZ);
                LMS.x = LMS.x / (LMS.x + CHROMA);
                LMS.y = LMS.y / (LMS.y + CHROMA);
                LMS.z = LMS.z / (LMS.z + CHROMA);
                XYZ = LMStoXYZ(LMS);
                Vector3 rgb = XYZtoRGB(XYZ);
                dev_image[index_p * 3 + 0] = rgb.x; //R
                dev_image[index_p * 3 + 1] = rgb.y; //G
                dev_image[index_p * 3 + 2] = rgb.z; //B
            }
            else {
                // ray misses the volume (or the box is behind the camera): black
                dev_image[index_p * 3 + 0] = 0.0; //R
                dev_image[index_p * 3 + 1] = 0.0; //G
                dev_image[index_p * 3 + 2] = 0.0; //B
            }
        }
    }
}
// Upload render parameters into device __constant__ memory: image and grid
// dimensions, optical coefficients, camera/film geometry, the CIE color
// matching tables and the grid's index<->world transforms.
// Returns the status reported by hipGetLastError() after all copies.
hipError_t Renderer::loadConstantMem() {
    hipError_t cudaStatus;
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
    }
    // image dimensions
    cudaStatus = hipMemcpyToSymbol(dev_xsize, &_x, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_ysize, &_y, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // volume grid dimensions
    cudaStatus = hipMemcpyToSymbol(dev_gridx, &_volume->_grid->_xdim, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed! gridx");
    }
    cudaStatus = hipMemcpyToSymbol(dev_gridy, &_volume->_grid->_ydim, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_gridz, &_volume->_grid->_zdim, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // optical coefficients and the ray-marching step length
    cudaStatus = hipMemcpyToSymbol(dev_oa, &_volume->_oa, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed! oa");
    }
    cudaStatus = hipMemcpyToSymbol(dev_os, &_volume->_os, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_ot, &_volume->_ot, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_wds, &_volume->_wds, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // camera film geometry
    cudaStatus = hipMemcpyToSymbol(dev_w, &_cam->_film->_w, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_h, &_cam->_film->_h, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_near_plane_distance, &_cam->_film->_nearPlaneDistance, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // per-step transparency factor used by the marching loop
    double tr = _volume->transparency();
    cudaStatus = hipMemcpyToSymbol(dev_C, &tr, sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // CIE color matching tables
    cudaStatus = hipMemcpyToSymbol(dev_CIE_X, &CIE_X, kCIELEN * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_CIE_Y, &CIE_Y, kCIELEN * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_CIE_Z, &CIE_Z, kCIELEN * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    // flattened 4x4 index->world / world->index transforms
    /* TODO: why can't use trans*/
    cudaStatus = hipMemcpyToSymbol(dev_fast_trans, &(_volume->_grid->_trans[0]), 16 * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipMemcpyToSymbol(dev_fast_itrans, &(_volume->_grid->_itrans[0]), 16 * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }
    cudaStatus = hipGetLastError();
    // BUG FIX: this condition used '=' (assignment), which overwrote the status
    // with hipSuccess — the function always reported success and returned
    // hipSuccess regardless of earlier failures.
    if (cudaStatus == hipSuccess) {
        std::cout << "Constant memory load done." << std::endl;
    }
    return cudaStatus;
}
// Allocates device buffers and uploads camera, volume and emission data.
// NOTE(review): every device-pointer parameter is received BY VALUE, so the
// hipMalloc results below are stored into local copies that vanish when the
// function returns — the caller's pointers are never updated and every
// allocation leaks. The parameters would have to be passed by reference
// (e.g. Vector3 *&deyepos) or returned for this to work. drawFire() does not
// call this helper (it repeats the same code inline), so the defect is
// currently latent; the signature is left untouched here.
hipError_t Renderer::loadMem(Vector3 *deyepos, Vector3 *dforward, Vector3 *dright, Vector3 *dup, Vector3 *devmincoord, Vector3 *devmaxcoord,
float *dimg, double *devT, double *dle, double *dl, double *dlemean, double *dxm, double *dym, double *dzm,
double *Le, double *L, double *LeMean, double *xm, double *ym, double *zm,
double *temperature_grid) {
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
// Camera frame: device allocations (lost to the caller — see NOTE above).
cudaStatus = hipMalloc((void**)&deyepos, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dforward, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dright, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dup, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(deyepos, &_cam->_eyepos, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
cudaStatus = hipMemcpy(dforward, &_cam->_forward, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
cudaStatus = hipMemcpy(dright, &_cam->_right, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
cudaStatus = hipMemcpy(dup, &_cam->_up, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
// Volume bounding box corners.
cudaStatus = hipMalloc((void**)&devmincoord, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&devmaxcoord, sizeof(Vector3));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMemcpy(devmincoord, &_volume->_grid->_min_coord, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
cudaStatus = hipMemcpy(devmaxcoord, &_volume->_grid->_max_coord, sizeof(Vector3), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
// Per-wavelength emission centroids.
cudaStatus = hipMalloc((void**)&dxm, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dym, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
cudaStatus = hipMalloc((void**)&dzm, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
// Output image and temperature field.
int imsize = _x * _y * 3;
cudaStatus = hipMalloc((void**)&dimg, imsize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
;
}
cudaStatus = hipMalloc((void**)&devT, _volume->_grid->getSize() * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
;
}
// Spectral emission buffers.
int LeSize = _volume->_grid->_xdim * _volume->_grid->_ydim * _volume->_grid->_zdim * SPEC_TOTAL_SAMPLES;
cudaStatus = hipMalloc((void**)&dle, LeSize * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
;
}
cudaStatus = hipMalloc((void**)&dl, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
;
}
cudaStatus = hipMalloc((void**)&dlemean, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
;
}
// Copy input vectors from host memory to GPU buffers.
// NOTE(review): temperature_grid is double* in this signature, but
// drawFire() holds the temperature field as float* — verify the caller
// really provides a double buffer of getSize() elements.
cudaStatus = hipMemcpy(devT, temperature_grid, _volume->_grid->getSize() * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dlemean, LeMean, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dle, Le, LeSize * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dl, L, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dxm, xm, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dym, ym, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
cudaStatus = hipMemcpy(dzm, zm, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
;
}
return cudaStatus;
}
// Render the fire volume into `image` (_x * _y RGB float triplets) and return it.
// Phase 1 (CPU/OpenMP): evaluate the per-cell spectral emission Le and, in the
// fast path (!QUALITY_ROOM), its emission-weighted centroid per wavelength bin.
// Phase 2 (GPU): upload everything, launch colorKernel, copy the frame back.
// All host and device temporaries are released before returning.
float* Renderer::drawFire(float* temperature_grid, float *image) {
    // grid index -> world coordinate of the volume's bounding-box corners
    _volume->_grid->indexToWorld(_volume->_grid->_trans, 0, 0, 0, _volume->_grid->_min_coord.x, _volume->_grid->_min_coord.y, _volume->_grid->_min_coord.z);
    _volume->_grid->indexToWorld(_volume->_grid->_trans, _volume->_grid->_xdim - 1, _volume->_grid->_ydim - 1, _volume->_grid->_zdim - 1, _volume->_grid->_max_coord.x, _volume->_grid->_max_coord.y, _volume->_grid->_max_coord.z);
    const int IMGSIZE = _x * _y * 3;
    int LeSize = _volume->_grid->_xdim * _volume->_grid->_ydim * _volume->_grid->_zdim * SPEC_TOTAL_SAMPLES;
    double *Le = new double[LeSize];
    double *L = new double[SPEC_TOTAL_SAMPLES];
    double *LeMean = new double[SPEC_TOTAL_SAMPLES];
    double *xm = new double[SPEC_TOTAL_SAMPLES];
    double *ym = new double[SPEC_TOTAL_SAMPLES];
    double *zm = new double[SPEC_TOTAL_SAMPLES];
    for (int i = 0; i < SPEC_TOTAL_SAMPLES; i++) {
        xm[i] = 0; ym[i] = 0; zm[i] = 0;
        L[i] = 0.0; // L is uploaded to dev_l below; it was previously uninitialized
    }
    /* Start rendering */
#pragma omp parallel
    {
        if (!QUALITY_ROOM) {
            // reset the mean radiance value
#pragma omp for
            for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
                LeMean[i] = 0.0;
            }
        }
#pragma omp for
        for (int x = 0; x < _volume->_grid->_xdim; ++x) {
            for (int y = 0; y < _volume->_grid->_ydim; ++y) {
                for (int z = 0; z < _volume->_grid->_zdim; ++z) {
                    // temperature_grid is laid out x-fastest while Le (below) is
                    // z-fastest — presumably intentional; TODO confirm
                    int idx = x + _volume->_grid->_xdim*y + _volume->_grid->_xdim*_volume->_grid->_ydim*z;
                    const double T = temperature_grid[idx];
                    for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
                        int index = (x*_volume->_grid->_ydim*_volume->_grid->_zdim +
                            y*_volume->_grid->_zdim +
                            z)*SPEC_TOTAL_SAMPLES + i;
                        const double lambda = (360.0 + double(i*SPEC_SAMPLE_STEP) * 5)*1e-9;
                        Le[index] = _volume->radiance(lambda, T);
                        // BUG FIX: was 'Le[index] == 0.0;' — a no-op comparison,
                        // so negative radiance values were never clamped.
                        if (Le[index] < 0.0) Le[index] = 0.0;
                        Le[index] = _volume->_oa*Le[index] * _volume->_grid->dx()*_volume->_grid->dy()*_volume->_grid->dz();
                        if (!QUALITY_ROOM) {
                            // several x-iterations run on different threads but
                            // accumulate into the same bin i -> atomics required
#pragma omp atomic
                            LeMean[i] += Le[index];
#pragma omp atomic
                            xm[i] += double(x)*Le[index];
#pragma omp atomic
                            ym[i] += double(y)*Le[index];
#pragma omp atomic
                            zm[i] += double(z)*Le[index];
                        }
                    }
                }
            }
        }
        if (!QUALITY_ROOM) {
            // normalize the emission-weighted centroid per wavelength bin
#pragma omp for
            for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
                if (LeMean[i] > 0.0) {
                    xm[i] /= LeMean[i];
                    ym[i] /= LeMean[i];
                    zm[i] /= LeMean[i];
                }
            }
        }
    }
    // ---- device upload ----
    // Device handles only: the old code new[]'d host buffers into these and then
    // overwrote the pointers with hipMalloc, leaking the host memory.
    Vector3 *d_eyepos = NULL;
    Vector3 *d_forward = NULL;
    Vector3 *d_right = NULL;
    Vector3 *d_up = NULL;
    Vector3 *dev_minCoord = NULL;
    Vector3 *dev_maxCoord = NULL;
    float *dev_img = NULL;
    double *dev_T = NULL;
    double *dev_le = NULL;
    double *dev_l = NULL;
    double *dev_le_mean = NULL;
    double *dev_xm = NULL;
    double *dev_ym = NULL;
    double *dev_zm = NULL;
    // print a warning when a HIP call fails; rendering still proceeds
    // best-effort, matching the original behavior
    auto check = [](hipError_t status, const char *msg) {
        if (status != hipSuccess) {
            fprintf(stderr, "%s", msg);
        }
    };
    // Choose which GPU to run on, change this on a multi-GPU system.
    check(hipSetDevice(0), "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
    // BUG FIX: the cache-config result was previously dropped (the stale
    // hipSetDevice status was re-checked instead).
    check(hipDeviceSetCacheConfig(hipFuncCachePreferL1), "hipDeviceSetCacheConfig failed!");
    check(hipMalloc((void**)&d_eyepos, sizeof(Vector3)), "hipMalloc failed!");
    check(hipMalloc((void**)&d_forward, sizeof(Vector3)), "hipMalloc failed!");
    check(hipMalloc((void**)&d_right, sizeof(Vector3)), "hipMalloc failed!");
    check(hipMalloc((void**)&d_up, sizeof(Vector3)), "hipMalloc failed!");
    // Copy input vectors from host memory to GPU buffers.
    check(hipMemcpy(d_eyepos, &_cam->_eyepos, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(d_forward, &_cam->_forward, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(d_right, &_cam->_right, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(d_up, &_cam->_up, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMalloc((void**)&dev_minCoord, sizeof(Vector3)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_maxCoord, sizeof(Vector3)), "hipMalloc failed!");
    check(hipMemcpy(dev_minCoord, &_volume->_grid->_min_coord, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_maxCoord, &_volume->_grid->_max_coord, sizeof(Vector3), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMalloc((void**)&dev_xm, SPEC_TOTAL_SAMPLES * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_ym, SPEC_TOTAL_SAMPLES * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_zm, SPEC_TOTAL_SAMPLES * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_img, IMGSIZE * sizeof(float)), "hipMalloc failed!");
    const int gridSize = (int)_volume->_grid->getSize();
    check(hipMalloc((void**)&dev_T, gridSize * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_le, LeSize * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_l, SPEC_TOTAL_SAMPLES * sizeof(double)), "hipMalloc failed!");
    check(hipMalloc((void**)&dev_le_mean, SPEC_TOTAL_SAMPLES * sizeof(double)), "hipMalloc failed!");
    // BUG FIX: the kernel samples the temperature field as double, but
    // `temperature_grid` holds floats. The old code memcpy'd gridSize doubles
    // straight out of the float buffer — reading past its end and feeding the
    // kernel garbage. Convert on the host first.
    double *T_host = new double[gridSize];
    for (int i = 0; i < gridSize; i++) {
        T_host[i] = double(temperature_grid[i]);
    }
    check(hipMemcpy(dev_T, T_host, gridSize * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_le_mean, LeMean, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_le, Le, LeSize * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_l, L, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_xm, xm, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_ym, ym, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    check(hipMemcpy(dev_zm, zm, SPEC_TOTAL_SAMPLES * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed!");
    // ---- kernel launch (timed with HIP events) ----
    std::cout << "start kernel... " << std::endl;
    dim3 color_grid(32, 32);
    dim3 color_block(16, 16);
    hipEvent_t start, stop;
    float elapse_time;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    colorKernel<<<color_grid, color_block>>>(SPEC_TOTAL_SAMPLES, QUALITY_ROOM, _volume->LeScale, SPEC_SAMPLE_STEP, CHROMA, dev_le, dev_l, dev_le_mean, dev_T, dev_img, dev_xm, dev_ym, dev_zm, d_eyepos, d_forward, d_right, d_up, dev_minCoord, dev_maxCoord);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapse_time, start, stop);
    printf("Elapsed time : %f ms\n", elapse_time);
    // Check for any errors launching the kernel
    hipError_t cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "colorKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
    }
    // Copy the frame back (a null-stream hipMemcpy waits for the kernel).
    check(hipMemcpy(image, dev_img, IMGSIZE * sizeof(float), hipMemcpyDeviceToHost), "hipMemcpy failed!");
    // Release everything — the original leaked every allocation made here.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_eyepos);
    hipFree(d_forward);
    hipFree(d_right);
    hipFree(d_up);
    hipFree(dev_minCoord);
    hipFree(dev_maxCoord);
    hipFree(dev_xm);
    hipFree(dev_ym);
    hipFree(dev_zm);
    hipFree(dev_img);
    hipFree(dev_T);
    hipFree(dev_le);
    hipFree(dev_l);
    hipFree(dev_le_mean);
    delete[] T_host;
    delete[] Le;
    delete[] L;
    delete[] LeMean;
    delete[] xm;
    delete[] ym;
    delete[] zm;
    return image;
}
#endif
| 530177ed69a54dff4a31a1e210a36df49ff8464b.cu | #if 0
#include "Renderer.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "util.h"
struct Presets;
// BUG FIX: the PI macro previously ended in ';', which became part of the
// macro expansion and would break any use of PI inside an expression.
#define PI 3.14159265359
// Planck radiation constants used by radiance():
// c1 ~ 3.7418e-16 [W*m^2], c2 ~ 1.4388e-2 [m*K]
#define kC_1 3.7418e-16
#define kC_2 1.4388e-2
// Per-launch render parameters, uploaded by Renderer::loadConstantMem().
__constant__ int dev_xsize;                  // image width  in pixels (from _x)
__constant__ int dev_ysize;                  // image height in pixels (from _y)
__constant__ int dev_gridx;                  // volume grid dimensions
__constant__ int dev_gridy;
__constant__ int dev_gridz;
__constant__ double dev_oa;                  // scales emitted radiance per marching step (volume _oa)
__constant__ double dev_os;                  // volume _os — not read by the kernels in this file
__constant__ double dev_ot;                  // attenuation exponent: exp(-dev_ot * distance)
__constant__ double dev_w;                   // camera film width
__constant__ double dev_h;                   // camera film height
__constant__ double dev_near_plane_distance; // camera near-plane distance
__constant__ double dev_wds;                 // ray-marching step length
__constant__ double dev_C;                   // per-step transparency (volume->transparency())
__constant__ double dev_CIE_X[kCIELEN];      // CIE color matching tables (copied from CIE_X/Y/Z below)
__constant__ double dev_CIE_Y[kCIELEN];
__constant__ double dev_CIE_Z[kCIELEN];
__constant__ double dev_fast_trans[16];      // grid index->world transform, flattened 4x4
__constant__ double dev_fast_itrans[16];     // world->grid index transform, flattened 4x4
// CIE standard-observer color-matching functions (x-bar, y-bar, z-bar),
// 95 samples tabulated in 5 nm steps. colorKernel maps table entry j to the
// wavelength (360 + 5*j) nm and samples them on the device as
// dev_CIE_X/Y/Z[SPEC_SAMPLE_STEP * i] (uploaded by loadConstantMem).
const double CIE_X[] = {
1.299000e-04, 2.321000e-04, 4.149000e-04, 7.416000e-04, 1.368000e-03,
2.236000e-03, 4.243000e-03, 7.650000e-03, 1.431000e-02, 2.319000e-02,
4.351000e-02, 7.763000e-02, 1.343800e-01, 2.147700e-01, 2.839000e-01,
3.285000e-01, 3.482800e-01, 3.480600e-01, 3.362000e-01, 3.187000e-01,
2.908000e-01, 2.511000e-01, 1.953600e-01, 1.421000e-01, 9.564000e-02,
5.795001e-02, 3.201000e-02, 1.470000e-02, 4.900000e-03, 2.400000e-03,
9.300000e-03, 2.910000e-02, 6.327000e-02, 1.096000e-01, 1.655000e-01,
2.257499e-01, 2.904000e-01, 3.597000e-01, 4.334499e-01, 5.120501e-01,
5.945000e-01, 6.784000e-01, 7.621000e-01, 8.425000e-01, 9.163000e-01,
9.786000e-01, 1.026300e+00, 1.056700e+00, 1.062200e+00, 1.045600e+00,
1.002600e+00, 9.384000e-01, 8.544499e-01, 7.514000e-01, 6.424000e-01,
5.419000e-01, 4.479000e-01, 3.608000e-01, 2.835000e-01, 2.187000e-01,
1.649000e-01, 1.212000e-01, 8.740000e-02, 6.360000e-02, 4.677000e-02,
3.290000e-02, 2.270000e-02, 1.584000e-02, 1.135916e-02, 8.110916e-03,
5.790346e-03, 4.106457e-03, 2.899327e-03, 2.049190e-03, 1.439971e-03,
9.999493e-04, 6.900786e-04, 4.760213e-04, 3.323011e-04, 2.348261e-04,
1.661505e-04, 1.174130e-04, 8.307527e-05, 5.870652e-05, 4.150994e-05,
2.935326e-05, 2.067383e-05, 1.455977e-05, 1.025398e-05, 7.221456e-06,
5.085868e-06, 3.581652e-06, 2.522525e-06, 1.776509e-06, 1.251141e-06
};
// y-bar: the luminous-efficiency weighting (same tabulation as CIE_X).
const double CIE_Y[] = { 3.917000e-06, 6.965000e-06, 1.239000e-05, 2.202000e-05, 3.900000e-05,
6.400000e-05, 1.200000e-04, 2.170000e-04, 3.960000e-04, 6.400000e-04,
1.210000e-03, 2.180000e-03, 4.000000e-03, 7.300000e-03, 1.160000e-02,
1.684000e-02, 2.300000e-02, 2.980000e-02, 3.800000e-02, 4.800000e-02,
6.000000e-02, 7.390000e-02, 9.098000e-02, 1.126000e-01, 1.390200e-01,
1.693000e-01, 2.080200e-01, 2.586000e-01, 3.230000e-01, 4.073000e-01,
5.030000e-01, 6.082000e-01, 7.100000e-01, 7.932000e-01, 8.620000e-01,
9.148501e-01, 9.540000e-01, 9.803000e-01, 9.949501e-01, 1.000000e+00,
9.950000e-01, 9.786000e-01, 9.520000e-01, 9.154000e-01, 8.700000e-01,
8.163000e-01, 7.570000e-01, 6.949000e-01, 6.310000e-01, 5.668000e-01,
5.030000e-01, 4.412000e-01, 3.810000e-01, 3.210000e-01, 2.650000e-01,
2.170000e-01, 1.750000e-01, 1.382000e-01, 1.070000e-01, 8.160000e-02,
6.100000e-02, 4.458000e-02, 3.200000e-02, 2.320000e-02, 1.700000e-02,
1.192000e-02, 8.210000e-03, 5.723000e-03, 4.102000e-03, 2.929000e-03,
2.091000e-03, 1.484000e-03, 1.047000e-03, 7.400000e-04, 5.200000e-04,
3.611000e-04, 2.492000e-04, 1.719000e-04, 1.200000e-04, 8.480000e-05,
6.000000e-05, 4.240000e-05, 3.000000e-05, 2.120000e-05, 1.499000e-05,
1.060000e-05, 7.465700e-06, 5.257800e-06, 3.702900e-06, 2.607800e-06,
1.836600e-06, 1.293400e-06, 9.109300e-07, 6.415300e-07, 4.518100e-07 };
// z-bar: essentially zero above ~650 nm, hence the trailing zeros.
const double CIE_Z[] = { 6.061000e-04, 1.086000e-03, 1.946000e-03, 3.486000e-03, 6.450001e-03,
1.054999e-02, 2.005001e-02, 3.621000e-02, 6.785001e-02, 1.102000e-01,
2.074000e-01, 3.713000e-01, 6.456000e-01, 1.039050e+00, 1.385600e+00,
1.622960e+00, 1.747060e+00, 1.782600e+00, 1.772110e+00, 1.744100e+00,
1.669200e+00, 1.528100e+00, 1.287640e+00, 1.041900e+00, 8.129501e-01,
6.162000e-01, 4.651800e-01, 3.533000e-01, 2.720000e-01, 2.123000e-01,
1.582000e-01, 1.117000e-01, 7.824999e-02, 5.725001e-02, 4.216000e-02,
2.984000e-02, 2.030000e-02, 1.340000e-02, 8.749999e-03, 5.749999e-03,
3.900000e-03, 2.749999e-03, 2.100000e-03, 1.800000e-03, 1.650001e-03,
1.400000e-03, 1.100000e-03, 1.000000e-03, 8.000000e-04, 6.000000e-04,
3.400000e-04, 2.400000e-04, 1.900000e-04, 1.000000e-04, 4.999999e-05,
3.000000e-05, 2.000000e-05, 1.000000e-05, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00 };
// Map an integer grid index (i,j,k) to normalized local coordinates in [0,1].
__device__ void indexToLocal(int i, int j, int k, double &l_x, double &l_y, double &l_z) {
    l_x = i / double(dev_gridx - 1);
    l_y = j / double(dev_gridy - 1);
    l_z = k / double(dev_gridz - 1);
}
// Apply the affine transform `trans` (flattened row-major 4x4, last row
// implicit) to a normalized local point, producing world coordinates.
__device__ void localToWorld(double *trans, double l_x, double l_y, double l_z, double &w_x, double &w_y, double &w_z) {
    const double *r0 = trans;
    const double *r1 = trans + 4;
    const double *r2 = trans + 8;
    w_x = l_x*r0[0] + l_y*r0[1] + l_z*r0[2] + r0[3];
    w_y = l_x*r1[0] + l_y*r1[1] + l_z*r1[2] + r1[3];
    w_z = l_x*r2[0] + l_y*r2[1] + l_z*r2[2] + r2[3];
}
// Grid index -> world coordinates: normalize to local space, then transform.
__device__ void indexToWorld(double *trans, int i, int j, int k, double &w_x, double &w_y, double &w_z) {
    double lx, ly, lz;
    indexToLocal(i, j, k, lx, ly, lz);
    localToWorld(trans, lx, ly, lz, w_x, w_y, w_z);
}
// Apply the inverse transform `itrans` (flattened row-major 4x4, last row
// implicit) to a world point, producing normalized local coordinates.
__device__ void worldToLocal(double *itrans, const double w_x, const double w_y, const double w_z, double &l_x, double &l_y, double &l_z) {
    const double *r0 = itrans;
    const double *r1 = itrans + 4;
    const double *r2 = itrans + 8;
    l_x = w_x*r0[0] + w_y*r0[1] + w_z*r0[2] + r0[3];
    l_y = w_x*r1[0] + w_y*r1[1] + w_z*r1[2] + r1[3];
    l_z = w_x*r2[0] + w_y*r2[1] + w_z*r2[2] + r2[3];
}
// Floor a normalized local position to the index of the cell corner with the
// smallest coordinates ("upper-left" corner of the containing cell).
__device__ void localToUpperLeftIndex(const double l_x, const double l_y, const double l_z, int &i, int &j, int &k) {
    i = static_cast<int>(floor(l_x * double(dev_gridx - 1)));
    j = static_cast<int>(floor(l_y * double(dev_gridy - 1)));
    k = static_cast<int>(floor(l_z * double(dev_gridz - 1)));
}
// World position -> index of the containing cell's lowest-coordinate corner.
__device__ void worldToUpperLeftIndex(double *itrans, const double w_x, const double w_y, const double w_z, int &i, int &j, int &k) {
    double lx, ly, lz;
    worldToLocal(itrans, w_x, w_y, w_z, lx, ly, lz);
    localToUpperLeftIndex(lx, ly, lz, i, j, k);
}
// A local position is valid when every component lies in [0, 1).
__device__ bool localIsValid(double l_x, double l_y, double l_z) {
    const bool inX = (l_x >= 0.0) && (l_x < 1.0);
    const bool inY = (l_y >= 0.0) && (l_y < 1.0);
    const bool inZ = (l_z >= 0.0) && (l_z < 1.0);
    return inX && inY && inZ;
}
// True when the world position falls inside the grid's valid local range.
__device__ bool worldIsValid(double *itrans, const double w_x, const double w_y, const double w_z) {
    double lx, ly, lz;
    worldToLocal(itrans, w_x, w_y, w_z, lx, ly, lz);
    return localIsValid(lx, ly, lz);
}
// Trilinearly interpolate the grid `g` at an arbitrary world-space position.
// itrans/trans are the world->index and index->world transforms.
__device__ double valueAtWorld(double *g, double *itrans, double* trans, double w_x, double w_y, double w_z) {
    // locate the cell containing the point
    int i, j, k;
    worldToUpperLeftIndex(itrans, w_x, w_y, w_z, i, j, k);
    // world positions of the cell's two opposite corners
    double cx0, cy0, cz0;
    double cx1, cy1, cz1;
    indexToWorld(trans, i, j, k, cx0, cy0, cz0);
    indexToWorld(trans, i + 1, j + 1, k + 1, cx1, cy1, cz1);
    // fractional offsets of the point inside the cell
    const double fx = (w_x - cx0) / (cx1 - cx0);
    const double fy = (w_y - cy0) / (cy1 - cy0);
    const double fz = (w_z - cz0) / (cz1 - cz0);
    return linearInterpolate(g, i, j, k, fx, fy, fz, dev_gridx, dev_gridy, dev_gridz);
}
// Slab-method ray vs. axis-aligned-box intersection.
// minbox/maxbox are the AABB corners with minimal/maximal coordinates.
// On a hit, *tmin/*tmax receive the entry/exit parameters along lineDirection.
// Returns false when the slab intervals do not overlap, or when the whole box
// lies behind the ray origin (tmax < 0; tmin < 0 means we start inside it).
// Adapted from:
// http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
__device__ bool rayBoxIntersection(Vector3 minbox, Vector3 maxbox, const Vector3 &lineOrigin, const Vector3 &lineDirection, double *tmin, double *tmax) {
    Vector3 dir = lineDirection;
    // reciprocal direction; a zero component produces +/-inf, which the
    // min/max slab test tolerates
    Vector3 dirfrac;
    dirfrac.x = 1.0f / dir.x;
    dirfrac.y = 1.0f / dir.y;
    dirfrac.z = 1.0f / dir.z;
    // per-axis slab parameters
    double tx0 = (minbox.x - lineOrigin.x)*dirfrac.x;
    double tx1 = (maxbox.x - lineOrigin.x)*dirfrac.x;
    double ty0 = (minbox.y - lineOrigin.y)*dirfrac.y;
    double ty1 = (maxbox.y - lineOrigin.y)*dirfrac.y;
    double tz0 = (minbox.z - lineOrigin.z)*dirfrac.z;
    double tz1 = (maxbox.z - lineOrigin.z)*dirfrac.z;
    // binsort presumably orders each pair so its first argument is the larger
    // value — TODO confirm against its definition
    binsort(&tx1, &tx0);
    binsort(&ty1, &ty0);
    binsort(&tz1, &tz0);
    *tmin = d_max(d_max(tx0, ty0), tz0);
    *tmax = d_min(d_min(tx1, ty1), tz1);
    if (*tmax < 0)       // box entirely behind the ray origin
        return false;
    if (*tmin > *tmax)   // slab intervals do not overlap: no intersection
        return false;
    return true;
}
// CIE XYZ -> LMS cone responses via the CAT02 matrix.
// (A D65 variant exists in the project history but is not used.)
__device__ Vector3 XYZtoLMS(const Vector3 &xyz) {
    return Vector3(0.7328*xyz.x + 0.4296*xyz.y - 0.1624*xyz.z,
                   -0.7036*xyz.x + 1.6975*xyz.y + 0.0061*xyz.z,
                   0.0030*xyz.x + 0.0136*xyz.y + 0.9834*xyz.z);
}
// CIE XYZ -> linear sRGB primaries (standard sRGB matrix).
__device__ Vector3 XYZtoRGB(const Vector3 &xyz) {
    return Vector3(3.2410*xyz.x - 1.5374*xyz.y - 0.4986*xyz.z,
                   -0.9692*xyz.x + 1.8760*xyz.y + 0.0416*xyz.z,
                   0.0556*xyz.x - 0.2040*xyz.y + 1.0570*xyz.z);
}
// LMS cone responses -> CIE XYZ (inverse of the CAT02 matrix in XYZtoLMS).
__device__ Vector3 LMStoXYZ(const Vector3 &lms) {
    return Vector3(1.09612 *lms.x - 0.278869*lms.y + 0.182745 *lms.z,
                   0.454369 *lms.x + 0.473533 *lms.y + 0.0720978 *lms.z,
                   -0.00962761 *lms.x - 0.00569803 *lms.y + 1.01533 *lms.z);
}
// Planck's law: blackbody spectral radiance for wavelength lambda and
// temperature T, built from the radiation constants kC_1 and kC_2.
__device__ double radiance(double lambda, double T) {
	const double boltzmannTerm = exp(kC_2 / (lambda * T)) - 1.0;
	return 2.0 * kC_1 / (pow(lambda, 5.0) * boltzmannTerm);
}
// Ray-marching colour kernel: one thread per pixel (x, y). Casts a ray
// through the temperature volume, accumulates Planck radiance per
// wavelength sample (plus precomputed room illumination at the ray's exit
// point), then converts the spectrum XYZ -> CAT02 chromatic adaption ->
// sRGB and writes the result into dev_image.
// NOTE(review): the dev_l parameter is never read in this kernel.
__global__ void colorKernel(int SPEC_TOTAL_SAMPLES, bool QUALITY_ROOM, double LeScale, int SPEC_SAMPLE_STEP, int CHROMA,
double *dev_le, double* dev_l, double *dev_le_mean, double *dev_temperature_grid, float *dev_image, double *dev_xm, double *dev_ym, double *dev_zm,
Vector3 *dev_eyepos, Vector3 *dev_forward, Vector3 *dev_right, Vector3 *dev_up, Vector3 *dev_minCoord, Vector3 *dev_maxCoord) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Pixel step in normalized device coordinates over [-1, 1].
double du = 2.0 / double(dev_xsize - 1);
double dv = 2.0 / double(dev_ysize - 1);
// NOTE(review): both x and y advance at the loop tail, so each thread
// walks a DIAGONAL of the image; full coverage relies on the launch grid
// spanning the whole image in the first iteration (host launches 32x32
// blocks of 16x16 = 512x512 threads) - confirm for larger images.
while (!(x < 0 || x >= dev_xsize || y < 0 || y >= dev_ysize)) {
double u = -1.0 + double(x) * du;
double v = -1.0 + double(y) * dv;
// World-space point on the near plane corresponding to this pixel.
Vector3 nearPlanePos = *dev_eyepos + *dev_forward * dev_near_plane_distance + (*dev_right) * u * (dev_w / 2) + (*dev_up) * v * (dev_h / 2);
Vector3 direction = nearPlanePos - *dev_eyepos;
direction.normalize();
double tmin, tmax;
//Vector3 normal;
// Flat RGB pixel index (row-major).
int index_p = y * (dev_xsize)+x;
//double *local_L = new double[SPEC_TOTAL_SAMPLES];
// Per-wavelength accumulated radiance along this ray.
// NOTE(review): fixed at 5 entries but indexed up to SPEC_TOTAL_SAMPLES-1
// below - stack buffer overflow if SPEC_TOTAL_SAMPLES > 5; confirm caller.
double local_L[5];
if (rayBoxIntersection(*dev_minCoord, *dev_maxCoord, nearPlanePos, direction, &tmin, &tmax)) {
if (tmax > 0.0) {
Vector3 endPoint = nearPlanePos + direction * tmax;
Vector3 startPoint;
if (tmin > 0.0) // set startpoint at first intersection with box if camera is outside the box
startPoint = nearPlanePos + direction*tmin;
else
startPoint = nearPlanePos;
// Seed each wavelength sample with the room illumination reaching the
// ray's exit point.
for (size_t i = 0; i < SPEC_TOTAL_SAMPLES; i++) {
local_L[i] = 0.0;
if (QUALITY_ROOM) {
// High-quality mode: sum the 1/r^2 contribution of EVERY grid cell.
for (int a = 0; a < dev_gridx; a++) {
for (int b = 0; b < dev_gridy; b++) {
for (int c = 0; c < dev_gridz; c++) {
int index = (a * dev_gridy * dev_gridz +
b * dev_gridz +
c)*SPEC_TOTAL_SAMPLES + i;
double xw2, yw2, zw2;
indexToWorld(dev_fast_trans, a, b, c, xw2, yw2, zw2);
Vector3 p1(xw2, yw2, zw2);
Vector3 diff = p1 - endPoint;
double dist = diff.norm();
/**diffuse*/
local_L[i] += pow(dist, -2.0)*dev_le[index] * LeScale;
}
}
}
}
else {
// Fast path: a single radiance-weighted mean emitter position
// (dev_xm/ym/zm) per wavelength sample.
if (dev_le_mean[i] > 0.0) {
double xw2, yw2, zw2;
indexToWorld(dev_fast_trans, dev_xm[i], dev_ym[i], dev_zm[i], xw2, yw2, zw2);
Vector3 p1(xw2, yw2, zw2);
Vector3 diff = p1 - endPoint;
double dist = diff.norm();
//diff.normalize(); // we dont have the normal and therefor we dont use the diffuse term here
//double diffuse = std::max(Vector3::dot(diff, normal), 0.0);
local_L[i] += pow(dist, -2.0)*dev_le_mean[i] * LeScale/**diffuse*/;
}
}
}
// ray casting
double length = (endPoint - startPoint).norm();
double steps = length / dev_wds;
int intsteps = int(steps);
// NOTE(review): `pos` is read after the loop; it stays uninitialized
// when intsteps == 0 (ray shorter than one step) - confirm intended.
Vector3 pos;
// March from the exit point back toward the camera: each step attenuates
// the accumulated light by dev_C and adds local emission.
for (int z = 0; z < intsteps; z += 1) {
pos = endPoint - direction*double(z)*dev_wds; //From end to start, we traverse the ray backwards here.
if (worldIsValid(dev_fast_itrans, pos.x, pos.y, pos.z)) // check if pos is inside the grid
{
double T = valueAtWorld(dev_temperature_grid, dev_fast_itrans, dev_fast_trans, pos.x, pos.y, pos.z);
// calculate the radiance for each wave length sample
for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
const double lambda = (360.0 + double(i*SPEC_SAMPLE_STEP) * 5)*1e-9;
local_L[i] = dev_C * local_L[i] + dev_oa * radiance(lambda, T) * dev_wds;
}
}
}
// Extinction over the unmarched stretch between the start point and the
// near plane.
double restLength = (pos - nearPlanePos).norm();
double Crest = exp(-dev_ot * restLength);
for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) // correct the intensity if startpoint is not at nearplane
local_L[i] = Crest*local_L[i];
// Calc XYZ-color from the radiance L
// Integrate the spectrum against the CIE colour-matching tables.
Vector3 XYZ = Vector3(0.0);
for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
int j = SPEC_SAMPLE_STEP*i;
XYZ.x += local_L[i] * dev_CIE_X[j];
XYZ.y += local_L[i] * dev_CIE_Y[j];
XYZ.z += local_L[i] * dev_CIE_Z[j];
}
// Wavelength bin width of the spectral sampling (metres).
double SAMPLE_DL = 5e-9*double(SPEC_SAMPLE_STEP);
XYZ *= SAMPLE_DL;
// Chromatic adaption of the light, high CHROMA value decreases the intensity of the light
Vector3 LMS = XYZtoLMS(XYZ);
LMS.x = LMS.x / (LMS.x + CHROMA);
LMS.y = LMS.y / (LMS.y + CHROMA);
LMS.z = LMS.z / (LMS.z + CHROMA);
XYZ = LMStoXYZ(LMS);
Vector3 rgb = XYZtoRGB(XYZ);
dev_image[index_p * 3 + 0] = rgb.x; //R
dev_image[index_p * 3 + 1] = rgb.y; //G
dev_image[index_p * 3 + 2] = rgb.z; //B
}
else {
// Box entirely behind the camera: black pixel.
dev_image[index_p * 3 + 0] = 0.0; //R
dev_image[index_p * 3 + 1] = 0.0; //G
dev_image[index_p * 3 + 2] = 0.0; //B
}
}
else {
// Ray misses the volume: black pixel.
dev_image[index_p * 3 + 0] = 0.0; //R
dev_image[index_p * 3 + 1] = 0.0; //G
dev_image[index_p * 3 + 2] = 0.0; //B
}
// print progress
// update index
x += blockDim.x * gridDim.x;
y += blockDim.y * gridDim.y;
}
}
// Uploads the per-frame constants used by colorKernel - image size, grid
// dimensions, optical coefficients (oa/os/ot/wds), camera film parameters,
// the CIE colour-matching tables and the grid transform matrices - into
// their __constant__/__device__ symbols.
// Returns cudaSuccess when every upload succeeded, otherwise the first
// error encountered (all failures are still reported to stderr).
cudaError_t Renderer::loadConstantMem() {
	cudaError_t cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
	}
	// Report-and-continue helper: logs the failing symbol by name and
	// remembers the first error so the caller still sees the failure.
	cudaError_t firstError = cudaStatus;
	auto copyCheck = [&firstError](cudaError_t st, const char *symbol) {
		if (st != cudaSuccess) {
			fprintf(stderr, "cudaMemcpyToSymbol(%s) failed: %s\n", symbol, cudaGetErrorString(st));
			if (firstError == cudaSuccess) firstError = st;
		}
	};
	// Image and grid dimensions.
	copyCheck(cudaMemcpyToSymbol(dev_xsize, &_x, sizeof(int)), "dev_xsize");
	copyCheck(cudaMemcpyToSymbol(dev_ysize, &_y, sizeof(int)), "dev_ysize");
	copyCheck(cudaMemcpyToSymbol(dev_gridx, &_volume->_grid->_xdim, sizeof(int)), "dev_gridx");
	copyCheck(cudaMemcpyToSymbol(dev_gridy, &_volume->_grid->_ydim, sizeof(int)), "dev_gridy");
	copyCheck(cudaMemcpyToSymbol(dev_gridz, &_volume->_grid->_zdim, sizeof(int)), "dev_gridz");
	// Optical coefficients and marching step size.
	copyCheck(cudaMemcpyToSymbol(dev_oa, &_volume->_oa, sizeof(double)), "dev_oa");
	copyCheck(cudaMemcpyToSymbol(dev_os, &_volume->_os, sizeof(double)), "dev_os");
	copyCheck(cudaMemcpyToSymbol(dev_ot, &_volume->_ot, sizeof(double)), "dev_ot");
	copyCheck(cudaMemcpyToSymbol(dev_wds, &_volume->_wds, sizeof(double)), "dev_wds");
	// Camera film parameters.
	copyCheck(cudaMemcpyToSymbol(dev_w, &_cam->_film->_w, sizeof(double)), "dev_w");
	copyCheck(cudaMemcpyToSymbol(dev_h, &_cam->_film->_h, sizeof(double)), "dev_h");
	copyCheck(cudaMemcpyToSymbol(dev_near_plane_distance, &_cam->_film->_nearPlaneDistance, sizeof(double)), "dev_near_plane_distance");
	// Per-step attenuation factor (presumably the volume's transparency
	// over one marching step - see its use in colorKernel; confirm).
	double tr = _volume->transparency();
	copyCheck(cudaMemcpyToSymbol(dev_C, &tr, sizeof(double)), "dev_C");
	// CIE colour-matching functions, kCIELEN samples each.
	copyCheck(cudaMemcpyToSymbol(dev_CIE_X, &CIE_X, kCIELEN * sizeof(double)), "dev_CIE_X");
	copyCheck(cudaMemcpyToSymbol(dev_CIE_Y, &CIE_Y, kCIELEN * sizeof(double)), "dev_CIE_Y");
	copyCheck(cudaMemcpyToSymbol(dev_CIE_Z, &CIE_Z, kCIELEN * sizeof(double)), "dev_CIE_Z");
	/* TODO: why can't use trans*/
	// 4x4 index<->world transform matrices, stored row-major as 16 doubles.
	copyCheck(cudaMemcpyToSymbol(dev_fast_trans, &(_volume->_grid->_trans[0]), 16 * sizeof(double)), "dev_fast_trans");
	copyCheck(cudaMemcpyToSymbol(dev_fast_itrans, &(_volume->_grid->_itrans[0]), 16 * sizeof(double)), "dev_fast_itrans");
	if (firstError == cudaSuccess) firstError = cudaGetLastError();
	// BUGFIX: the original tested `cudaStatus = cudaSuccess` (assignment,
	// not comparison), which overwrote any error, never printed the success
	// message, and made the function always return cudaSuccess.
	if (firstError == cudaSuccess) {
		std::cout << "Constant memory load done." << std::endl;
	}
	return firstError;
}
// Allocates device buffers and uploads camera pose, volume bounds, the
// temperature grid and the precomputed radiance tables.
// BUG(review): every device-pointer parameter is passed BY VALUE. The
// cudaMalloc results are written into the local copies only, so the
// caller's pointers are never updated and every allocation made here leaks
// when the function returns. The pointers would need to be passed by
// reference (e.g. `Vector3 *&deyepos`) for this to work; drawFire()
// currently duplicates this logic inline instead of calling it - confirm
// whether this function is still used before relying on it.
// Errors are reported to stderr and execution continues; the return value
// is only the status of the LAST call.
cudaError_t Renderer::loadMem(Vector3 *deyepos, Vector3 *dforward, Vector3 *dright, Vector3 *dup, Vector3 *devmincoord, Vector3 *devmaxcoord,
float *dimg, double *devT, double *dle, double *dl, double *dlemean, double *dxm, double *dym, double *dzm,
double *Le, double *L, double *LeMean, double *xm, double *ym, double *zm,
double *temperature_grid) {
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
// Camera pose: one Vector3 each for eye position and the basis vectors.
cudaStatus = cudaMalloc((void**)&deyepos, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dforward, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dright, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dup, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(deyepos, &_cam->_eyepos, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaStatus = cudaMemcpy(dforward, &_cam->_forward, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaStatus = cudaMemcpy(dright, &_cam->_right, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaStatus = cudaMemcpy(dup, &_cam->_up, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
// World-space bounding box of the grid.
cudaStatus = cudaMalloc((void**)&devmincoord, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&devmaxcoord, sizeof(Vector3));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMemcpy(devmincoord, &_volume->_grid->_min_coord, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaStatus = cudaMemcpy(devmaxcoord, &_volume->_grid->_max_coord, sizeof(Vector3), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
// Mean emitter position per wavelength sample (x/y/z components).
cudaStatus = cudaMalloc((void**)&dxm, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dym, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
cudaStatus = cudaMalloc((void**)&dzm, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
// Output image: 3 floats (RGB) per pixel.
int imsize = _x * _y * 3;
cudaStatus = cudaMalloc((void**)&dimg, imsize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
;
}
cudaStatus = cudaMalloc((void**)&devT, _volume->_grid->getSize() * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
;
}
// Per-cell, per-wavelength emitted radiance table.
int LeSize = _volume->_grid->_xdim * _volume->_grid->_ydim * _volume->_grid->_zdim * SPEC_TOTAL_SAMPLES;
cudaStatus = cudaMalloc((void**)&dle, LeSize * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
;
}
cudaStatus = cudaMalloc((void**)&dl, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
;
}
cudaStatus = cudaMalloc((void**)&dlemean, SPEC_TOTAL_SAMPLES * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(devT, temperature_grid, _volume->_grid->getSize() * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dlemean, LeMean, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dle, Le, LeSize * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dl, L, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dxm, xm, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dym, ym, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
cudaStatus = cudaMemcpy(dzm, zm, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
;
}
return cudaStatus;
}
// Renders the fire volume into `image` (_x * _y pixels, 3 floats each).
// Host side: evaluates per-cell spectral emission via Planck's law (and,
// unless QUALITY_ROOM, collapses it to a radiance-weighted mean emitter
// position per wavelength sample), uploads everything to the GPU and
// launches colorKernel. Returns `image` for convenience.
float* Renderer::drawFire(float* temperature_grid, float *image) {
	// Cache the world-space bounding box of the grid; the kernel uses it
	// for the ray/box intersection test.
	_volume->_grid->indexToWorld(_volume->_grid->_trans, 0, 0, 0, _volume->_grid->_min_coord.x, _volume->_grid->_min_coord.y, _volume->_grid->_min_coord.z);
	_volume->_grid->indexToWorld(_volume->_grid->_trans, _volume->_grid->_xdim - 1, _volume->_grid->_ydim - 1, _volume->_grid->_zdim - 1, _volume->_grid->_max_coord.x, _volume->_grid->_max_coord.y, _volume->_grid->_max_coord.z);
	const int IMGSIZE = _x * _y * 3;
	const int LeSize = _volume->_grid->_xdim * _volume->_grid->_ydim * _volume->_grid->_zdim * SPEC_TOTAL_SAMPLES;
	double *Le = new double[LeSize];
	// Value-initialized: L is copied to the device below but never written
	// on the host, so avoid shipping uninitialized memory.
	double *L = new double[SPEC_TOTAL_SAMPLES]();
	double *LeMean = new double[SPEC_TOTAL_SAMPLES];
	double *xm = new double[SPEC_TOTAL_SAMPLES];
	double *ym = new double[SPEC_TOTAL_SAMPLES];
	double *zm = new double[SPEC_TOTAL_SAMPLES];
	for (int i = 0; i < SPEC_TOTAL_SAMPLES; i++) {
		xm[i] = 0; ym[i] = 0; zm[i] = 0;
	}
	/* Precompute per-cell emission on the host */
#pragma omp parallel
	{
		if (!QUALITY_ROOM) {
			// reset the mean radiance value
#pragma omp for
			for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
				LeMean[i] = 0.0;
			}
		}
#pragma omp for
		for (int x = 0; x < _volume->_grid->_xdim; ++x) {
			for (int y = 0; y < _volume->_grid->_ydim; ++y) {
				for (int z = 0; z < _volume->_grid->_zdim; ++z) {
					int idx = x + _volume->_grid->_xdim*y + _volume->_grid->_xdim*_volume->_grid->_ydim*z;
					const double T = temperature_grid[idx];
					for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
						int index = (x*_volume->_grid->_ydim*_volume->_grid->_zdim +
							y*_volume->_grid->_zdim +
							z)*SPEC_TOTAL_SAMPLES + i;
						const double lambda = (360.0 + double(i*SPEC_SAMPLE_STEP) * 5)*1e-9;
						Le[index] = _volume->radiance(lambda, T);
						// BUGFIX: was `Le[index] == 0.0`, a no-op comparison;
						// negative radiance must be clamped with an assignment.
						if (Le[index] < 0.0) Le[index] = 0.0;
						Le[index] = _volume->_oa*Le[index] * _volume->_grid->dx()*_volume->_grid->dy()*_volume->_grid->dz();
						if (!QUALITY_ROOM) {
							// BUGFIX: these accumulators are indexed by i, not by
							// the parallelized x, so they are shared across the
							// omp-for threads; the updates must be atomic.
#pragma omp atomic
							LeMean[i] += Le[index];
#pragma omp atomic
							xm[i] += double(x)*Le[index];
#pragma omp atomic
							ym[i] += double(y)*Le[index];
#pragma omp atomic
							zm[i] += double(z)*Le[index];
						}
					}
				}
			}
		}
		if (!QUALITY_ROOM) {
			// Normalize to the radiance-weighted mean emitter position.
#pragma omp for
			for (int i = 0; i < SPEC_TOTAL_SAMPLES; i += 1) {
				if (LeMean[i] > 0.0) {
					xm[i] /= LeMean[i];
					ym[i] /= LeMean[i];
					zm[i] /= LeMean[i];
				}
			}
		}
		// (The unused local_L/normal locals from the original were removed;
		// local_L pointed past the end of L for any thread id > 0.)
	}
	// The kernel samples the temperature grid as doubles; convert the float
	// input once on the host. BUGFIX: the original memcpy'd the float
	// buffer with sizeof(double), reading past its end and reinterpreting
	// the bits.
	const size_t gridSize = _volume->_grid->getSize();
	double *Tdouble = new double[gridSize];
	for (size_t i = 0; i < gridSize; ++i)
		Tdouble[i] = double(temperature_grid[i]);
	// Device buffers. BUGFIX: previously several of these were `new`ed on
	// the host and then immediately overwritten by cudaMalloc, leaking the
	// host allocations every frame.
	Vector3 *d_eyepos = NULL, *d_forward = NULL, *d_right = NULL, *d_up = NULL;
	Vector3 *dev_minCoord = NULL, *dev_maxCoord = NULL;
	float *dev_img = NULL;
	double *dev_T = NULL, *dev_le = NULL, *dev_l = NULL, *dev_le_mean = NULL;
	double *dev_xm = NULL, *dev_ym = NULL, *dev_zm = NULL;
	// Report-and-continue error handling, matching the original behaviour
	// but naming the failing call.
	auto check = [](cudaError_t st, const char *what) {
		if (st != cudaSuccess)
			fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(st));
		return st;
	};
	cudaError_t cudaStatus = check(cudaSetDevice(0), "cudaSetDevice");
	check(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1), "cudaDeviceSetCacheConfig");
	check(cudaMalloc((void**)&d_eyepos, sizeof(Vector3)), "cudaMalloc(d_eyepos)");
	check(cudaMalloc((void**)&d_forward, sizeof(Vector3)), "cudaMalloc(d_forward)");
	check(cudaMalloc((void**)&d_right, sizeof(Vector3)), "cudaMalloc(d_right)");
	check(cudaMalloc((void**)&d_up, sizeof(Vector3)), "cudaMalloc(d_up)");
	check(cudaMemcpy(d_eyepos, &_cam->_eyepos, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(d_eyepos)");
	check(cudaMemcpy(d_forward, &_cam->_forward, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(d_forward)");
	check(cudaMemcpy(d_right, &_cam->_right, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(d_right)");
	check(cudaMemcpy(d_up, &_cam->_up, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(d_up)");
	check(cudaMalloc((void**)&dev_minCoord, sizeof(Vector3)), "cudaMalloc(dev_minCoord)");
	check(cudaMalloc((void**)&dev_maxCoord, sizeof(Vector3)), "cudaMalloc(dev_maxCoord)");
	check(cudaMemcpy(dev_minCoord, &_volume->_grid->_min_coord, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(dev_minCoord)");
	check(cudaMemcpy(dev_maxCoord, &_volume->_grid->_max_coord, sizeof(Vector3), cudaMemcpyHostToDevice), "cudaMemcpy(dev_maxCoord)");
	check(cudaMalloc((void**)&dev_xm, SPEC_TOTAL_SAMPLES * sizeof(double)), "cudaMalloc(dev_xm)");
	check(cudaMalloc((void**)&dev_ym, SPEC_TOTAL_SAMPLES * sizeof(double)), "cudaMalloc(dev_ym)");
	check(cudaMalloc((void**)&dev_zm, SPEC_TOTAL_SAMPLES * sizeof(double)), "cudaMalloc(dev_zm)");
	check(cudaMalloc((void**)&dev_img, IMGSIZE * sizeof(float)), "cudaMalloc(dev_img)");
	check(cudaMalloc((void**)&dev_T, gridSize * sizeof(double)), "cudaMalloc(dev_T)");
	check(cudaMalloc((void**)&dev_le, LeSize * sizeof(double)), "cudaMalloc(dev_le)");
	check(cudaMalloc((void**)&dev_l, SPEC_TOTAL_SAMPLES * sizeof(double)), "cudaMalloc(dev_l)");
	check(cudaMalloc((void**)&dev_le_mean, SPEC_TOTAL_SAMPLES * sizeof(double)), "cudaMalloc(dev_le_mean)");
	check(cudaMemcpy(dev_T, Tdouble, gridSize * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_T)");
	check(cudaMemcpy(dev_le_mean, LeMean, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_le_mean)");
	check(cudaMemcpy(dev_le, Le, LeSize * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_le)");
	check(cudaMemcpy(dev_l, L, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_l)");
	check(cudaMemcpy(dev_xm, xm, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_xm)");
	check(cudaMemcpy(dev_ym, ym, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_ym)");
	check(cudaMemcpy(dev_zm, zm, SPEC_TOTAL_SAMPLES * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy(dev_zm)");
	// Launch the colour kernel and time it with events.
	std::cout << "start kernel... " << std::endl;
	dim3 color_grid(32, 32);
	dim3 color_block(16, 16);
	cudaEvent_t start, stop;
	float elapse_time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	colorKernel << <color_grid, color_block >> > (SPEC_TOTAL_SAMPLES, QUALITY_ROOM, _volume->LeScale, SPEC_SAMPLE_STEP, CHROMA, dev_le, dev_l, dev_le_mean, dev_T, dev_img, dev_xm, dev_ym, dev_zm, d_eyepos, d_forward, d_right, d_up, dev_minCoord, dev_maxCoord);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapse_time, start, stop);
	printf("Elapsed time : %f ms\n", elapse_time);
	// Check for any errors launching the kernel.
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "colorKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	}
	// Blocking copy back; also synchronizes with the kernel.
	check(cudaMemcpy(image, dev_img, IMGSIZE * sizeof(float), cudaMemcpyDeviceToHost), "cudaMemcpy(image)");
	// Cleanup - previously every device buffer, host array and event was
	// leaked on each call.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d_eyepos); cudaFree(d_forward); cudaFree(d_right); cudaFree(d_up);
	cudaFree(dev_minCoord); cudaFree(dev_maxCoord);
	cudaFree(dev_xm); cudaFree(dev_ym); cudaFree(dev_zm);
	cudaFree(dev_img); cudaFree(dev_T); cudaFree(dev_le);
	cudaFree(dev_l); cudaFree(dev_le_mean);
	delete[] Tdouble;
	delete[] Le; delete[] L; delete[] LeMean;
	delete[] xm; delete[] ym; delete[] zm;
	return image;
}
#endif
|
fe6f8d606aeecf15eaaffabffd2fb6c17f42a7c6.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of the Marian toolkit.
// Marian is copyright (c) 2016 Marcin Junczys-Dowmunt.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <algorithm>

#include "expression_operators.h"
#include "node_operators.h"
namespace marian {
// Registers `a` in its graph under `name` and returns it unchanged.
Expr named(Expr a, const std::string& name) {
a.graph()->add_named_node(a, name);
return a;
}
// Wraps `a` in a LogitNodeOp on the same graph.
Expr logit(Expr a) {
return Expr(a.graph(), new LogitNodeOp(a));
}
// Wraps `a` in a TanhNodeOp.
Expr tanh(Expr a) {
return Expr(a.graph(), new TanhNodeOp(a));
}
// Wraps `a` in a ReLUNodeOp.
Expr relu(Expr a) {
return Expr(a.graph(), new ReLUNodeOp(a));
}
// Wraps `a` in a DropoutNodeOp (rate/behaviour defined by the node op).
Expr dropout(Expr a) {
return Expr(a.graph(), new DropoutNodeOp(a));
}
// Wraps `a` in a LogNodeOp.
Expr log(Expr a) {
return Expr(a.graph(), new LogNodeOp(a));
};
// Wraps `a` in an ExpNodeOp.
Expr exp(Expr a) {
return Expr(a.graph(), new ExpNodeOp(a));
};
// Unary negation: wraps `a` in a NegNodeOp.
Expr operator-(Expr a) {
return Expr(a.graph(), new NegNodeOp(a));
};
// Wraps `a` in a SoftmaxNodeOp (normalization axis defined by the node op).
Expr softmax(Expr a) {
return Expr(a.graph(), new SoftmaxNodeOp(a));
}
// Wraps `a` in an ArgmaxNodeOp (reduction axis defined by the node op).
Expr argmax(Expr a) {
return Expr(a.graph(), new ArgmaxNodeOp(a));
}
/*********************************************************/
/** Computes the element-wise broadcast shape of two expressions.
 * Both operands must have the same rank; for each axis the dimensions must
 * be equal or one of them must be 1 (broadcastable). A `whatevs` dimension
 * on either side propagates to the result.
 * Throws via UTIL_THROW_IF2 on rank mismatch or non-broadcastable axes.
 */
static Shape newShape(ChainPtr a, ChainPtr b) {
  size_t dimsA = a->shape().size();
  size_t dimsB = b->shape().size();
  UTIL_THROW_IF2(dimsA != dimsB,
                 "Tensors have different numbers of dimensions");
  Shape shape(dimsA);
  for(size_t i = 0; i < dimsA; ++i) {
    int dimA = a->shape()[i];
    int dimB = b->shape()[i];
    bool broadcastable = (dimA == dimB || dimA == 1 || dimB == 1);
    UTIL_THROW_IF2(!broadcastable, "Different dimensions in elementwise "
                   << "operation cannot be broadcasted: " << dimA << " != " << dimB);
    // BUGFIX: the hipify pass rewrote this host-side std::max into an
    // unqualified ::max, which has no declaration in host code; the
    // original CUDA source uses std::max.
    shape[i] = std::max(dimA, dimB);
    if(dimA == whatevs || dimB == whatevs)
      shape[i] = whatevs;
  }
  return shape;
}
// Adjusts expression `a` so its shape matches `bShape`, materializing any
// needed expansion along the first two axes as a dot product with a ones
// matrix. Axes beyond the second cannot be expanded and throw.
Expr broadcast(Shape bShape, Expr a) {
  const Shape& aShape = a.node()->shape();
  // Already the right shape: nothing to do.
  if(aShape == bShape)
    return a;
  size_t rankA = aShape.size();
  size_t rankB = bShape.size();
  UTIL_THROW_IF2(rankA != rankB,
                 "Tensor and shape have different number of dimensions");
  for(size_t axis = 0; axis < rankA; ++axis) {
    int srcDim = aShape[axis];
    int dstDim = bShape[axis];
    // Broadcastable means equal sizes or a source size of 1.
    UTIL_THROW_IF2(srcDim != dstDim && srcDim != 1,
                   "Cannot broadcast tensor dimension "
                   << srcDim << " to " << dstDim);
    // Expansion is only needed when a 1 has to grow to dstDim.
    if(srcDim != 1 || dstDim == 1)
      continue;
    if(axis == 0) {
      // Replicate rows: ones(bRows, 1) * a.
      Expr one = a.graph()->ones(keywords::shape={bShape[0], 1});
      a = dot(one, a);
    }
    else if(axis == 1) {
      // Replicate columns: a * ones(1, bCols).
      Expr one = a.graph()->ones(keywords::shape={1, bShape[1]});
      a = dot(a, one);
    }
    else {
      UTIL_THROW2("Not implemented");
    }
  }
  return a;
}
// Element-wise binary operators: both operands are first broadcast to the
// common shape computed by newShape(), then wrapped in the matching node op.
Expr operator+(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new PlusNodeOp(cast_a, cast_b));
}
Expr operator-(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new MinusNodeOp(cast_a, cast_b));
}
Expr operator*(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new MultNodeOp(cast_a, cast_b));
}
Expr operator/(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new DivNodeOp(cast_a, cast_b));
}
// Matrix product node; no broadcasting is applied to the operands.
Expr dot(Expr a, Expr b) {
return Expr(a.graph(), new DotNodeOp(a, b));
}
// Cross-entropy of `a` against `b` (semantics defined by CrossEntropyNodeOp).
Expr cross_entropy(Expr a, Expr b) {
return Expr(a.graph(), new CrossEntropyNodeOp(a, b));
}
}
| fe6f8d606aeecf15eaaffabffd2fb6c17f42a7c6.cu | // This file is part of the Marian toolkit.
// Marian is copyright (c) 2016 Marcin Junczys-Dowmunt.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "expression_operators.h"
#include "node_operators.h"
namespace marian {
// Registers `a` in its graph under `name` and returns it unchanged.
Expr named(Expr a, const std::string& name) {
a.graph()->add_named_node(a, name);
return a;
}
// Wraps `a` in a LogitNodeOp on the same graph.
Expr logit(Expr a) {
return Expr(a.graph(), new LogitNodeOp(a));
}
// Wraps `a` in a TanhNodeOp.
Expr tanh(Expr a) {
return Expr(a.graph(), new TanhNodeOp(a));
}
// Wraps `a` in a ReLUNodeOp.
Expr relu(Expr a) {
return Expr(a.graph(), new ReLUNodeOp(a));
}
// Wraps `a` in a DropoutNodeOp (rate/behaviour defined by the node op).
Expr dropout(Expr a) {
return Expr(a.graph(), new DropoutNodeOp(a));
}
// Wraps `a` in a LogNodeOp.
Expr log(Expr a) {
return Expr(a.graph(), new LogNodeOp(a));
};
// Wraps `a` in an ExpNodeOp.
Expr exp(Expr a) {
return Expr(a.graph(), new ExpNodeOp(a));
};
// Unary negation: wraps `a` in a NegNodeOp.
Expr operator-(Expr a) {
return Expr(a.graph(), new NegNodeOp(a));
};
// Wraps `a` in a SoftmaxNodeOp (normalization axis defined by the node op).
Expr softmax(Expr a) {
return Expr(a.graph(), new SoftmaxNodeOp(a));
}
// Wraps `a` in an ArgmaxNodeOp (reduction axis defined by the node op).
Expr argmax(Expr a) {
return Expr(a.graph(), new ArgmaxNodeOp(a));
}
/*********************************************************/
// Computes the element-wise broadcast shape of two expressions.
// Both operands must have the same rank; for each axis the dimensions must
// be equal or one of them must be 1 (broadcastable). A `whatevs` dimension
// on either side propagates to the result. Throws on violations.
static Shape newShape(ChainPtr a, ChainPtr b) {
size_t dimsA = a->shape().size();
size_t dimsB = b->shape().size();
UTIL_THROW_IF2(dimsA != dimsB,
"Tensors have different numbers of dimensions");
Shape shape(dimsA);
for(size_t i = 0; i < dimsA; ++i) {
int dimA = a->shape()[i];
int dimB = b->shape()[i];
bool broadcastable = (dimA == dimB || dimA == 1 || dimB == 1);
UTIL_THROW_IF2(!broadcastable, "Different dimensions in elementwise "
<< "operation cannot be broadcasted: " << dimA << " != " << dimB);
// Result axis takes the larger (non-1) extent.
shape[i] = std::max(dimA, dimB);
if(dimA == whatevs || dimB == whatevs)
shape[i] = whatevs;
}
return shape;
}
// Adjusts expression `a` so its shape matches `bShape`, materializing any
// needed expansion along the first two axes as a dot product with a ones
// matrix. Axes beyond the second cannot be expanded and throw.
Expr broadcast(Shape bShape, Expr a) {
const Shape& aShape = a.node()->shape();
if(aShape == bShape) {
return a;
}
else {
size_t dimsA = aShape.size();
size_t dimsB = bShape.size();
UTIL_THROW_IF2(dimsA != dimsB,
"Tensor and shape have different number of dimensions");
for(size_t i = 0; i < dimsA; ++i) {
int dimA = aShape[i];
int dimB = bShape[i];
bool broadcastable = (dimA == dimB || dimA == 1);
UTIL_THROW_IF2(!broadcastable,
"Cannot broadcast tensor dimension "
<< dimA << " to " << dimB);
if(dimA == 1 && dimB != 1) {
if(i == 0) {
// Replicate rows: ones(bRows, 1) * a.
Expr one = a.graph()->ones(keywords::shape={bShape[0], 1});
a = dot(one, a);
}
else if(i == 1) {
// Replicate columns: a * ones(1, bCols).
Expr one = a.graph()->ones(keywords::shape={1, bShape[1]});
a = dot(a, one);
}
else {
UTIL_THROW2("Not implemented");
}
}
}
return a;
}
}
// Element-wise binary operators: both operands are first broadcast to the
// common shape computed by newShape(), then wrapped in the matching node op.
Expr operator+(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new PlusNodeOp(cast_a, cast_b));
}
Expr operator-(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new MinusNodeOp(cast_a, cast_b));
}
Expr operator*(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new MultNodeOp(cast_a, cast_b));
}
Expr operator/(Expr a, Expr b) {
Shape shape = newShape(a, b);
Expr cast_a = broadcast(shape, a);
Expr cast_b = broadcast(shape, b);
return Expr(a.graph(), new DivNodeOp(cast_a, cast_b));
}
// Matrix product node; no broadcasting is applied to the operands.
Expr dot(Expr a, Expr b) {
return Expr(a.graph(), new DotNodeOp(a, b));
}
// Cross-entropy of `a` against `b` (semantics defined by CrossEntropyNodeOp).
Expr cross_entropy(Expr a, Expr b) {
return Expr(a.graph(), new CrossEntropyNodeOp(a, b));
}
}
|
f667d2c0b50b9ac90dbcc5ba4d51803a84229938.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#include <chrono>
#include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <vector>

#include "math_constants.h"
#include "header.h"
constexpr int PULSESPERBLOCK = 4;
constexpr int FIT_THREADSPERBLOCK = PULSESPERBLOCK * SAMPLESPERFIT;
__device__ phaseMap d_phase_maps[NSEGMENTS];
__device__ pulseTemplate d_templates[NSEGMENTS];
// Pulse-finder kernel: each thread owns one sample index and, for every
// segment, tests the polarity-corrected trace value for a local maximum
// above `threshold`. Each hit claims a slot in
// resultcol[segment].fit_results via atomicAdd and stores a
// pseudo-time-corrected time, phase, peak index and peak value.
// Launch: 1-D grid covering the TRACELEN samples of one trace.
// NOTE(review): nPulses can transiently exceed OUTPUTARRAYLEN between the
// atomicAdd and the compensating atomicSub below; any concurrent reader
// of nPulses may observe an inflated count -- confirm this is acceptable.
__global__ void find_times(const short* trace,
pulseFinderResultCollection* resultcol, short threshold, short polarity) {
for (uint segment_num = 0; segment_num < NSEGMENTS; ++segment_num) {
// find index based on global id
uint sample_num = blockIdx.x * blockDim.x + threadIdx.x;
uint trace_index = segment_num * TRACELEN + sample_num;
// don't continue if your trace_index is out of bounds for pulse fitting
if (sample_num < MINFITTIME ||
sample_num >= TRACELEN - (SAMPLESPERFIT - PEAKINDEXINFIT - 1)) {
return;
}
// we need the samples at this point and surrounding (left, middle, right)
short m = polarity * trace[trace_index];
// if this sample is a local minimum and is over threshold, record it
if (m > polarity * threshold) {
short l = polarity * trace[trace_index - 1];
short r = polarity * trace[trace_index + 1];
// must be local minimum, but since we have digital ADC values
// we must allow for max to equal sample on left, but not on right
// if we allow max sample to equal right, we will fit same pulse twice
if ((m >= l) && (m > r)) {
uint pulse_index = atomicAdd(&resultcol[segment_num].nPulses, 1);
if (pulse_index < OUTPUTARRAYLEN) {
// find pulse time, phase
// first calculate pseudo time
float numerator = l - m;
float denominator = r - m;
// denominator can't be zero because m > r
// NOTE(review): 2.0 and atan() promote this expression to double;
// 2.0f / CUDART_PI_F * atanf(...) would keep it in float -- confirm
// the extra precision is intended.
float ptime = 2.0 / CUDART_PI_F * atan(numerator / denominator);
// next interpolate time map table
float where = ptime * (NPOINTSPHASEMAP - 1);
int low_index = floor(where);
float weight_heigh = where - low_index;
float real_time;
// check for out of bounds
if (low_index < 0) {
real_time = 0;
} else if (low_index >= NPOINTSPHASEMAP - 1) {
real_time = 1.0;
} else {
// do the interpolation
real_time =
d_phase_maps[segment_num].table[low_index] *
(1 - weight_heigh) +
d_phase_maps[segment_num].table[low_index + 1] * weight_heigh;
}
// record time, phase, peak index, peak value
float time_offset = d_phase_maps[segment_num].timeOffset;
resultcol[segment_num].fit_results[pulse_index].time =
sample_num + real_time - 0.5 - time_offset;
resultcol[segment_num].fit_results[pulse_index].phase = 1 - real_time;
resultcol[segment_num].fit_results[pulse_index].peak_index =
sample_num;
resultcol[segment_num].fit_results[pulse_index].peak_value = m;
} else {
// beyond limit for number of pulses we're trying to fit
atomicSub(&resultcol[segment_num].nPulses, 1);
}
}
}
}
}
// according to info on nvidia.com, no need to explicitly synchronize threads
// within groups of 32
// because warps are 32 threads and instructions in warp are always synchronous
// Pulse-fitting kernel: each group of SAMPLESPERFIT consecutive threads
// performs a linear template fit (energy + pedestal) of one pulse found
// by find_times, and computes the fit chi^2.
// Launch: grid.x = pulse groups (PULSESPERBLOCK pulses per block),
// grid.y = segment; blockDim.x = SAMPLESPERFIT * PULSESPERBLOCK.
// NOTE(review): the shared-memory reductions below use no
// __syncthreads/__syncwarp and rely on implicit warp-synchronous
// execution. This assumes all SAMPLESPERFIT threads of one pulse sit in
// a single warp and is unsafe under independent thread scheduling
// (Volta+) -- confirm the target architecture or insert __syncwarp()
// between reduction steps.
__global__ void fit_pulses(const short* trace,
pulseFinderResultCollection* resultcol) {
// arrays for accumulation
__shared__ float tSum[FIT_THREADSPERBLOCK];
__shared__ float dSum[FIT_THREADSPERBLOCK];
__shared__ float dDotT[FIT_THREADSPERBLOCK];
__shared__ float tDotT[FIT_THREADSPERBLOCK];
unsigned int segment_num = blockIdx.y;
unsigned int pulse_num =
blockIdx.x * PULSESPERBLOCK + threadIdx.x / SAMPLESPERFIT;
// return asap if this pulse doesn't exit
// (all threads of one pulse group share pulse_num, so a whole group
// returns together and never reads its neighbours' shared slots)
if ((pulse_num >= OUTPUTARRAYLEN) ||
(pulse_num >= resultcol[segment_num].nPulses)) {
return;
}
// step one: read needed inputs from resultcol
float phase = resultcol[segment_num].fit_results[pulse_num].phase;
unsigned int start_sample =
segment_num * TRACELEN +
resultcol[segment_num].fit_results[pulse_num].peak_index - PEAKINDEXINFIT;
// step two: read in template values for this phase and sample num
unsigned int sample_index = threadIdx.x % SAMPLESPERFIT;
float phase_loc = phase * POINTSPERSAMPLE;
int phase_index = floor(phase_loc);
float weight_high = phase_loc - phase_index;
// make sure we're in bounds
if (phase_index < 0) {
phase_index = 0;
weight_high = 0;
} else if (phase_index >= POINTSPERSAMPLE) {
phase_index = POINTSPERSAMPLE - 1;
weight_high = 1;
}
unsigned int low_index = phase_index * SAMPLESPERFIT + sample_index;
unsigned int high_index = low_index + SAMPLESPERFIT;
float low_value = d_templates[segment_num].table[low_index];
float high_value = d_templates[segment_num].table[high_index];
// step 2.5 evaluate template
float t_i = low_value * (1 - weight_high) + high_value * weight_high;
// step three : read in pulse value
float d_i = trace[start_sample + sample_index];
// step four: prepare accumulation/reduction arrays
tSum[threadIdx.x] = t_i;
dSum[threadIdx.x] = d_i;
dDotT[threadIdx.x] = d_i * t_i;
tDotT[threadIdx.x] = t_i * t_i;
// step five: accumulate, note that explicit synchronization
// is not required because all accumulation is done within a warp
// it seems like this stops working if the if and for are inverted, so don't
// do that
for (unsigned int stride = 16; stride >= 1; stride /= 2) {
if (sample_index < 16) {
tSum[threadIdx.x] += tSum[threadIdx.x + stride];
dSum[threadIdx.x] += dSum[threadIdx.x + stride];
dDotT[threadIdx.x] += dDotT[threadIdx.x + stride];
tDotT[threadIdx.x] += tDotT[threadIdx.x + stride];
}
}
// step six : calculate pedestal, energy
// read final accumulated results
int result_index = (threadIdx.x / SAMPLESPERFIT) * SAMPLESPERFIT;
float tSumFinal = tSum[result_index];
float dSumFinal = dSum[result_index];
float dDotTFinal = dDotT[result_index];
float tDotTFinal = tDotT[result_index];
// closed-form least-squares solution of d ~= energy * t + pedestal
float denomRecip = 1.0 / (tSumFinal * tSumFinal - SAMPLESPERFIT * tDotTFinal);
float energy =
denomRecip * (dSumFinal * tSumFinal - SAMPLESPERFIT * dDotTFinal);
float pedestal =
denomRecip * (dDotTFinal * tSumFinal - dSumFinal * tDotTFinal);
// step seven: load partial chi^2s into shared memory
__shared__ float chi2sum[FIT_THREADSPERBLOCK];
float residual_i = d_i - energy * t_i - pedestal;
chi2sum[threadIdx.x] = residual_i * residual_i;
// step eight: accumulate partial chi2s
for (unsigned int stride = 16; stride >= 1; stride /= 2) {
if (sample_index < 16) {
chi2sum[threadIdx.x] += chi2sum[threadIdx.x + stride];
}
}
// final step: record results
// force energy positive
if (energy < 0) {
energy = energy * -1;
}
if (sample_index == 0) {
resultcol[segment_num].fit_results[pulse_num].energy = energy;
resultcol[segment_num].fit_results[pulse_num].pedestal = pedestal;
resultcol[segment_num].fit_results[pulse_num].chi2 = chi2sum[threadIdx.x];
}
}
// some global variables to keep track of state easily
// (shared by gpu_init / gpu_process / gpu_close below)
short* device_trace = nullptr;  // device buffer holding the raw trace
pulseFinderResultCollection* device_result = nullptr;  // device buffer for fit results
hipDeviceProp_t prop;  // properties of the device selected in gpu_init()
// set device and malloc buffers
// Select device 0, allocate the device-side trace/result buffers, and
// upload the per-segment phase maps and pulse templates into the
// __device__ lookup tables used by the kernels.
// phaseMaps/pulseTemplates are expected to hold NSEGMENTS entries.
void gpu_init(std::vector<phaseMap>& phaseMaps,
              std::vector<pulseTemplate>& pulseTemplates) {
  // Abort loudly on any HIP failure instead of silently continuing with
  // null buffers (the original ignored every return code, so a failed
  // hipMalloc would only surface much later as garbage results).
  auto check = [](hipError_t err, const char* what) {
    if (err != hipSuccess) {
      std::cerr << what << " failed: " << hipGetErrorString(err) << std::endl;
      std::abort();
    }
  };
  int count;
  check(hipGetDeviceCount(&count), "hipGetDeviceCount");
  std::cout << "there are " << count << " devices available." << std::endl;
  check(hipSetDevice(0), "hipSetDevice");
  check(hipGetDeviceProperties(&prop, 0), "hipGetDeviceProperties");
  std::cout << "using gpu " << prop.name << std::endl;
  check(hipMalloc((void**)&device_trace, trace_size), "hipMalloc(trace)");
  check(hipMalloc((void**)&device_result, result_size), "hipMalloc(result)");
  check(hipMemcpyToSymbol(d_phase_maps, (void*)phaseMaps.data(),
                          phase_maps_size),
        "hipMemcpyToSymbol(d_phase_maps)");
  check(hipMemcpyToSymbol(d_templates, (void*)pulseTemplates.data(),
                          templates_size),
        "hipMemcpyToSymbol(d_templates)");
}
// free buffers, close gpu
// Release the device buffers (if allocated) and reset the device.
// Pointers are cleared after freeing so a repeated call is a harmless
// no-op; the original left them dangling, so calling gpu_close() twice
// would have issued a double hipFree on stale pointers.
void gpu_close() {
  if (device_trace) {
    std::cout << "freeing device trace" << std::endl;
    hipFree(device_trace);
    device_trace = nullptr;
  }
  if (device_result) {
    std::cout << "freeing device result" << std::endl;
    hipFree(device_result);
    device_result = nullptr;
  }
  hipDeviceReset();
}
// Run the full GPU pipeline on one trace: upload host_trace, zero the
// result buffer, launch find_times then fit_pulses, and copy the fit
// results back into result_buff. Per-stage wall-clock timings are
// printed to stdout.
// NOTE(review): kernel-launch and memcpy return codes are never checked;
// a failed launch would go unnoticed (results would simply stay zeroed).
void gpu_process(short* host_trace, pulseFinderResultCollection* result_buff) {
// copy data to gpu
auto fullGpuStart = std::chrono::high_resolution_clock::now();
auto start = std::chrono::high_resolution_clock::now();
hipMemcpy(device_trace, host_trace, trace_size, hipMemcpyHostToDevice);
// zero results
hipMemset(device_result, 0, result_size);
auto end = std::chrono::high_resolution_clock::now();
std::cout << "time for write to GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
// launch job
// 128 seems to give best performance
int threadsPerBlock = 128;
dim3 dimBlock(threadsPerBlock);
std::cout << "threads per block: " << threadsPerBlock << std::endl;
int blocks_per_trace =
::ceil(static_cast<double>(TRACELEN) / threadsPerBlock);
std::cout << "blocks per trace: " << blocks_per_trace << std::endl;
std::cout << "threads per trace: " << blocks_per_trace* threadsPerBlock
<< std::endl;
dim3 dimGrid(blocks_per_trace);
start = std::chrono::high_resolution_clock::now();
// polarity -1: the pulse finder looks for negative-going pulses
hipLaunchKernelGGL(( find_times), dim3(dimGrid), dim3(dimBlock), 0, 0, device_trace, device_result, THRESHOLD, -1);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
std::cout << "time for find pulses computation on GPU : "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
std::cout << "TRYING FIT" << std::endl;
start = std::chrono::high_resolution_clock::now();
// one block fits PULSESPERBLOCK pulses; grid.y walks the segments
dimGrid = dim3(OUTPUTARRAYLEN / PULSESPERBLOCK, NSEGMENTS);
dimBlock = dim3(SAMPLESPERFIT * PULSESPERBLOCK, 1);
hipLaunchKernelGGL(( fit_pulses), dim3(dimGrid), dim3(dimBlock), 0, 0, device_trace, device_result);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
std::cout << "time for fit pulses computation on GPU : "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
// get data from gpu, finish up
start = std::chrono::high_resolution_clock::now();
hipMemcpy((void*)result_buff, device_result, result_size,
hipMemcpyDeviceToHost);
end = std::chrono::high_resolution_clock::now();
auto fullGpuEnd = end;
std::cout << "time read out results from GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
std::cout << "time from before write to gpu to after read from gpu: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
fullGpuEnd - fullGpuStart).count() << " microseconds"
<< std::endl
<< std::endl;
}
| f667d2c0b50b9ac90dbcc5ba4d51803a84229938.cu | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <vector>
#include "math_constants.h"
#include "header.h"
constexpr int PULSESPERBLOCK = 4;
constexpr int FIT_THREADSPERBLOCK = PULSESPERBLOCK * SAMPLESPERFIT;
__device__ phaseMap d_phase_maps[NSEGMENTS];
__device__ pulseTemplate d_templates[NSEGMENTS];
// Pulse-finder kernel: each thread owns one sample index and, for every
// segment, tests the polarity-corrected trace value for a local maximum
// above `threshold`. Each hit claims a slot in
// resultcol[segment].fit_results via atomicAdd and stores a
// pseudo-time-corrected time, phase, peak index and peak value.
// Launch: 1-D grid covering the TRACELEN samples of one trace.
// NOTE(review): nPulses can transiently exceed OUTPUTARRAYLEN between the
// atomicAdd and the compensating atomicSub below; any concurrent reader
// of nPulses may observe an inflated count -- confirm this is acceptable.
__global__ void find_times(const short* trace,
pulseFinderResultCollection* resultcol, short threshold, short polarity) {
for (uint segment_num = 0; segment_num < NSEGMENTS; ++segment_num) {
// find index based on global id
uint sample_num = blockIdx.x * blockDim.x + threadIdx.x;
uint trace_index = segment_num * TRACELEN + sample_num;
// don't continue if your trace_index is out of bounds for pulse fitting
if (sample_num < MINFITTIME ||
sample_num >= TRACELEN - (SAMPLESPERFIT - PEAKINDEXINFIT - 1)) {
return;
}
// we need the samples at this point and surrounding (left, middle, right)
short m = polarity * trace[trace_index];
// if this sample is a local minimum and is over threshold, record it
if (m > polarity * threshold) {
short l = polarity * trace[trace_index - 1];
short r = polarity * trace[trace_index + 1];
// must be local minimum, but since we have digital ADC values
// we must allow for max to equal sample on left, but not on right
// if we allow max sample to equal right, we will fit same pulse twice
if ((m >= l) && (m > r)) {
uint pulse_index = atomicAdd(&resultcol[segment_num].nPulses, 1);
if (pulse_index < OUTPUTARRAYLEN) {
// find pulse time, phase
// first calculate pseudo time
float numerator = l - m;
float denominator = r - m;
// denominator can't be zero because m > r
// NOTE(review): 2.0 and atan() promote this expression to double;
// 2.0f / CUDART_PI_F * atanf(...) would keep it in float -- confirm
// the extra precision is intended.
float ptime = 2.0 / CUDART_PI_F * atan(numerator / denominator);
// next interpolate time map table
float where = ptime * (NPOINTSPHASEMAP - 1);
int low_index = floor(where);
float weight_heigh = where - low_index;
float real_time;
// check for out of bounds
if (low_index < 0) {
real_time = 0;
} else if (low_index >= NPOINTSPHASEMAP - 1) {
real_time = 1.0;
} else {
// do the interpolation
real_time =
d_phase_maps[segment_num].table[low_index] *
(1 - weight_heigh) +
d_phase_maps[segment_num].table[low_index + 1] * weight_heigh;
}
// record time, phase, peak index, peak value
float time_offset = d_phase_maps[segment_num].timeOffset;
resultcol[segment_num].fit_results[pulse_index].time =
sample_num + real_time - 0.5 - time_offset;
resultcol[segment_num].fit_results[pulse_index].phase = 1 - real_time;
resultcol[segment_num].fit_results[pulse_index].peak_index =
sample_num;
resultcol[segment_num].fit_results[pulse_index].peak_value = m;
} else {
// beyond limit for number of pulses we're trying to fit
atomicSub(&resultcol[segment_num].nPulses, 1);
}
}
}
}
}
// according to info on nvidia.com, no need to explicitly synchronize threads
// within groups of 32
// because warps are 32 threads and instructions in warp are always synchronous
// Pulse-fitting kernel: each group of SAMPLESPERFIT consecutive threads
// performs a linear template fit (energy + pedestal) of one pulse found
// by find_times, and computes the fit chi^2.
// Launch: grid.x = pulse groups (PULSESPERBLOCK pulses per block),
// grid.y = segment; blockDim.x = SAMPLESPERFIT * PULSESPERBLOCK.
// NOTE(review): the shared-memory reductions below use no
// __syncthreads/__syncwarp and rely on implicit warp-synchronous
// execution. This assumes all SAMPLESPERFIT threads of one pulse sit in
// a single warp and is unsafe under independent thread scheduling
// (Volta+) -- confirm the target architecture or insert __syncwarp()
// between reduction steps.
__global__ void fit_pulses(const short* trace,
pulseFinderResultCollection* resultcol) {
// arrays for accumulation
__shared__ float tSum[FIT_THREADSPERBLOCK];
__shared__ float dSum[FIT_THREADSPERBLOCK];
__shared__ float dDotT[FIT_THREADSPERBLOCK];
__shared__ float tDotT[FIT_THREADSPERBLOCK];
unsigned int segment_num = blockIdx.y;
unsigned int pulse_num =
blockIdx.x * PULSESPERBLOCK + threadIdx.x / SAMPLESPERFIT;
// return asap if this pulse doesn't exit
// (all threads of one pulse group share pulse_num, so a whole group
// returns together and never reads its neighbours' shared slots)
if ((pulse_num >= OUTPUTARRAYLEN) ||
(pulse_num >= resultcol[segment_num].nPulses)) {
return;
}
// step one: read needed inputs from resultcol
float phase = resultcol[segment_num].fit_results[pulse_num].phase;
unsigned int start_sample =
segment_num * TRACELEN +
resultcol[segment_num].fit_results[pulse_num].peak_index - PEAKINDEXINFIT;
// step two: read in template values for this phase and sample num
unsigned int sample_index = threadIdx.x % SAMPLESPERFIT;
float phase_loc = phase * POINTSPERSAMPLE;
int phase_index = floor(phase_loc);
float weight_high = phase_loc - phase_index;
// make sure we're in bounds
if (phase_index < 0) {
phase_index = 0;
weight_high = 0;
} else if (phase_index >= POINTSPERSAMPLE) {
phase_index = POINTSPERSAMPLE - 1;
weight_high = 1;
}
unsigned int low_index = phase_index * SAMPLESPERFIT + sample_index;
unsigned int high_index = low_index + SAMPLESPERFIT;
float low_value = d_templates[segment_num].table[low_index];
float high_value = d_templates[segment_num].table[high_index];
// step 2.5 evaluate template
float t_i = low_value * (1 - weight_high) + high_value * weight_high;
// step three : read in pulse value
float d_i = trace[start_sample + sample_index];
// step four: prepare accumulation/reduction arrays
tSum[threadIdx.x] = t_i;
dSum[threadIdx.x] = d_i;
dDotT[threadIdx.x] = d_i * t_i;
tDotT[threadIdx.x] = t_i * t_i;
// step five: accumulate, note that explicit synchronization
// is not required because all accumulation is done within a warp
// it seems like this stops working if the if and for are inverted, so don't
// do that
for (unsigned int stride = 16; stride >= 1; stride /= 2) {
if (sample_index < 16) {
tSum[threadIdx.x] += tSum[threadIdx.x + stride];
dSum[threadIdx.x] += dSum[threadIdx.x + stride];
dDotT[threadIdx.x] += dDotT[threadIdx.x + stride];
tDotT[threadIdx.x] += tDotT[threadIdx.x + stride];
}
}
// step six : calculate pedestal, energy
// read final accumulated results
int result_index = (threadIdx.x / SAMPLESPERFIT) * SAMPLESPERFIT;
float tSumFinal = tSum[result_index];
float dSumFinal = dSum[result_index];
float dDotTFinal = dDotT[result_index];
float tDotTFinal = tDotT[result_index];
// closed-form least-squares solution of d ~= energy * t + pedestal
float denomRecip = 1.0 / (tSumFinal * tSumFinal - SAMPLESPERFIT * tDotTFinal);
float energy =
denomRecip * (dSumFinal * tSumFinal - SAMPLESPERFIT * dDotTFinal);
float pedestal =
denomRecip * (dDotTFinal * tSumFinal - dSumFinal * tDotTFinal);
// step seven: load partial chi^2s into shared memory
__shared__ float chi2sum[FIT_THREADSPERBLOCK];
float residual_i = d_i - energy * t_i - pedestal;
chi2sum[threadIdx.x] = residual_i * residual_i;
// step eight: accumulate partial chi2s
for (unsigned int stride = 16; stride >= 1; stride /= 2) {
if (sample_index < 16) {
chi2sum[threadIdx.x] += chi2sum[threadIdx.x + stride];
}
}
// final step: record results
// force energy positive
if (energy < 0) {
energy = energy * -1;
}
if (sample_index == 0) {
resultcol[segment_num].fit_results[pulse_num].energy = energy;
resultcol[segment_num].fit_results[pulse_num].pedestal = pedestal;
resultcol[segment_num].fit_results[pulse_num].chi2 = chi2sum[threadIdx.x];
}
}
// some global variables to keep track of state easily
// (shared by gpu_init / gpu_process / gpu_close below)
short* device_trace = nullptr;  // device buffer holding the raw trace
pulseFinderResultCollection* device_result = nullptr;  // device buffer for fit results
cudaDeviceProp prop;  // properties of the device selected in gpu_init()
// set device and malloc buffers
// Select device 0, allocate the device-side trace/result buffers, and
// upload the per-segment phase maps and pulse templates into the
// __device__ lookup tables used by the kernels.
// NOTE(review): every CUDA API return code here is ignored -- a failed
// cudaMalloc or cudaMemcpyToSymbol goes unnoticed until results are
// garbage; consider checking each call against cudaSuccess.
void gpu_init(std::vector<phaseMap>& phaseMaps,
std::vector<pulseTemplate>& pulseTemplates) {
int count;
cudaGetDeviceCount(&count);
std::cout << "there are " << count << " devices available." << std::endl;
cudaSetDevice(0);
cudaGetDeviceProperties(&prop, 0);
std::cout << "using gpu " << prop.name << std::endl;
cudaMalloc((void**)&device_trace, trace_size);
cudaMalloc((void**)&device_result, result_size);
cudaMemcpyToSymbol(d_phase_maps, (void*)phaseMaps.data(), phase_maps_size);
cudaMemcpyToSymbol(d_templates, (void*)pulseTemplates.data(), templates_size);
}
// free buffers, close gpu
// Release the device buffers (if allocated) and reset the device.
// Pointers are cleared after freeing so a repeated call is a harmless
// no-op; the original left them dangling, so calling gpu_close() twice
// would have issued a double cudaFree on stale pointers.
void gpu_close() {
  if (device_trace) {
    std::cout << "freeing device trace" << std::endl;
    cudaFree(device_trace);
    device_trace = nullptr;
  }
  if (device_result) {
    std::cout << "freeing device result" << std::endl;
    cudaFree(device_result);
    device_result = nullptr;
  }
  cudaDeviceReset();
}
// Run the full GPU pipeline on one trace: upload host_trace, zero the
// result buffer, launch find_times then fit_pulses, and copy the fit
// results back into result_buff. Per-stage wall-clock timings are
// printed to stdout.
// NOTE(review): kernel-launch and memcpy return codes are never checked
// (no cudaGetLastError after the launches); a failed launch would go
// unnoticed and the results would simply stay zeroed.
void gpu_process(short* host_trace, pulseFinderResultCollection* result_buff) {
// copy data to gpu
auto fullGpuStart = std::chrono::high_resolution_clock::now();
auto start = std::chrono::high_resolution_clock::now();
cudaMemcpy(device_trace, host_trace, trace_size, cudaMemcpyHostToDevice);
// zero results
cudaMemset(device_result, 0, result_size);
auto end = std::chrono::high_resolution_clock::now();
std::cout << "time for write to GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
// launch job
// 128 seems to give best performance
int threadsPerBlock = 128;
dim3 dimBlock(threadsPerBlock);
std::cout << "threads per block: " << threadsPerBlock << std::endl;
int blocks_per_trace =
std::ceil(static_cast<double>(TRACELEN) / threadsPerBlock);
std::cout << "blocks per trace: " << blocks_per_trace << std::endl;
std::cout << "threads per trace: " << blocks_per_trace* threadsPerBlock
<< std::endl;
dim3 dimGrid(blocks_per_trace);
start = std::chrono::high_resolution_clock::now();
// polarity -1: the pulse finder looks for negative-going pulses
find_times<<<dimGrid, dimBlock>>> (device_trace, device_result, THRESHOLD, -1);
cudaDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
std::cout << "time for find pulses computation on GPU : "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
std::cout << "TRYING FIT" << std::endl;
start = std::chrono::high_resolution_clock::now();
// one block fits PULSESPERBLOCK pulses; grid.y walks the segments
dimGrid = dim3(OUTPUTARRAYLEN / PULSESPERBLOCK, NSEGMENTS);
dimBlock = dim3(SAMPLESPERFIT * PULSESPERBLOCK, 1);
fit_pulses<<<dimGrid, dimBlock>>> (device_trace, device_result);
cudaDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
std::cout << "time for fit pulses computation on GPU : "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
// get data from gpu, finish up
start = std::chrono::high_resolution_clock::now();
cudaMemcpy((void*)result_buff, device_result, result_size,
cudaMemcpyDeviceToHost);
end = std::chrono::high_resolution_clock::now();
auto fullGpuEnd = end;
std::cout << "time read out results from GPU: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
end - start).count() << " microseconds" << std::endl
<< std::endl;
std::cout << "time from before write to gpu to after read from gpu: "
<< std::chrono::duration_cast<std::chrono::microseconds>(
fullGpuEnd - fullGpuStart).count() << " microseconds"
<< std::endl
<< std::endl;
}
|
09c5b30a1cdb5305b5bacccefd39c4ab109fc869.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef FWT_KERNEL_CU
#define FWT_KERNEL_CU
#include "util.h"
///////////////////////////////////////////////////////////////////////////////
// Elementary(for vectors less than elementary size) in-shared memory
// combined radix-2 + radix-4 Fast Walsh Transform
///////////////////////////////////////////////////////////////////////////////
#define ELEMENTARY_LOG2SIZE 11
// In-shared-memory Fast Walsh Transform of one 2^log2N vector per block
// (double variant): one optional radix-2 stage when log2N is odd, then
// radix-4 butterfly stages down to stride 1.
// Launch: blockIdx.x selects the batch vector; dynamic shared memory
// must be N * sizeof(double).
__global__ void fwtBatch1Kernel(double *d_Output, double *d_Input, int log2N) {
const int N = 1 << log2N;
int stride = N;
const int base = blockIdx.x << log2N;
// (2 ** 11) * 8 bytes == 16KB of dynamic shared memory for this double
// variant (the original "4 bytes == 8KB" note applied to float on G80)
extern __shared__ double s_data[];
double *d_Src = d_Input + base;
double *d_Dst = d_Output + base;
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
s_data[pos] = d_Src[pos];
//Do single radix-2 stage if for odd power
if(log2N & 1){
__syncthreads();
stride >>= 1;
for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 1) + lo;
int i1 = i0 + stride;
double t0 = s_data[i0];
double t1 = s_data[i1];
s_data[i0] = t0 + t1;
s_data[i1] = t0 - t1;
}
}
//Main radix4 stages
stride >>= 2;
int pos = threadIdx.x;
for(; stride >= 1; stride >>= 2){
__syncthreads();
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
double d0 = s_data[i0];
double d1 = s_data[i1];
double d2 = s_data[i2];
double d3 = s_data[i3];
double t;
// radix-4 butterfly: two radix-2 passes over (d0,d2),(d1,d3) then
// (d0,d1),(d2,d3)
t = d0; d0 = d0 + d2; d2 = t - d2;
t = d1; d1 = d1 + d3; d3 = t - d3;
t = d0; s_data[i0] = d0 + d1; s_data[i1] = t - d1;
t = d2; s_data[i2] = d2 + d3; s_data[i3] = t - d3;
}
__syncthreads();
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
d_Dst[pos] = s_data[pos];
}
// In-shared-memory Fast Walsh Transform of one 2^log2N vector per block
// (float variant): one optional radix-2 stage when log2N is odd, then
// radix-4 butterfly stages down to stride 1.
// Launch: blockIdx.x selects the batch vector; dynamic shared memory
// must be N * sizeof(float).
__global__ void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N) {
const int N = 1 << log2N;
int stride = N;
const int base = blockIdx.x << log2N;
// (2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80
extern __shared__ float s_data_rp[];
float *d_Src = d_Input + base;
float *d_Dst = d_Output + base;
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
s_data_rp[pos] = d_Src[pos];
//Do single radix-2 stage if for odd power
if(log2N & 1){
__syncthreads();
stride >>= 1;
for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 1) + lo;
int i1 = i0 + stride;
float t0 = s_data_rp[i0];
float t1 = s_data_rp[i1];
s_data_rp[i0] = t0 + t1;
s_data_rp[i1] = t0 - t1;
}
}
//Main radix4 stages
stride >>= 2;
int pos = threadIdx.x;
for(; stride >= 1; stride >>= 2){
__syncthreads();
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
float d0 = s_data_rp[i0];
float d1 = s_data_rp[i1];
float d2 = s_data_rp[i2];
float d3 = s_data_rp[i3];
float t;
// radix-4 butterfly: two radix-2 passes over (d0,d2),(d1,d3) then
// (d0,d1),(d2,d3)
t = d0; d0 = d0 + d2; d2 = t - d2;
t = d1; d1 = d1 + d3; d3 = t - d3;
t = d0; s_data_rp[i0] = d0 + d1; s_data_rp[i1] = t - d1;
t = d2; s_data_rp[i2] = d2 + d3; s_data_rp[i3] = t - d3;
}
__syncthreads();
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
d_Dst[pos] = s_data_rp[pos];
}
////////////////////////////////////////////////////////////////////////////////
// Single in-global memory radix-4 Fast Walsh Transform pass
// (for strides exceeding elementary vector size)
////////////////////////////////////////////////////////////////////////////////
// One radix-4 FWT pass in global memory, used while the butterfly stride
// is still too large for the shared-memory kernel. blockIdx.y selects
// the batch vector; each thread performs one radix-4 butterfly at the
// given stride.
template<typename real_t>
__global__ void fwtBatch2Kernel(real_t *d_Output, real_t *d_Input, int stride) {
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
// N = elements covered per batch vector by this launch configuration
const int N = blockDim.x * gridDim.x * 4;
real_t *d_Src = d_Input + blockIdx.y * N;
real_t *d_Dst = d_Output + blockIdx.y * N;
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
real_t d0 = d_Src[i0];
real_t d1 = d_Src[i1];
real_t d2 = d_Src[i2];
real_t d3 = d_Src[i3];
real_t t;
// radix-4 butterfly, same scheme as the shared-memory kernel
t = d0; d0 = d0 + d2; d2 = t - d2;
t = d1; d1 = d1 + d3; d3 = t - d3;
t = d0; d_Dst[i0] = d0 + d1; d_Dst[i1] = t - d1;
t = d2; d_Dst[i2] = d2 + d3; d_Dst[i3] = t - d3;
}
////////////////////////////////////////////////////////////////////////////////
// Put everything together: batched Fast Walsh Transform CPU front-end
////////////////////////////////////////////////////////////////////////////////
// Batched FWT front-end: apply global-memory radix-4 passes until the
// remaining sub-transform fits in shared memory, then finish with the
// elementary in-shared-memory kernel. Transforms M vectors of length
// 2^log2N in place on `stream`.
// NOTE(review): `grid` is computed once from the initial log2N and M and
// reused for every fwtBatch2Kernel pass while N shrinks and M grows; the
// total element count is invariant across passes, matching the original
// NVIDIA fastWalshTransform sample -- confirm before restructuring.
template<typename real_t>
void fwtBatchGPUTemplate(real_t *d_Data, int M, int log2N, hipStream_t stream) {
int N = 1 << log2N;
dim3 grid((1 << log2N) / 1024, M, 1);
for(; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2){
hipLaunchKernelGGL(( fwtBatch2Kernel), dim3(grid), dim3(256), 0, stream, d_Data, d_Data, N / 4);
CHECK_CUDA_ERROR(hipPeekAtLastError());
}
hipLaunchKernelGGL(( fwtBatch1Kernel), dim3(M), dim3(N / 4), N * sizeof(real_t), stream, d_Data, d_Data, log2N);
CHECK_CUDA_ERROR(hipPeekAtLastError());
}
// Type-dispatching wrappers over fwtBatchGPUTemplate (double and float).
void fwtBatchGPU(double *d_Data, int M, int log2N, hipStream_t stream) {
fwtBatchGPUTemplate(d_Data, M, log2N, stream);
}
void fwtBatchGPU(float *d_Data, int M, int log2N, hipStream_t stream) {
fwtBatchGPUTemplate(d_Data, M, log2N, stream);
}
////////////////////////////////////////////////////////////////////////////////
// Modulate two arrays
////////////////////////////////////////////////////////////////////////////////
// Elementwise modulation with normalization: d_A[i] *= d_B[i] / N,
// using a grid-stride loop so any launch configuration covers all N
// elements.
template<typename real_t>
__global__ void modulateKernel(real_t *d_A, real_t *d_B, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
real_t rcpN = 1.0f / (real_t)N;
for (int pos = tid; pos < N; pos += numThreads)
d_A[pos] *= d_B[pos] * rcpN;
}
// Interface to modulateKernel()
// Host wrappers launching modulateKernel with a fixed 128x256 grid-stride
// configuration on `stream` (double and float overloads).
void modulateGPU(double *d_A, double *d_B, int N, hipStream_t stream) {
hipLaunchKernelGGL(( modulateKernel), dim3(128), dim3(256), 0, stream, d_A, d_B, N);
}
void modulateGPU(float *d_A, float *d_B, int N, hipStream_t stream) {
hipLaunchKernelGGL(( modulateKernel), dim3(128), dim3(256), 0, stream, d_A, d_B, N);
}
////////////////////////////////////////////////////////////////////////////////
// Duplicate input
////////////////////////////////////////////////////////////////////////////////
// Copy the double-precision input into its reduced-precision float
// shadow array, one element per thread; threads past N do nothing.
__global__ void duplicate_input_kernel(double *input, float *input_rp, int N) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N)
    return;
  input_rp[idx] = input[idx];
}
// Host wrapper: launch duplicate_input_kernel with enough BLOCK_SIZE
// thread blocks to cover N elements, then check for launch errors.
void duplicate_input_gpu(double *input, float *input_rp, int N) {
// ceil-divide so a partial final block covers the tail
int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( duplicate_input_kernel), dim3(gridDim), dim3(BLOCK_SIZE), 0, 0, input, input_rp, N);
CHECK_CUDA_ERROR(hipPeekAtLastError());
}
#endif
| 09c5b30a1cdb5305b5bacccefd39c4ab109fc869.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#ifndef FWT_KERNEL_CU
#define FWT_KERNEL_CU
#include "util.h"
///////////////////////////////////////////////////////////////////////////////
// Elementary(for vectors less than elementary size) in-shared memory
// combined radix-2 + radix-4 Fast Walsh Transform
///////////////////////////////////////////////////////////////////////////////
#define ELEMENTARY_LOG2SIZE 11
// In-shared-memory Fast Walsh Transform of one 2^log2N vector per block
// (double variant): one optional radix-2 stage when log2N is odd, then
// radix-4 butterfly stages down to stride 1.
// Launch: blockIdx.x selects the batch vector; dynamic shared memory
// must be N * sizeof(double).
__global__ void fwtBatch1Kernel(double *d_Output, double *d_Input, int log2N) {
const int N = 1 << log2N;
int stride = N;
const int base = blockIdx.x << log2N;
// (2 ** 11) * 8 bytes == 16KB of dynamic shared memory for this double
// variant (the original "4 bytes == 8KB" note applied to float on G80)
extern __shared__ double s_data[];
double *d_Src = d_Input + base;
double *d_Dst = d_Output + base;
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
s_data[pos] = d_Src[pos];
//Do single radix-2 stage if for odd power
if(log2N & 1){
__syncthreads();
stride >>= 1;
for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 1) + lo;
int i1 = i0 + stride;
double t0 = s_data[i0];
double t1 = s_data[i1];
s_data[i0] = t0 + t1;
s_data[i1] = t0 - t1;
}
}
//Main radix4 stages
stride >>= 2;
int pos = threadIdx.x;
for(; stride >= 1; stride >>= 2){
__syncthreads();
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
double d0 = s_data[i0];
double d1 = s_data[i1];
double d2 = s_data[i2];
double d3 = s_data[i3];
double t;
// radix-4 butterfly: two radix-2 passes over (d0,d2),(d1,d3) then
// (d0,d1),(d2,d3)
t = d0; d0 = d0 + d2; d2 = t - d2;
t = d1; d1 = d1 + d3; d3 = t - d3;
t = d0; s_data[i0] = d0 + d1; s_data[i1] = t - d1;
t = d2; s_data[i2] = d2 + d3; s_data[i3] = t - d3;
}
__syncthreads();
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
d_Dst[pos] = s_data[pos];
}
// In-shared-memory Fast Walsh Transform of one 2^log2N vector per block
// (float variant): one optional radix-2 stage when log2N is odd, then
// radix-4 butterfly stages down to stride 1.
// Launch: blockIdx.x selects the batch vector; dynamic shared memory
// must be N * sizeof(float).
__global__ void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N) {
const int N = 1 << log2N;
int stride = N;
const int base = blockIdx.x << log2N;
// (2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80
extern __shared__ float s_data_rp[];
float *d_Src = d_Input + base;
float *d_Dst = d_Output + base;
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
s_data_rp[pos] = d_Src[pos];
//Do single radix-2 stage if for odd power
if(log2N & 1){
__syncthreads();
stride >>= 1;
for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 1) + lo;
int i1 = i0 + stride;
float t0 = s_data_rp[i0];
float t1 = s_data_rp[i1];
s_data_rp[i0] = t0 + t1;
s_data_rp[i1] = t0 - t1;
}
}
//Main radix4 stages
stride >>= 2;
int pos = threadIdx.x;
for(; stride >= 1; stride >>= 2){
__syncthreads();
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
float d0 = s_data_rp[i0];
float d1 = s_data_rp[i1];
float d2 = s_data_rp[i2];
float d3 = s_data_rp[i3];
float t;
// radix-4 butterfly: two radix-2 passes over (d0,d2),(d1,d3) then
// (d0,d1),(d2,d3)
t = d0; d0 = d0 + d2; d2 = t - d2;
t = d1; d1 = d1 + d3; d3 = t - d3;
t = d0; s_data_rp[i0] = d0 + d1; s_data_rp[i1] = t - d1;
t = d2; s_data_rp[i2] = d2 + d3; s_data_rp[i3] = t - d3;
}
__syncthreads();
for(int pos = threadIdx.x; pos < N; pos += blockDim.x)
d_Dst[pos] = s_data_rp[pos];
}
////////////////////////////////////////////////////////////////////////////////
// Single in-global memory radix-4 Fast Walsh Transform pass
// (for strides exceeding elementary vector size)
////////////////////////////////////////////////////////////////////////////////
template<typename real_t>
__global__ void fwtBatch2Kernel(real_t *d_Output, real_t *d_Input, int stride) {
    // Single radix-4 FWT pass in global memory, used while the butterfly
    // stride exceeds what the shared-memory kernel can hold.
    //
    // Launch contract: gridDim.x * blockDim.x == N / 4, where N is the batch
    // element length; gridDim.y selects the batch element.
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    const int N   = blockDim.x * gridDim.x * 4;  // element length, recovered from the launch

    real_t *src = d_Input  + blockIdx.y * N;
    real_t *dst = d_Output + blockIdx.y * N;

    const int lo = pos & (stride - 1);
    const int i0 = ((pos - lo) << 2) + lo;
    const int i1 = i0 + stride;
    const int i2 = i1 + stride;
    const int i3 = i2 + stride;

    real_t e0 = src[i0];
    real_t e1 = src[i1];
    real_t e2 = src[i2];
    real_t e3 = src[i3];

    // 4-point Walsh butterfly, original operation order preserved so the
    // floating-point results match the reference exactly.
    real_t s02 = e0 + e2, d02 = e0 - e2;
    real_t s13 = e1 + e3, d13 = e1 - e3;
    dst[i0] = s02 + s13;
    dst[i1] = s02 - s13;
    dst[i2] = d02 + d13;
    dst[i3] = d02 - d13;
}
////////////////////////////////////////////////////////////////////////////////
// Put everything together: batched Fast Walsh Transform CPU front-end
////////////////////////////////////////////////////////////////////////////////
template<typename real_t>
void fwtBatchGPUTemplate(real_t *d_Data, int M, int log2N, cudaStream_t stream) {
    // Batched in-place Fast Walsh Transform: M vectors of length 2^log2N.
    //
    // Global-memory radix-4 passes run until a whole vector fits the
    // shared-memory kernel; each pass quarters N while treating the data as
    // four times as many (shorter) vectors.  The grid is deliberately
    // computed once from the ORIGINAL size: fwtBatch2Kernel recovers the
    // element count from gridDim/blockDim, so the total thread count must
    // stay N*M/4 across passes.
    const int threads = 256;
    int N = 1 << log2N;
    dim3 grid((1 << log2N) / (4 * threads), M, 1);

    for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2) {
        fwtBatch2Kernel<<<grid, threads, 0, stream>>>(d_Data, d_Data, N / 4);
        CHECK_CUDA_ERROR(cudaPeekAtLastError());
    }

    // Finish in shared memory: one block per vector, N/4 threads each,
    // N * sizeof(real_t) bytes of dynamic shared memory.
    fwtBatch1Kernel<<<M, N / 4, N * sizeof(real_t), stream>>>(d_Data, d_Data, log2N);
    CHECK_CUDA_ERROR(cudaPeekAtLastError());
}
void fwtBatchGPU(double *d_Data, int M, int log2N, cudaStream_t stream) {
    // Double-precision entry point; forwards to the shared template.
    fwtBatchGPUTemplate<double>(d_Data, M, log2N, stream);
}
void fwtBatchGPU(float *d_Data, int M, int log2N, cudaStream_t stream) {
    // Single-precision entry point; forwards to the shared template.
    fwtBatchGPUTemplate<float>(d_Data, M, log2N, stream);
}
////////////////////////////////////////////////////////////////////////////////
// Modulate two arrays
////////////////////////////////////////////////////////////////////////////////
template<typename real_t>
__global__ void modulateKernel(real_t *d_A, real_t *d_B, int N){
    // Element-wise in-place modulation with normalization:
    // d_A[i] *= d_B[i] / N.  Grid-stride loop, so any launch size covers N.
    const int stride = blockDim.x * gridDim.x;
    const real_t rcpN = 1.0f / (real_t)N;  // 1/N precomputed once per thread
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        d_A[i] *= d_B[i] * rcpN;
}
// Interface to modulateKernel()
void modulateGPU(double *d_A, double *d_B, int N, cudaStream_t stream) {
    // Double-precision wrapper: 128 blocks x 256 threads; the kernel's
    // grid-stride loop handles any N.
    modulateKernel<double><<<128, 256, 0, stream>>>(d_A, d_B, N);
}
void modulateGPU(float *d_A, float *d_B, int N, cudaStream_t stream) {
    // Single-precision wrapper: 128 blocks x 256 threads; the kernel's
    // grid-stride loop handles any N.
    modulateKernel<float><<<128, 256, 0, stream>>>(d_A, d_B, N);
}
////////////////////////////////////////////////////////////////////////////////
// Duplicate input
////////////////////////////////////////////////////////////////////////////////
__global__ void duplicate_input_kernel(double *input, float *input_rp, int N) {
    // Copy the double-precision input into a reduced-precision (float)
    // shadow array, one element per thread.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;  // guard the grid tail
    input_rp[idx] = (float)input[idx];
}
void duplicate_input_gpu(double *input, float *input_rp, int N) {
    // Launch enough BLOCK_SIZE-wide blocks to cover all N elements (ceil-div).
    const int blocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    duplicate_input_kernel<<<blocks, BLOCK_SIZE>>>(input, input_rp, N);
    CHECK_CUDA_ERROR(cudaPeekAtLastError());
}
#endif
|
367042db4c1505428e994740100eb19a2ac17946.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <complex.h>
#include <hipfft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "dnsparams.h"
#include "struct_def.h"
// Allocate every host- and device-side buffer used by the solver: pinned host
// tables of per-GPU pointers, device fields for velocity/scalar data in
// spectral form, RHS history for the time integrator, averaged Y-profiles and
// per-GPU statistics.  Paired with deallocate_memory() below.  Array sizes
// come from dnsparams.h (NX, NY, NZ2, RAD); struct layouts from struct_def.h.
void allocate_memory(){
// // Allocate memory for statistics structs on both device and host
int n, nGPUs;
// // Declare extern variables (to pull def's from declare.h)
extern gpuinfo gpu;
extern fftinfo fft;
extern statistics *stats;
extern profile Yprofile;
extern double **k;
extern fielddata h_vel;
extern fielddata vel;
extern fielddata rhs;
extern fielddata rhs_old;
extern hipfftDoubleComplex **temp_advective;
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
printf("Allocating data on %d GPUs!\n",nGPUs);
// Allocate pinned memory on the host side that stores array of pointers for FFT operations
// NOTE(review): `fft` is an extern struct object, not a pointer; passing
// (void**)&fft makes this call overwrite the first bytes of the struct with a
// freshly pinned pointer that is never referenced again.  Looks unintended --
// verify against declare.h (same pattern for `Yprofile` below and the
// matching hipHostFree(&fft)/(&Yprofile) calls in deallocate_memory()).
hipHostMalloc((void**)&fft, sizeof(fftinfo), hipHostMallocMapped);
hipHostMalloc((void**)&fft.p1d, nGPUs*sizeof(hipfftHandle *), hipHostMallocMapped); // Allocate memory for array of cufftHandles to store nGPUs worth 1d plans
hipHostMalloc((void**)&fft.p2d, nGPUs*sizeof(hipfftHandle *), hipHostMallocMapped); // Allocate memory array of 2dplans
hipHostMalloc((void**)&fft.invp2d, nGPUs*sizeof(hipfftHandle *), hipHostMallocMapped); // Array of inverse 2d plans
hipHostMalloc((void**)&fft.wsize_f, nGPUs*sizeof(size_t *), hipHostMallocMapped); // Size of workspace required for forward transform
hipHostMalloc((void**)&fft.wsize_i, nGPUs*sizeof(size_t *), hipHostMallocMapped); // Size of workspace required for inverse transform
hipHostMalloc((void**)&fft.wspace, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped); // Array of pointers to FFT workspace on each device
hipHostMalloc((void**)&fft.temp, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped); // Array of pointers to scratch (temporary) memory on each device
hipHostMalloc((void**)&fft.temp_reorder, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped); // Same as above, different temp variable
// Allocate memory on host to store averaged profile data
hipHostMalloc((void**)&Yprofile, sizeof(profile), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.u, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.v, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.w, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.s, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.uu, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.vv, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.ww, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&Yprofile.ss, nGPUs*sizeof(double *), hipHostMallocMapped);
// Allocate memory on host
// h_vel arrays are declared double* but sized for complex data -- presumably
// a padded real/complex (R2C) layout; confirm against struct_def.h.
h_vel.u = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.v = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.w = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.s = (double **)malloc(sizeof(double *)*nGPUs);
// Allocate pinned memory on the host side that stores array of pointers
hipHostMalloc((void**)&k, nGPUs*sizeof(double *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.uh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.vh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.wh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.sh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.left, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&vel.right, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.uh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.vh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.wh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.sh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.left, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs.right, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs_old.uh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs_old.vh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs_old.wh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&rhs_old.sh, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
hipHostMalloc((void**)&temp_advective, nGPUs*sizeof(hipfftDoubleComplex *), hipHostMallocMapped);
// For statistics
// NOTE(review): `stats` is declared `statistics *`, but here it is allocated
// as an array of nGPUs pointers, and below each element is re-allocated with
// hipMallocManaged((void**)&stats[n], ...) as if stats[n] were a pointer
// slot.  The declared type and the usage disagree (statistics ** would match
// this usage) -- verify the declaration in declare.h.
hipHostMalloc(&stats, nGPUs*sizeof(statistics *), hipHostMallocMapped);
// Allocate memory for arrays
// Per-GPU allocations: each device n owns a slab of gpu.nx[n] X-planes.
for (n = 0; n<nGPUs; ++n){
hipSetDevice(n);
h_vel.u[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.v[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.w[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.s[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
checkCudaErrors( hipMalloc((void **)&k[n], sizeof(double)*NX ) );
// Allocate memory for velocity fields
checkCudaErrors( hipMalloc((void **)&vel.uh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&vel.vh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&vel.wh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&vel.sh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
// left/right are RAD-plane halo buffers for neighbor exchange
checkCudaErrors( hipMalloc((void **)&vel.left[n], sizeof(hipfftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&vel.right[n], sizeof(hipfftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.uh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.vh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.wh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.sh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.left[n], sizeof(hipfftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&rhs.right[n], sizeof(hipfftDoubleComplex)*RAD*NY*NZ2) );
// rhs_old uses managed memory (unlike vel/rhs, which are plain device
// allocations) -- reason not visible here.
checkCudaErrors( hipMallocManaged((void **)&rhs_old.uh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMallocManaged((void **)&rhs_old.vh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMallocManaged((void **)&rhs_old.wh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMallocManaged((void **)&rhs_old.sh[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&temp_advective[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&fft.temp[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMalloc((void **)&fft.temp_reorder[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NZ2) );
// Statistics
// NOTE(review): treats stats[n] as a pointer slot -- see the note on the
// hipHostMalloc(&stats, ...) call above.
checkCudaErrors( hipMallocManaged( (void **)&stats[n], sizeof(statistics) ));
// Averaged Profiles
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.u[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.v[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.w[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.s[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.uu[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.vv[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.ww[n], sizeof(double)*NY) );
checkCudaErrors( hipMallocManaged( (void **)&Yprofile.ss[n], sizeof(double)*NY) );
// Area statistics
checkCudaErrors( hipMallocManaged( (void **)&stats[n].area_scalar, sizeof(double)*64) );
checkCudaErrors( hipMallocManaged( (void **)&stats[n].area_omega, sizeof(double)*64) );
printf("Data allocated on Device %d\n", n);
}
// Cast pointers to complex arrays to real array and store in the proper struct field
// (The real and complex views alias the same device buffers.)
vel.u = (hipfftDoubleReal **)vel.uh;
vel.v = (hipfftDoubleReal **)vel.vh;
vel.w = (hipfftDoubleReal **)vel.wh;
vel.s = (hipfftDoubleReal **)vel.sh;
rhs.u = (hipfftDoubleReal **)rhs.uh;
rhs.v = (hipfftDoubleReal **)rhs.vh;
rhs.w = (hipfftDoubleReal **)rhs.wh;
rhs.s = (hipfftDoubleReal **)rhs.sh;
rhs_old.u = (hipfftDoubleReal **)rhs_old.uh;
rhs_old.v = (hipfftDoubleReal **)rhs_old.vh;
rhs_old.w = (hipfftDoubleReal **)rhs_old.wh;
rhs_old.s = (hipfftDoubleReal **)rhs_old.sh;
// Initialize everything to 0 before entering the rest of the routine
// (memsets use the real-typed aliases but the complex-sized byte counts, so
// they clear the full buffers allocated above)
for (n = 0; n<nGPUs; ++n){
hipSetDevice(n);
checkCudaErrors( hipMemset(k[n], 0, sizeof(double)*NX) );
checkCudaErrors( hipMemset(vel.u[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(vel.v[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(vel.w[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(vel.s[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs.u[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs.v[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs.w[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs.s[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs_old.u[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs_old.v[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs_old.w[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(rhs_old.s[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(temp_advective[n], 0, sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( hipMemset(Yprofile.u[n], 0, sizeof(double)*NY) );
checkCudaErrors( hipMemset(Yprofile.v[n], 0, sizeof(double)*NY) );
checkCudaErrors( hipMemset(Yprofile.w[n], 0, sizeof(double)*NY) );
checkCudaErrors( hipMemset(Yprofile.s[n], 0, sizeof(double)*NY) );
}
return;
}
// Release the buffers obtained in allocate_memory(): per-GPU device memory
// and FFT plans first, then the pinned host pointer tables and plain host
// arrays.  Must only be called after allocate_memory().
void deallocate_memory(){
int n, nGPUs;
// // Declare extern variables (to pull def's from declare.h)
extern gpuinfo gpu;
extern fftinfo fft;
extern statistics h_stats;
extern statistics *stats;
extern profile Yprofile;
extern double **k;
extern fielddata h_vel;
extern fielddata vel;
extern fielddata rhs;
extern fielddata rhs_old;
extern hipfftDoubleComplex **temp_advective;
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
// Deallocate GPU memory
// NOTE(review): several per-device buffers allocated in allocate_memory()
// are never freed here -- vel.left/right, rhs.left/right, and the
// Yprofile.uu/vv/ww/ss arrays.  Possible leaks to confirm.
for(n = 0; n<nGPUs; ++n){
hipSetDevice(n);
hipFree(fft.temp[n]);
hipFree(fft.temp_reorder[n]);
hipFree(fft.wspace[n]);
hipFree(k[n]);
free(h_vel.u[n]);
free(h_vel.v[n]);
free(h_vel.w[n]);
free(h_vel.s[n]);
hipFree(vel.u[n]);
hipFree(vel.v[n]);
hipFree(vel.w[n]);
hipFree(vel.s[n]);
hipFree(rhs.u[n]);
hipFree(rhs.v[n]);
hipFree(rhs.w[n]);
hipFree(rhs.s[n]);
hipFree(rhs_old.u[n]);
hipFree(rhs_old.v[n]);
hipFree(rhs_old.w[n]);
hipFree(rhs_old.s[n]);
hipFree(temp_advective[n]);
// NOTE(review): this frees the ADDRESS of the slot rather than the pointer
// stored in it; it mirrors the type-confused
// hipMallocManaged((void**)&stats[n], ...) in allocate_memory() -- both
// depend on the true declaration of `stats` in declare.h; verify.
hipFree(&stats[n]);
// Averaged Profiles
hipFree(Yprofile.u[n]);
hipFree(Yprofile.v[n]);
hipFree(Yprofile.w[n]);
hipFree(Yprofile.s[n]);
// Destroy cufft plans
hipfftDestroy(fft.p1d[n]);
hipfftDestroy(fft.p2d[n]);
hipfftDestroy(fft.invp2d[n]);
}
// Deallocate pointer arrays on host memory
// (gpu.* arrays are allocated elsewhere, not in this file's allocate_memory)
hipHostFree(gpu.gpunum);
hipHostFree(gpu.ny);
hipHostFree(gpu.nx);
hipHostFree(gpu.start_x);
hipHostFree(gpu.start_y);
hipHostFree(k);
hipHostFree(temp_advective);
// NOTE(review): the fft.p1d/p2d/invp2d pointer tables allocated in
// allocate_memory() are not freed here.
hipHostFree(fft.wsize_f);
hipHostFree(fft.wsize_i);
hipHostFree(fft.wspace);
hipHostFree(fft.temp);
hipHostFree(fft.temp_reorder);
// NOTE(review): `fft` is a struct object; freeing its address pairs with the
// questionable hipHostMalloc((void**)&fft, ...) in allocate_memory() -- both
// look unintended (same for &Yprofile below); verify against declare.h.
hipHostFree(&fft);
hipHostFree(vel.uh);
hipHostFree(vel.vh);
hipHostFree(vel.wh);
hipHostFree(vel.sh);
hipHostFree(rhs.uh);
hipHostFree(rhs.vh);
hipHostFree(rhs.wh);
hipHostFree(rhs.sh);
hipHostFree(rhs_old.uh);
hipHostFree(rhs_old.vh);
hipHostFree(rhs_old.wh);
hipHostFree(rhs_old.sh);
hipHostFree(stats);
// Averaged Profiles
hipHostFree(Yprofile.u);
hipHostFree(Yprofile.v);
hipHostFree(Yprofile.w);
hipHostFree(Yprofile.s);
hipHostFree(&Yprofile);
// Deallocate memory on CPU
free(h_vel.u);
free(h_vel.v);
free(h_vel.w);
free(h_vel.s);
return;
}
| 367042db4c1505428e994740100eb19a2ac17946.cu | #include <stdlib.h>
#include <complex.h>
#include <cufft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "dnsparams.h"
#include "struct_def.h"
// Allocate every host- and device-side buffer used by the solver: pinned host
// tables of per-GPU pointers, device fields for velocity/scalar data in
// spectral form, RHS history for the time integrator, averaged Y-profiles and
// per-GPU statistics.  Paired with deallocate_memory() below.  Array sizes
// come from dnsparams.h (NX, NY, NZ2, RAD); struct layouts from struct_def.h.
void allocate_memory(){
// // Allocate memory for statistics structs on both device and host
int n, nGPUs;
// // Declare extern variables (to pull def's from declare.h)
extern gpuinfo gpu;
extern fftinfo fft;
extern statistics *stats;
extern profile Yprofile;
extern double **k;
extern fielddata h_vel;
extern fielddata vel;
extern fielddata rhs;
extern fielddata rhs_old;
extern cufftDoubleComplex **temp_advective;
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
printf("Allocating data on %d GPUs!\n",nGPUs);
// Allocate pinned memory on the host side that stores array of pointers for FFT operations
// NOTE(review): `fft` is an extern struct object, not a pointer; passing
// (void**)&fft makes this call overwrite the first bytes of the struct with a
// freshly pinned pointer that is never referenced again.  Looks unintended --
// verify against declare.h (same pattern for `Yprofile` below and the
// matching cudaFreeHost(&fft)/(&Yprofile) calls in deallocate_memory()).
cudaHostAlloc((void**)&fft, sizeof(fftinfo), cudaHostAllocMapped);
cudaHostAlloc((void**)&fft.p1d, nGPUs*sizeof(cufftHandle *), cudaHostAllocMapped); // Allocate memory for array of cufftHandles to store nGPUs worth 1d plans
cudaHostAlloc((void**)&fft.p2d, nGPUs*sizeof(cufftHandle *), cudaHostAllocMapped); // Allocate memory array of 2dplans
cudaHostAlloc((void**)&fft.invp2d, nGPUs*sizeof(cufftHandle *), cudaHostAllocMapped); // Array of inverse 2d plans
cudaHostAlloc((void**)&fft.wsize_f, nGPUs*sizeof(size_t *), cudaHostAllocMapped); // Size of workspace required for forward transform
cudaHostAlloc((void**)&fft.wsize_i, nGPUs*sizeof(size_t *), cudaHostAllocMapped); // Size of workspace required for inverse transform
cudaHostAlloc((void**)&fft.wspace, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped); // Array of pointers to FFT workspace on each device
cudaHostAlloc((void**)&fft.temp, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped); // Array of pointers to scratch (temporary) memory on each device
cudaHostAlloc((void**)&fft.temp_reorder, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped); // Same as above, different temp variable
// Allocate memory on host to store averaged profile data
cudaHostAlloc((void**)&Yprofile, sizeof(profile), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.u, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.v, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.w, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.s, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.uu, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.vv, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.ww, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&Yprofile.ss, nGPUs*sizeof(double *), cudaHostAllocMapped);
// Allocate memory on host
// h_vel arrays are declared double* but sized for complex data -- presumably
// a padded real/complex (R2C) layout; confirm against struct_def.h.
h_vel.u = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.v = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.w = (double **)malloc(sizeof(double *)*nGPUs);
h_vel.s = (double **)malloc(sizeof(double *)*nGPUs);
// Allocate pinned memory on the host side that stores array of pointers
cudaHostAlloc((void**)&k, nGPUs*sizeof(double *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.uh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.vh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.wh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.sh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.left, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&vel.right, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.uh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.vh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.wh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.sh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.left, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs.right, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs_old.uh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs_old.vh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs_old.wh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&rhs_old.sh, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
cudaHostAlloc((void**)&temp_advective, nGPUs*sizeof(cufftDoubleComplex *), cudaHostAllocMapped);
// For statistics
// NOTE(review): `stats` is declared `statistics *`, but here it is allocated
// as an array of nGPUs pointers, and below each element is re-allocated with
// cudaMallocManaged((void**)&stats[n], ...) as if stats[n] were a pointer
// slot.  The declared type and the usage disagree (statistics ** would match
// this usage) -- verify the declaration in declare.h.
cudaHostAlloc(&stats, nGPUs*sizeof(statistics *), cudaHostAllocMapped);
// Allocate memory for arrays
// Per-GPU allocations: each device n owns a slab of gpu.nx[n] X-planes.
for (n = 0; n<nGPUs; ++n){
cudaSetDevice(n);
h_vel.u[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.v[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.w[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
h_vel.s[n] = (double *)malloc(sizeof(complex double)*gpu.nx[n]*NY*NZ2);
checkCudaErrors( cudaMalloc((void **)&k[n], sizeof(double)*NX ) );
// Allocate memory for velocity fields
checkCudaErrors( cudaMalloc((void **)&vel.uh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&vel.vh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&vel.wh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&vel.sh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
// left/right are RAD-plane halo buffers for neighbor exchange
checkCudaErrors( cudaMalloc((void **)&vel.left[n], sizeof(cufftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&vel.right[n], sizeof(cufftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.uh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.vh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.wh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.sh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.left[n], sizeof(cufftDoubleComplex)*RAD*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&rhs.right[n], sizeof(cufftDoubleComplex)*RAD*NY*NZ2) );
// rhs_old uses managed memory (unlike vel/rhs, which are plain device
// allocations) -- reason not visible here.
checkCudaErrors( cudaMallocManaged((void **)&rhs_old.uh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMallocManaged((void **)&rhs_old.vh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMallocManaged((void **)&rhs_old.wh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMallocManaged((void **)&rhs_old.sh[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&temp_advective[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&fft.temp[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMalloc((void **)&fft.temp_reorder[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NZ2) );
// Statistics
// NOTE(review): treats stats[n] as a pointer slot -- see the note on the
// cudaHostAlloc(&stats, ...) call above.
checkCudaErrors( cudaMallocManaged( (void **)&stats[n], sizeof(statistics) ));
// Averaged Profiles
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.u[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.v[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.w[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.s[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.uu[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.vv[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.ww[n], sizeof(double)*NY) );
checkCudaErrors( cudaMallocManaged( (void **)&Yprofile.ss[n], sizeof(double)*NY) );
// Area statistics
checkCudaErrors( cudaMallocManaged( (void **)&stats[n].area_scalar, sizeof(double)*64) );
checkCudaErrors( cudaMallocManaged( (void **)&stats[n].area_omega, sizeof(double)*64) );
printf("Data allocated on Device %d\n", n);
}
// Cast pointers to complex arrays to real array and store in the proper struct field
// (The real and complex views alias the same device buffers.)
vel.u = (cufftDoubleReal **)vel.uh;
vel.v = (cufftDoubleReal **)vel.vh;
vel.w = (cufftDoubleReal **)vel.wh;
vel.s = (cufftDoubleReal **)vel.sh;
rhs.u = (cufftDoubleReal **)rhs.uh;
rhs.v = (cufftDoubleReal **)rhs.vh;
rhs.w = (cufftDoubleReal **)rhs.wh;
rhs.s = (cufftDoubleReal **)rhs.sh;
rhs_old.u = (cufftDoubleReal **)rhs_old.uh;
rhs_old.v = (cufftDoubleReal **)rhs_old.vh;
rhs_old.w = (cufftDoubleReal **)rhs_old.wh;
rhs_old.s = (cufftDoubleReal **)rhs_old.sh;
// Initialize everything to 0 before entering the rest of the routine
// (memsets use the real-typed aliases but the complex-sized byte counts, so
// they clear the full buffers allocated above)
for (n = 0; n<nGPUs; ++n){
cudaSetDevice(n);
checkCudaErrors( cudaMemset(k[n], 0, sizeof(double)*NX) );
checkCudaErrors( cudaMemset(vel.u[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(vel.v[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(vel.w[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(vel.s[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs.u[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs.v[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs.w[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs.s[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs_old.u[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs_old.v[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs_old.w[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(rhs_old.s[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(temp_advective[n], 0, sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2) );
checkCudaErrors( cudaMemset(Yprofile.u[n], 0, sizeof(double)*NY) );
checkCudaErrors( cudaMemset(Yprofile.v[n], 0, sizeof(double)*NY) );
checkCudaErrors( cudaMemset(Yprofile.w[n], 0, sizeof(double)*NY) );
checkCudaErrors( cudaMemset(Yprofile.s[n], 0, sizeof(double)*NY) );
}
return;
}
// Release the buffers obtained in allocate_memory(): per-GPU device memory
// and cuFFT plans first, then the pinned host pointer tables and plain host
// arrays.  Must only be called after allocate_memory().
void deallocate_memory(){
int n, nGPUs;
// // Declare extern variables (to pull def's from declare.h)
extern gpuinfo gpu;
extern fftinfo fft;
extern statistics h_stats;
extern statistics *stats;
extern profile Yprofile;
extern double **k;
extern fielddata h_vel;
extern fielddata vel;
extern fielddata rhs;
extern fielddata rhs_old;
extern cufftDoubleComplex **temp_advective;
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
// Deallocate GPU memory
// NOTE(review): several per-device buffers allocated in allocate_memory()
// are never freed here -- vel.left/right, rhs.left/right, and the
// Yprofile.uu/vv/ww/ss arrays.  Possible leaks to confirm.
for(n = 0; n<nGPUs; ++n){
cudaSetDevice(n);
cudaFree(fft.temp[n]);
cudaFree(fft.temp_reorder[n]);
cudaFree(fft.wspace[n]);
cudaFree(k[n]);
free(h_vel.u[n]);
free(h_vel.v[n]);
free(h_vel.w[n]);
free(h_vel.s[n]);
cudaFree(vel.u[n]);
cudaFree(vel.v[n]);
cudaFree(vel.w[n]);
cudaFree(vel.s[n]);
cudaFree(rhs.u[n]);
cudaFree(rhs.v[n]);
cudaFree(rhs.w[n]);
cudaFree(rhs.s[n]);
cudaFree(rhs_old.u[n]);
cudaFree(rhs_old.v[n]);
cudaFree(rhs_old.w[n]);
cudaFree(rhs_old.s[n]);
cudaFree(temp_advective[n]);
// NOTE(review): this frees the ADDRESS of the slot rather than the pointer
// stored in it; it mirrors the type-confused
// cudaMallocManaged((void**)&stats[n], ...) in allocate_memory() -- both
// depend on the true declaration of `stats` in declare.h; verify.
cudaFree(&stats[n]);
// Averaged Profiles
cudaFree(Yprofile.u[n]);
cudaFree(Yprofile.v[n]);
cudaFree(Yprofile.w[n]);
cudaFree(Yprofile.s[n]);
// Destroy cufft plans
cufftDestroy(fft.p1d[n]);
cufftDestroy(fft.p2d[n]);
cufftDestroy(fft.invp2d[n]);
}
// Deallocate pointer arrays on host memory
// (gpu.* arrays are allocated elsewhere, not in this file's allocate_memory)
cudaFreeHost(gpu.gpunum);
cudaFreeHost(gpu.ny);
cudaFreeHost(gpu.nx);
cudaFreeHost(gpu.start_x);
cudaFreeHost(gpu.start_y);
cudaFreeHost(k);
cudaFreeHost(temp_advective);
// NOTE(review): the fft.p1d/p2d/invp2d pointer tables allocated in
// allocate_memory() are not freed here.
cudaFreeHost(fft.wsize_f);
cudaFreeHost(fft.wsize_i);
cudaFreeHost(fft.wspace);
cudaFreeHost(fft.temp);
cudaFreeHost(fft.temp_reorder);
// NOTE(review): `fft` is a struct object; freeing its address pairs with the
// questionable cudaHostAlloc((void**)&fft, ...) in allocate_memory() -- both
// look unintended (same for &Yprofile below); verify against declare.h.
cudaFreeHost(&fft);
cudaFreeHost(vel.uh);
cudaFreeHost(vel.vh);
cudaFreeHost(vel.wh);
cudaFreeHost(vel.sh);
cudaFreeHost(rhs.uh);
cudaFreeHost(rhs.vh);
cudaFreeHost(rhs.wh);
cudaFreeHost(rhs.sh);
cudaFreeHost(rhs_old.uh);
cudaFreeHost(rhs_old.vh);
cudaFreeHost(rhs_old.wh);
cudaFreeHost(rhs_old.sh);
cudaFreeHost(stats);
// Averaged Profiles
cudaFreeHost(Yprofile.u);
cudaFreeHost(Yprofile.v);
cudaFreeHost(Yprofile.w);
cudaFreeHost(Yprofile.s);
cudaFreeHost(&Yprofile);
// Deallocate memory on CPU
free(h_vel.u);
free(h_vel.v);
free(h_vel.w);
free(h_vel.s);
return;
}
|
d5238fbebb29ca920adaaebed33feadfd56f9a85.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved.
// Copyright 2015. UChicago Argonne, LLC. This software was produced
// under U.S. Government contract DE-AC02-06CH11357 for Argonne National
// Laboratory (ANL), which is operated by UChicago Argonne, LLC for the
// U.S. Department of Energy. The U.S. Government has rights to use,
// reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
// UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
// ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
// modified to produce derivative works, such modified software should
// be clearly marked, so as not to confuse it with the version available
// from ANL.
// Additionally, redistribution and use in source and binary forms, with
// or without modification, are permitted provided that the following
// conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation andwith the
// distribution.
// * Neither the name of UChicago Argonne, LLC, Argonne National
// Laboratory, ANL, the U.S. Government, nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago
// Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// ---------------------------------------------------------------
// TOMOPY CUDA implementation
#include "common.hh"
#include "macros.hh"
#include "utils.hh"
//======================================================================================//
#if defined(TOMOPY_USE_NVTX)
nvtxEventAttributes_t nvtx_total;
nvtxEventAttributes_t nvtx_iteration;
nvtxEventAttributes_t nvtx_slice;
nvtxEventAttributes_t nvtx_projection;
nvtxEventAttributes_t nvtx_update;
nvtxEventAttributes_t nvtx_rotate;
//--------------------------------------------------------------------------------------//
void
init_nvtx()
{
    // Populate the global NVTX event-attribute structs exactly once; later
    // calls are no-ops.
    static bool configured = false;
    if(configured)
        return;
    configured = true;
    // helper: fill one attribute struct with an ARGB color and ASCII label
    auto configure = [](nvtxEventAttributes_t& attr, unsigned int argb,
                        const char* label) {
        attr.version       = NVTX_VERSION;
        attr.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
        attr.colorType     = NVTX_COLOR_ARGB;
        attr.color         = argb;
        attr.messageType   = NVTX_MESSAGE_TYPE_ASCII;
        attr.message.ascii = label;
    };
    configure(nvtx_total, 0xff0000ff, "total time for all iterations"); // blue
    configure(nvtx_iteration, 0xffffff00, "time per iteration");        // yellow
    configure(nvtx_slice, 0xff00ffff, "time per slice");                // cyan
    // NOTE(review): original comment said "pink", but the value is the same
    // cyan as nvtx_slice — value kept as-is to preserve behavior.
    configure(nvtx_projection, 0xff00ffff, "time per projection");
    configure(nvtx_update, 0xff99ff99, "time updating");                // light green
    configure(nvtx_rotate, 0xff0000ff, "time rotating");                // blue
}
#endif
//======================================================================================//
// Assign a device to the calling thread.
// Maps 'device' into the valid range [0, deviceCount), records it in the
// thread-static slot, makes it current via hipSetDevice, and returns the
// mapped index.  Returns -1 when no device is available.
int
cuda_set_device(int device)
{
    int deviceCount = cuda_device_count();
    if(deviceCount == 0)
        return -1;
    // don't set to higher than number of devices.  NOTE: C++ '%' keeps the
    // sign of the dividend, so a negative 'device' would produce a negative
    // index that hipSetDevice rejects — normalize into [0, deviceCount).
    device = ((device % deviceCount) + deviceCount) % deviceCount;
    // update thread-static variable
    this_thread_device() = device;
    // actually set the device
    hipSetDevice(device);
    // return the modulus
    return device;
}
//======================================================================================//
// Multiprocessor count of the device assigned to the calling thread.
// Returns 0 when no device is available; results are memoized per thread so
// each device id is queried at most once.
int
cuda_multi_processor_count()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    hipSetDevice(device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.multiProcessorCount);
}
//======================================================================================//
// Maximum threads per block for the device assigned to the calling thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_max_threads_per_block()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    hipSetDevice(device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.maxThreadsPerBlock);
}
//======================================================================================//
// Warp size of the device assigned to the calling thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_warp_size()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    hipSetDevice(device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.warpSize);
}
//======================================================================================//
// Shared memory per block (bytes) of the device assigned to this thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_shared_memory_per_block()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    hipSetDevice(device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.sharedMemPerBlock);
}
//======================================================================================//
// Number of visible HIP/CUDA devices, or 0 when the runtime reports an error.
int
cuda_device_count()
{
    int count = 0;
    if(hipGetDeviceCount(&count) != hipSuccess)
        return 0;
    return count;
}
//======================================================================================//
// Print a detailed report of every HIP/CUDA-capable device to stdout.
// With multiple python worker threads, only the first caller performs the
// (verbose) query; the atomic counter resets once all threads have entered.
void
cuda_device_query()
{
    auto pythreads = GetEnv("TOMOPY_PYTHON_THREADS", HW_CONCURRENCY);
    static std::atomic<int16_t> _once;
    auto _count = _once++;
    // reset the gate after the last expected thread has passed through
    if(_count + 1 == pythreads)
        _once.store(0);
    // only the first thread proceeds to print
    if(_count > 0)
        return;
    int deviceCount = 0;
    int driverVersion = 0;
    int runtimeVersion = 0;
    hipError_t error_id = hipGetDeviceCount(&deviceCount);
    if(error_id != hipSuccess)
    {
        // report the error; still attempt a minimal query of device 0 in
        // case the runtime reported a count before failing
        printf("hipGetDeviceCount returned error code %d\n--> %s\n",
               static_cast<int>(error_id), hipGetErrorString(error_id));
        if(deviceCount > 0)
        {
            hipSetDevice(0);
            hipDeviceProp_t deviceProp;
            hipGetDeviceProperties(&deviceProp, 0);
            printf("\nDevice %d: \"%s\"\n", 0, deviceProp.name);
            // Console log
            hipDriverGetVersion(&driverVersion);
            hipRuntimeGetVersion(&runtimeVersion);
            printf("  CUDA Driver Version / Runtime Version          %d.%d / "
                   "%d.%d\n",
                   driverVersion / 1000, (driverVersion % 100) / 10,
                   runtimeVersion / 1000, (runtimeVersion % 100) / 10);
            printf("  CUDA Capability Major/Minor version number:    %d.%d\n",
                   deviceProp.major, deviceProp.minor);
        }
        return;
    }
    // serialize console output across threads
    AutoLock l(TypeMutex<decltype(std::cout)>());
    if(deviceCount == 0)
        printf("No available CUDA device(s) detected\n");
    else
        printf("Detected %d CUDA capable devices\n", deviceCount);
    // one report section per device
    for(int dev = 0; dev < deviceCount; ++dev)
    {
        hipSetDevice(dev);
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, dev);
        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
        // Console log
        hipDriverGetVersion(&driverVersion);
        hipRuntimeGetVersion(&runtimeVersion);
        printf("  CUDA Driver Version / Runtime Version          %d.%d / %d.%d\n",
               driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000,
               (runtimeVersion % 100) / 10);
        printf("  CUDA Capability Major/Minor version number:    %d.%d\n",
               deviceProp.major, deviceProp.minor);
        char msg[256];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
        sprintf_s(msg, sizeof(msg),
                  "  Total amount of global memory:                 %.0f MBytes "
                  "(%llu bytes)\n",
                  static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
                  (unsigned long long) deviceProp.totalGlobalMem);
#else
        snprintf(msg, sizeof(msg),
                 "  Total amount of global memory:                 %.0f MBytes "
                 "(%llu bytes)\n",
                 static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
                 (unsigned long long) deviceProp.totalGlobalMem);
#endif
        printf("%s", msg);
        printf("  GPU Max Clock rate:                            %.0f MHz (%0.2f "
               "GHz)\n",
               deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 5000
        // This is supported in CUDA 5.0 (runtime API device properties)
        printf("  Memory Clock rate:                             %.0f Mhz\n",
               deviceProp.memoryClockRate * 1e-3f);
        printf("  Memory Bus Width:                              %d-bit\n",
               deviceProp.memoryBusWidth);
        if(deviceProp.l2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n",
                   deviceProp.l2CacheSize);
        }
#else
        // This only available in CUDA 4.0-4.2 (but these were only exposed in
        // the CUDA Driver API)
        int memoryClock;
        int memBusWidth;
        int L2CacheSize;
        getCudaAttribute<int>(&memoryClock, hipDeviceAttributeMemoryClockRate, dev);
        printf("  Memory Clock rate:                             %.0f Mhz\n",
               memoryClock * 1e-3f);
        getCudaAttribute<int>(&memBusWidth, hipDeviceAttributeMemoryBusWidth,
                              dev);
        printf("  Memory Bus Width:                              %d-bit\n", memBusWidth);
        getCudaAttribute<int>(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev);
        if(L2CacheSize)
            printf("  L2 Cache Size:                                 %d bytes\n",
                   L2CacheSize);
#endif
        printf("  Maximum Texture Dimension Size (x,y,z)         1D=(%d), 2D=(%d, "
               "%d), 3D=(%d, %d, %d)\n",
               deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
               deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
               deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        printf("  Maximum Layered 1D Texture Size, (num) layers  1D=(%d), %d "
               "layers\n",
               deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
        printf("  Maximum Layered 2D Texture Size, (num) layers  2D=(%d, %d), %d "
               "layers\n",
               deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
               deviceProp.maxTexture2DLayered[2]);
        printf("  Total amount of constant memory:               %lu bytes\n",
               deviceProp.totalConstMem);
        printf("  Total amount of shared memory per block:       %lu bytes\n",
               deviceProp.sharedMemPerBlock);
        printf("  Total number of registers available per block: %d\n",
               deviceProp.regsPerBlock);
        printf("  Warp size:                                     %d\n",
               deviceProp.warpSize);
        printf("  Multiprocessor count:                          %d\n",
               deviceProp.multiProcessorCount);
        printf("  Maximum number of threads per multiprocessor:  %d\n",
               deviceProp.maxThreadsPerMultiProcessor);
        printf("  Maximum number of threads per block:           %d\n",
               deviceProp.maxThreadsPerBlock);
        printf("  Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
               deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
               deviceProp.maxThreadsDim[2]);
        printf("  Max dimension size of a grid size    (x,y,z): (%d, %d, %d)\n",
               deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
               deviceProp.maxGridSize[2]);
        printf("  Maximum memory pitch:                          %lu bytes\n",
               deviceProp.memPitch);
        printf("  Texture alignment:                             %lu bytes\n",
               deviceProp.textureAlignment);
        printf("  Concurrent copy and kernel execution:          %s with %d copy "
               "engine(s)\n",
               (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
        printf("  Run time limit on kernels:                     %s\n",
               deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
        printf("  Integrated GPU sharing Host Memory:            %s\n",
               deviceProp.integrated ? "Yes" : "No");
        printf("  Support host page-locked memory mapping:       %s\n",
               deviceProp.canMapHostMemory ? "Yes" : "No");
        printf("  Alignment requirement for Surfaces:            %s\n",
               deviceProp.surfaceAlignment ? "Yes" : "No");
        printf("  Device has ECC support:                        %s\n",
               deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
        printf("  CUDA Device Driver Mode (TCC or WDDM):         %s\n",
               deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)"
                                    : "WDDM (Windows Display Driver Model)");
#endif
        printf("  Device supports Unified Addressing (UVA):      %s\n",
               deviceProp.unifiedAddressing ? "Yes" : "No");
        printf("  Device supports Compute Preemption:            %s\n",
               deviceProp.computePreemptionSupported ? "Yes" : "No");
        printf("  Supports Cooperative Kernel Launch:            %s\n",
               deviceProp.cooperativeLaunch ? "Yes" : "No");
        printf("  Supports MultiDevice Co-op Kernel Launch:      %s\n",
               deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No");
        printf("  Device PCI Domain ID / Bus ID / location ID:   %d / %d / %d\n",
               deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
        // human-readable names for hipComputeMode values 0..3 (+ fallback)
        const char* sComputeMode[] = {
            "Default (multiple host threads can use ::hipSetDevice() with "
            "device "
            "simultaneously)",
            "Exclusive (only one host thread in one process is able to use "
            "::hipSetDevice() with this device)",
            "Prohibited (no host thread can use ::hipSetDevice() with this "
            "device)",
            "Exclusive Process (many threads in one process is able to use "
            "::hipSetDevice() with this device)",
            "Unknown",
            nullptr
        };
        printf("  Compute Mode:\n");
        printf("     < %s >\n", sComputeMode[deviceProp.computeMode]);
    }
    printf("\n\n");
    CUDA_CHECK_LAST_ERROR();
}
//======================================================================================//
| d5238fbebb29ca920adaaebed33feadfd56f9a85.cu | // Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved.
// Copyright 2015. UChicago Argonne, LLC. This software was produced
// under U.S. Government contract DE-AC02-06CH11357 for Argonne National
// Laboratory (ANL), which is operated by UChicago Argonne, LLC for the
// U.S. Department of Energy. The U.S. Government has rights to use,
// reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
// UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
// ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
// modified to produce derivative works, such modified software should
// be clearly marked, so as not to confuse it with the version available
// from ANL.
// Additionally, redistribution and use in source and binary forms, with
// or without modification, are permitted provided that the following
// conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation andwith the
// distribution.
// * Neither the name of UChicago Argonne, LLC, Argonne National
// Laboratory, ANL, the U.S. Government, nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago
// Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// ---------------------------------------------------------------
// TOMOPY CUDA implementation
#include "common.hh"
#include "macros.hh"
#include "utils.hh"
//======================================================================================//
#if defined(TOMOPY_USE_NVTX)
nvtxEventAttributes_t nvtx_total;
nvtxEventAttributes_t nvtx_iteration;
nvtxEventAttributes_t nvtx_slice;
nvtxEventAttributes_t nvtx_projection;
nvtxEventAttributes_t nvtx_update;
nvtxEventAttributes_t nvtx_rotate;
//--------------------------------------------------------------------------------------//
void
init_nvtx()
{
    // Populate the global NVTX event-attribute structs exactly once; later
    // calls are no-ops.
    static bool configured = false;
    if(configured)
        return;
    configured = true;
    // helper: fill one attribute struct with an ARGB color and ASCII label
    auto configure = [](nvtxEventAttributes_t& attr, unsigned int argb,
                        const char* label) {
        attr.version       = NVTX_VERSION;
        attr.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
        attr.colorType     = NVTX_COLOR_ARGB;
        attr.color         = argb;
        attr.messageType   = NVTX_MESSAGE_TYPE_ASCII;
        attr.message.ascii = label;
    };
    configure(nvtx_total, 0xff0000ff, "total time for all iterations"); // blue
    configure(nvtx_iteration, 0xffffff00, "time per iteration");        // yellow
    configure(nvtx_slice, 0xff00ffff, "time per slice");                // cyan
    // NOTE(review): original comment said "pink", but the value is the same
    // cyan as nvtx_slice — value kept as-is to preserve behavior.
    configure(nvtx_projection, 0xff00ffff, "time per projection");
    configure(nvtx_update, 0xff99ff99, "time updating");                // light green
    configure(nvtx_rotate, 0xff0000ff, "time rotating");                // blue
}
#endif
//======================================================================================//
// Assign a device to the calling thread.
// Maps 'device' into the valid range [0, deviceCount), records it in the
// thread-static slot, makes it current via cudaSetDevice, and returns the
// mapped index.  Returns -1 when no device is available.
int
cuda_set_device(int device)
{
    int deviceCount = cuda_device_count();
    if(deviceCount == 0)
        return -1;
    // don't set to higher than number of devices.  NOTE: C++ '%' keeps the
    // sign of the dividend, so a negative 'device' would produce a negative
    // index that cudaSetDevice rejects — normalize into [0, deviceCount).
    device = ((device % deviceCount) + deviceCount) % deviceCount;
    // update thread-static variable
    this_thread_device() = device;
    // actually set the device
    cudaSetDevice(device);
    // return the modulus
    return device;
}
//======================================================================================//
// Multiprocessor count of the device assigned to the calling thread.
// Returns 0 when no device is available; results are memoized per thread so
// each device id is queried at most once.
int
cuda_multi_processor_count()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    cudaSetDevice(device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.multiProcessorCount);
}
//======================================================================================//
// Maximum threads per block for the device assigned to the calling thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_max_threads_per_block()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    cudaSetDevice(device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.maxThreadsPerBlock);
}
//======================================================================================//
// Warp size of the device assigned to the calling thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_warp_size()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    cudaSetDevice(device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.warpSize);
}
//======================================================================================//
// Shared memory per block (bytes) of the device assigned to this thread.
// Returns 0 when no device is available; memoized per thread per device id.
int
cuda_shared_memory_per_block()
{
    if(cuda_device_count() == 0)
        return 0;
    // thread-local cache keyed by device id
    static thread_local cuda_device_info<int>* _cache = new cuda_device_info<int>();
    const int device = this_thread_device();
    auto      entry  = _cache->find(device);
    if(entry != _cache->end())
        return entry->second;
    cudaSetDevice(device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return ((*_cache)[device] = prop.sharedMemPerBlock);
}
//======================================================================================//
// Number of visible CUDA devices, or 0 when the runtime reports an error.
int
cuda_device_count()
{
    int count = 0;
    if(cudaGetDeviceCount(&count) != cudaSuccess)
        return 0;
    return count;
}
//======================================================================================//
// Print a detailed report of every CUDA-capable device to stdout.
// With multiple python worker threads, only the first caller performs the
// (verbose) query; the atomic counter resets once all threads have entered.
void
cuda_device_query()
{
    auto pythreads = GetEnv("TOMOPY_PYTHON_THREADS", HW_CONCURRENCY);
    static std::atomic<int16_t> _once;
    auto _count = _once++;
    // reset the gate after the last expected thread has passed through
    if(_count + 1 == pythreads)
        _once.store(0);
    // only the first thread proceeds to print
    if(_count > 0)
        return;
    int deviceCount = 0;
    int driverVersion = 0;
    int runtimeVersion = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if(error_id != cudaSuccess)
    {
        // report the error; still attempt a minimal query of device 0 in
        // case the runtime reported a count before failing
        printf("cudaGetDeviceCount returned error code %d\n--> %s\n",
               static_cast<int>(error_id), cudaGetErrorString(error_id));
        if(deviceCount > 0)
        {
            cudaSetDevice(0);
            cudaDeviceProp deviceProp;
            cudaGetDeviceProperties(&deviceProp, 0);
            printf("\nDevice %d: \"%s\"\n", 0, deviceProp.name);
            // Console log
            cudaDriverGetVersion(&driverVersion);
            cudaRuntimeGetVersion(&runtimeVersion);
            printf("  CUDA Driver Version / Runtime Version          %d.%d / "
                   "%d.%d\n",
                   driverVersion / 1000, (driverVersion % 100) / 10,
                   runtimeVersion / 1000, (runtimeVersion % 100) / 10);
            printf("  CUDA Capability Major/Minor version number:    %d.%d\n",
                   deviceProp.major, deviceProp.minor);
        }
        return;
    }
    // serialize console output across threads
    AutoLock l(TypeMutex<decltype(std::cout)>());
    if(deviceCount == 0)
        printf("No available CUDA device(s) detected\n");
    else
        printf("Detected %d CUDA capable devices\n", deviceCount);
    // one report section per device
    for(int dev = 0; dev < deviceCount; ++dev)
    {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
        // Console log
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        printf("  CUDA Driver Version / Runtime Version          %d.%d / %d.%d\n",
               driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000,
               (runtimeVersion % 100) / 10);
        printf("  CUDA Capability Major/Minor version number:    %d.%d\n",
               deviceProp.major, deviceProp.minor);
        char msg[256];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
        sprintf_s(msg, sizeof(msg),
                  "  Total amount of global memory:                 %.0f MBytes "
                  "(%llu bytes)\n",
                  static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
                  (unsigned long long) deviceProp.totalGlobalMem);
#else
        snprintf(msg, sizeof(msg),
                 "  Total amount of global memory:                 %.0f MBytes "
                 "(%llu bytes)\n",
                 static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
                 (unsigned long long) deviceProp.totalGlobalMem);
#endif
        printf("%s", msg);
        printf("  GPU Max Clock rate:                            %.0f MHz (%0.2f "
               "GHz)\n",
               deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 5000
        // This is supported in CUDA 5.0 (runtime API device properties)
        printf("  Memory Clock rate:                             %.0f Mhz\n",
               deviceProp.memoryClockRate * 1e-3f);
        printf("  Memory Bus Width:                              %d-bit\n",
               deviceProp.memoryBusWidth);
        if(deviceProp.l2CacheSize)
        {
            printf("  L2 Cache Size:                                 %d bytes\n",
                   deviceProp.l2CacheSize);
        }
#else
        // This only available in CUDA 4.0-4.2 (but these were only exposed in
        // the CUDA Driver API)
        int memoryClock;
        int memBusWidth;
        int L2CacheSize;
        getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
        printf("  Memory Clock rate:                             %.0f Mhz\n",
               memoryClock * 1e-3f);
        getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH,
                              dev);
        printf("  Memory Bus Width:                              %d-bit\n", memBusWidth);
        getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);
        if(L2CacheSize)
            printf("  L2 Cache Size:                                 %d bytes\n",
                   L2CacheSize);
#endif
        printf("  Maximum Texture Dimension Size (x,y,z)         1D=(%d), 2D=(%d, "
               "%d), 3D=(%d, %d, %d)\n",
               deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
               deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
               deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
        printf("  Maximum Layered 1D Texture Size, (num) layers  1D=(%d), %d "
               "layers\n",
               deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
        printf("  Maximum Layered 2D Texture Size, (num) layers  2D=(%d, %d), %d "
               "layers\n",
               deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
               deviceProp.maxTexture2DLayered[2]);
        printf("  Total amount of constant memory:               %lu bytes\n",
               deviceProp.totalConstMem);
        printf("  Total amount of shared memory per block:       %lu bytes\n",
               deviceProp.sharedMemPerBlock);
        printf("  Total number of registers available per block: %d\n",
               deviceProp.regsPerBlock);
        printf("  Warp size:                                     %d\n",
               deviceProp.warpSize);
        printf("  Multiprocessor count:                          %d\n",
               deviceProp.multiProcessorCount);
        printf("  Maximum number of threads per multiprocessor:  %d\n",
               deviceProp.maxThreadsPerMultiProcessor);
        printf("  Maximum number of threads per block:           %d\n",
               deviceProp.maxThreadsPerBlock);
        printf("  Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
               deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
               deviceProp.maxThreadsDim[2]);
        printf("  Max dimension size of a grid size    (x,y,z): (%d, %d, %d)\n",
               deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
               deviceProp.maxGridSize[2]);
        printf("  Maximum memory pitch:                          %lu bytes\n",
               deviceProp.memPitch);
        printf("  Texture alignment:                             %lu bytes\n",
               deviceProp.textureAlignment);
        printf("  Concurrent copy and kernel execution:          %s with %d copy "
               "engine(s)\n",
               (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
        printf("  Run time limit on kernels:                     %s\n",
               deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
        printf("  Integrated GPU sharing Host Memory:            %s\n",
               deviceProp.integrated ? "Yes" : "No");
        printf("  Support host page-locked memory mapping:       %s\n",
               deviceProp.canMapHostMemory ? "Yes" : "No");
        printf("  Alignment requirement for Surfaces:            %s\n",
               deviceProp.surfaceAlignment ? "Yes" : "No");
        printf("  Device has ECC support:                        %s\n",
               deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
        printf("  CUDA Device Driver Mode (TCC or WDDM):         %s\n",
               deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)"
                                    : "WDDM (Windows Display Driver Model)");
#endif
        printf("  Device supports Unified Addressing (UVA):      %s\n",
               deviceProp.unifiedAddressing ? "Yes" : "No");
        printf("  Device supports Compute Preemption:            %s\n",
               deviceProp.computePreemptionSupported ? "Yes" : "No");
        printf("  Supports Cooperative Kernel Launch:            %s\n",
               deviceProp.cooperativeLaunch ? "Yes" : "No");
        printf("  Supports MultiDevice Co-op Kernel Launch:      %s\n",
               deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No");
        printf("  Device PCI Domain ID / Bus ID / location ID:   %d / %d / %d\n",
               deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
        // human-readable names for cudaComputeMode values 0..3 (+ fallback)
        const char* sComputeMode[] = {
            "Default (multiple host threads can use ::cudaSetDevice() with "
            "device "
            "simultaneously)",
            "Exclusive (only one host thread in one process is able to use "
            "::cudaSetDevice() with this device)",
            "Prohibited (no host thread can use ::cudaSetDevice() with this "
            "device)",
            "Exclusive Process (many threads in one process is able to use "
            "::cudaSetDevice() with this device)",
            "Unknown",
            nullptr
        };
        printf("  Compute Mode:\n");
        printf("     < %s >\n", sComputeMode[deviceProp.computeMode]);
    }
    printf("\n\n");
    CUDA_CHECK_LAST_ERROR();
}
//======================================================================================//
|
6b6f1444faa0236af4b18f15a328db27fc4439be.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "stats/stddev.h"
#include "stats/mean.h"
#include "random/rng.h"
#include "test_utils.h"
#include "matrix/math.h"
namespace MLCommon {
namespace Stats {
// Parameter set for one stddev test case.
// NOTE: member order matters — the test tables below rely on aggregate
// initialization in exactly this order.
template <typename T>
struct StdDevInputs {
    T tolerance, mean, stddev;     // comparison tolerance; normal-dist mean / stddev
    int rows, cols;                // dimensions of the random input matrix
    bool sample, rowMajor;         // sample (n-1) vs population (n); storage layout
    unsigned long long int seed;   // RNG seed for reproducibility
};
// Stream inserter used by gtest to describe a parameterized test case.
// The original stub ignored 'dims' and printed nothing, so failing cases
// carried no information about which parameter set failed.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims) {
    os << "{tolerance=" << dims.tolerance << ", mean=" << dims.mean
       << ", stddev=" << dims.stddev << ", rows=" << dims.rows
       << ", cols=" << dims.cols << ", sample=" << dims.sample
       << ", rowMajor=" << dims.rowMajor << ", seed=" << dims.seed << "}";
    return os;
}
// Fixture: draws a normal(mean, stddev) random matrix, computes column-wise
// stddev/variance on a single GPU, and — when at least two GPUs are present —
// repeats the stddev computation with the multi-GPU column-split path.
template <typename T>
class StdDevTest: public ::testing::TestWithParam<StdDevInputs<T> > {
protected:
    void SetUp() override {
        params = ::testing::TestWithParam<StdDevInputs<T>>::GetParam();
        Random::Rng<T> r(params.seed);
        int rows = params.rows, cols = params.cols;
        int len = rows * cols;
        // device buffers: input matrix plus per-column mean/stddev/variance
        allocate(data, len);
        allocate(mean_act, cols);
        allocate(stddev_act, cols);
        allocate(vars_act, cols);
        r.normal(data, len, params.mean, params.stddev);
        stdVarSGtest(data);
        CUDA_CHECK(hipGetDeviceCount(&device_count));
        // multi-GPU path needs the input on the host to split it
        if (device_count > 1) {
            T *h_data = (T *) malloc(len * sizeof(T));
            updateHost(h_data, data, len);
            stdMGColSplitTest(h_data);
            free(h_data);
        }
    }
    // Single-GPU reference: column means, then stddev and variance;
    // seqRoot turns vars_act into sqrt(var) so it can be compared to stddev.
    void stdVarSGtest(T *data) {
        int rows = params.rows, cols = params.cols;
        mean(mean_act, data, cols, rows, params.sample, params.rowMajor);
        stddev(stddev_act, data, mean_act, cols, rows, params.sample,
               params.rowMajor);
        vars(vars_act, data, mean_act, cols, rows, params.sample,
             params.rowMajor);
        Matrix::seqRoot(vars_act, T(1), cols);
    }
    // Multi-GPU path: split columns across 2 GPUs, compute mean then stddev,
    // and copy the result back into stddev_act_2 for comparison.
    // NOTE(review): 'TypeMG<T> d_data[n_gpus]' with a non-const bound is a
    // variable-length array (compiler extension, not standard C++) — confirm
    // the supported compilers accept it.
    void stdMGColSplitTest(T *h_data) {
        int n_gpus = 2;
        TypeMG<T> d_data[n_gpus];
        TypeMG<T> d_mu[n_gpus];
        TypeMG<T> d_std[n_gpus];
        // one stream per GPU, shared by all three buffers on that GPU
        for (int i = 0; i < n_gpus; i++) {
            d_data[i].gpu_id = i;
            d_mu[i].gpu_id = i;
            d_std[i].gpu_id = i;
            CUDA_CHECK(hipSetDevice(d_data[i].gpu_id));
            CUDA_CHECK(hipStreamCreate(&(d_data[i].stream)));
            d_mu[i].stream = d_data[i].stream;
            d_std[i].stream = d_data[i].stream;
        }
        allocateMG(d_data, n_gpus, params.rows, params.cols, true, true, false);
        allocateMG(d_mu, n_gpus, 1, params.cols, true, true, false);
        allocateMG(d_std, n_gpus, 1, params.cols, true, true, false);
        updateDeviceMG(d_data, h_data, n_gpus, false);
        meanMG(d_mu, d_data, params.cols, params.rows, n_gpus, true, false,
               false, false);
        stddevMG(d_std, d_data, d_mu, params.cols, params.rows,n_gpus, true, false, false,
                 false);
        int len = params.cols;
        T *h_std = (T *) malloc(len * sizeof(T));
        updateHostMG(h_std, d_std, n_gpus, false);
        // synchronize before tearing the streams down
        streamSyncMG(d_data, n_gpus);
        streamDestroyGPUs(d_data, n_gpus);
        freeMG(d_data, n_gpus);
        freeMG(d_mu, n_gpus);
        freeMG(d_std, n_gpus);
        allocate(stddev_act_2, len);
        updateDevice(stddev_act_2, h_std, len);
        free(h_std);
    }
    void TearDown() override {
        CUDA_CHECK(hipFree(data));
        CUDA_CHECK(hipFree(mean_act));
        CUDA_CHECK(hipFree(stddev_act));
        CUDA_CHECK(hipFree(vars_act));
        // stddev_act_2 is only allocated on the multi-GPU path
        if (device_count > 1) {
            CUDA_CHECK(hipFree(stddev_act_2));
        }
    }
protected:
    StdDevInputs<T> params;
    // device pointers (see SetUp); stddev_act_2 holds the multi-GPU result
    T *data, *mean_act, *stddev_act, *vars_act, *stddev_act_2;
    int device_count = 0;
};
// Single-precision cases.  Columns (see StdDevInputs):
// tolerance, mean, stddev, rows, cols, sample, rowMajor, seed
const std::vector<StdDevInputs<float> > inputsf = {
    {0.05f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
    {0.05f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
    {0.05f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}
};
const std::vector<StdDevInputs<double> > inputsd = {
{0.05, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 256, false, true, 1234ULL}
};
typedef StdDevTest<float> StdDevTestF;
// Checks (float): (1) computed stddev matches the analytic value within
// tolerance, (2) stddev agrees with sqrt of vars (seqRoot applied during
// setup), and (3) when >1 GPU is present, the multi-GPU column-split
// result matches the analytic value as well.
TEST_P(StdDevTestF, Result) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols,
CompareApprox<float>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act_2, params.cols,
CompareApprox<float>(params.tolerance)));
}
}
typedef StdDevTest<double> StdDevTestD;
// Same three checks as StdDevTestF, instantiated for double precision.
TEST_P(StdDevTestD, Result){
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols,
CompareApprox<double>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act_2, params.cols,
CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
| 6b6f1444faa0236af4b18f15a328db27fc4439be.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "stats/stddev.h"
#include "stats/mean.h"
#include "random/rng.h"
#include "test_utils.h"
#include "matrix/math.h"
namespace MLCommon {
namespace Stats {
// Parameter pack describing one stddev test case.
template <typename T>
struct StdDevInputs {
T tolerance, mean, stddev; // comparison tolerance; mean/stddev of the generated normal data
int rows, cols; // matrix dimensions of the generated data
bool sample, rowMajor; // flags forwarded to mean()/stddev()/vars(); rowMajor selects storage layout
unsigned long long int seed; // RNG seed for reproducible data generation
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims) {
return os;
}
// Fixture: generates a normal-distributed matrix, computes mean/stddev/vars
// on one GPU, and (when >=2 GPUs exist) re-computes stddev with the
// multi-GPU column-split path for cross-checking in the TEST_P bodies.
template <typename T>
class StdDevTest: public ::testing::TestWithParam<StdDevInputs<T> > {
protected:
// Allocate device buffers, generate data, and run both code paths.
void SetUp() override {
params = ::testing::TestWithParam<StdDevInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int rows = params.rows, cols = params.cols;
int len = rows * cols;
allocate(data, len);
allocate(mean_act, cols);
allocate(stddev_act, cols);
allocate(vars_act, cols);
// fill `data` with N(mean, stddev) samples
r.normal(data, len, params.mean, params.stddev);
stdVarSGtest(data);
CUDA_CHECK(cudaGetDeviceCount(&device_count));
if (device_count > 1) {
// multi-GPU path needs a host copy of the generated data
T *h_data = (T *) malloc(len * sizeof(T));
updateHost(h_data, data, len);
stdMGColSplitTest(h_data);
free(h_data);
}
}
// Single-GPU reference path: mean, stddev, and vars (then sqrt'ed in
// place via seqRoot so vars_act is directly comparable to stddev_act).
void stdVarSGtest(T *data) {
int rows = params.rows, cols = params.cols;
mean(mean_act, data, cols, rows, params.sample, params.rowMajor);
stddev(stddev_act, data, mean_act, cols, rows, params.sample,
params.rowMajor);
vars(vars_act, data, mean_act, cols, rows, params.sample,
params.rowMajor);
Matrix::seqRoot(vars_act, T(1), cols);
}
// Multi-GPU path: split data across 2 GPUs column-wise, compute mean and
// stddev with the *MG primitives, and copy the result into stddev_act_2.
void stdMGColSplitTest(T *h_data) {
int n_gpus = 2;
// NOTE(review): variable-length arrays are a compiler extension, not
// standard C++ — consider std::vector if portability matters.
TypeMG<T> d_data[n_gpus];
TypeMG<T> d_mu[n_gpus];
TypeMG<T> d_std[n_gpus];
for (int i = 0; i < n_gpus; i++) {
d_data[i].gpu_id = i;
d_mu[i].gpu_id = i;
d_std[i].gpu_id = i;
CUDA_CHECK(cudaSetDevice(d_data[i].gpu_id));
CUDA_CHECK(cudaStreamCreate(&(d_data[i].stream)));
// all three descriptors for a GPU share one stream
d_mu[i].stream = d_data[i].stream;
d_std[i].stream = d_data[i].stream;
}
allocateMG(d_data, n_gpus, params.rows, params.cols, true, true, false);
allocateMG(d_mu, n_gpus, 1, params.cols, true, true, false);
allocateMG(d_std, n_gpus, 1, params.cols, true, true, false);
updateDeviceMG(d_data, h_data, n_gpus, false);
meanMG(d_mu, d_data, params.cols, params.rows, n_gpus, true, false,
false, false);
stddevMG(d_std, d_data, d_mu, params.cols, params.rows,n_gpus, true, false, false,
false);
int len = params.cols;
T *h_std = (T *) malloc(len * sizeof(T));
// NOTE(review): updateHostMG is issued before streamSyncMG — presumably
// the copy is stream-ordered on the same streams; verify against the
// *MG helper implementations.
updateHostMG(h_std, d_std, n_gpus, false);
streamSyncMG(d_data, n_gpus);
streamDestroyGPUs(d_data, n_gpus);
freeMG(d_data, n_gpus);
freeMG(d_mu, n_gpus);
freeMG(d_std, n_gpus);
// stage the multi-GPU result into a device buffer for devArrMatch
allocate(stddev_act_2, len);
updateDevice(stddev_act_2, h_std, len);
free(h_std);
}
// Free all buffers from SetUp(); stddev_act_2 only exists with >1 GPU.
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(mean_act));
CUDA_CHECK(cudaFree(stddev_act));
CUDA_CHECK(cudaFree(vars_act));
if (device_count > 1) {
CUDA_CHECK(cudaFree(stddev_act_2));
}
}
protected:
StdDevInputs<T> params; // current test-case parameters
// device buffers: input data, computed mean, computed stddev,
// sqrt(vars), and the multi-GPU stddev result
T *data, *mean_act, *stddev_act, *vars_act, *stddev_act_2;
int device_count = 0; // number of CUDA devices detected in SetUp
};
const std::vector<StdDevInputs<float> > inputsf = {
{0.05f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
{0.05f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
{0.05f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}
};
const std::vector<StdDevInputs<double> > inputsd = {
{0.05, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
{0.05, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
{0.05, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
{0.05, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
{0.05, -1.0, 2.0, 1024, 256, false, true, 1234ULL}
};
typedef StdDevTest<float> StdDevTestF;
TEST_P(StdDevTestF, Result) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols,
CompareApprox<float>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act_2, params.cols,
CompareApprox<float>(params.tolerance)));
}
}
typedef StdDevTest<double> StdDevTestD;
TEST_P(StdDevTestD, Result){
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols,
CompareApprox<double>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(params.stddev, stddev_act_2, params.cols,
CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
|
a5e2aca489e5bc27bc59f0695cb64586cbebd8c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// One up-sweep (reduce) step of the work-efficient Blelloch scan.
// Thread t handles element k = t*2^(d+1) + 2^(d+1) - 1 and accumulates its
// left partner (offset/2 back) into it. `n` must be the padded power-of-two
// array size. Fixed: removed dead `old_val`/debug printf, made the shift
// precedence explicit, and tightened the tail guard to `k >= n`.
__global__ void kernUpSweepIteration(int n, int d, int *idata) {
    int k = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int offset = 1 << (d + 1);   // stride between active elements at depth d
    k = k * offset + offset - 1;       // index of this thread's element
    if (k >= n || k < 0) { return; }   // guard grid tail / int overflow
    idata[k] += idata[k - (offset / 2)];
}
// One down-sweep step of the Blelloch scan: pass the value at k down to the
// left partner and store the running sum at k. `n` must be the padded
// power-of-two size. Fixed: removed dead `old_val`/debug printf, explicit
// shift parentheses, `k >= n` tail guard.
__global__ void kernDownSweepIteration(int n, int d, int *idata) {
    int k = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int offset = 1 << (d + 1);
    k = k * offset + offset - 1;
    if (k >= n || k < 0) { return; }
    const int left = k - (offset / 2);
    const int sum = idata[k] + idata[left];
    idata[left] = idata[k];   // pass the partial result down the tree
    idata[k] = sum;           // accumulate into the right child
}
// Single-thread helper: writes a zero at idata[n]. Launched with <<<1,1>>>
// and n = arr_size - 1 to clear the scan-tree root (the exclusive-scan
// identity) before the down-sweep phase.
__global__ void kernSetZero(int n, int *idata) {
idata[n] = 0;
}
/**
 * Performs an exclusive prefix-sum (work-efficient Blelloch scan) on idata,
 * storing the result into odata.
 *
 * The working buffer is padded up to the next power of two. Fixed: the
 * padding region [n, arr_size) was previously left uninitialized, so the
 * up-sweep folded garbage into the sums for non-power-of-two n; it is now
 * zero-filled. Also removed an allocated-but-unused scratch buffer.
 */
void scan(int n, int *odata, const int *idata) {
    const int blockSize = 128;
    int log = ilog2ceil(n);
    int arr_size = n;
    if ((n & (n - 1)) != 0) {
        arr_size = 1 << log;   // round up to a power of two
    }

    int* dev_odata;
    hipMalloc((void**)&dev_odata, arr_size * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
    hipMemcpy(dev_odata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    if (arr_size > n) {
        // zero the padding so it contributes nothing to the up-sweep sums
        hipMemset(dev_odata + n, 0, sizeof(int) * (arr_size - n));
        checkCUDAErrorWithLine("memset failed!");
    }

    timer().startGpuTimer();
    // up-sweep (reduce) phase
    log = ilog2ceil(arr_size);
    for (int d = 0; d < log; d++) {
        int off_n = arr_size / (1 << (d + 1));   // active elements at depth d
        dim3 fullBlocksPerGrid((off_n + blockSize - 1) / blockSize);
        hipLaunchKernelGGL(kernUpSweepIteration, fullBlocksPerGrid, dim3(blockSize), 0, 0, arr_size, d, dev_odata);
    }
    // down-sweep phase: clear the root, then propagate partial sums
    hipLaunchKernelGGL(kernSetZero, dim3(1), dim3(1), 0, 0, arr_size - 1, dev_odata);
    for (int d = log - 1; d >= 0; d--) {
        int off_n = arr_size / (1 << (d + 1));
        dim3 fullBlocksPerGrid((off_n + blockSize - 1) / blockSize);
        hipLaunchKernelGGL(kernDownSweepIteration, fullBlocksPerGrid, dim3(blockSize), 0, 0, arr_size, d, dev_odata);
    }
    timer().endGpuTimer();

    hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");
    hipFree(dev_odata);
}
// Map nonzero entries to 1 in place (the stream-compaction predicate).
// Fixed off-by-one: the original guard `index > n` let the thread with
// index == n read/write one element past the end of the array.
__global__ void kernMapToBoolean(int n, int *idata) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= n) { return; }
    if (idata[index] != 0) {
        idata[index] = 1;
    }
}
// Scatter kept (nonzero-flagged) elements to their scanned destinations.
// Fixed off-by-one: the original guard `index > n` allowed an
// out-of-bounds access at index == n.
__global__ void kernScatter(int n, int *odata, int *idata, int *scanOutData, int *boolOutData) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= n) { return; }
    if (boolOutData[index] != 0) {
        odata[scanOutData[index]] = idata[index];
    }
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * Fixes: the two host staging buffers (predicate and scan results) were
 * leaked; n <= 0 previously indexed scanOutData[-1]; copy-pasted error
 * strings now name the buffer that actually failed.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    if (n <= 0) { return 0; }   // nothing to compact
    const int blockSize = 128;

    // device copies: input, predicate flags, scan result, compacted output
    int* dev_idata;
    hipMalloc((void**)&dev_idata, n * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
    hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    int* dev_boolData;
    hipMalloc((void**)&dev_boolData, n * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_boolData failed!");
    hipMemcpy(dev_boolData, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    int* dev_scanData;
    hipMalloc((void**)&dev_scanData, n * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_scanData failed!");
    int* dev_odata;
    hipMalloc((void**)&dev_odata, n * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_odata failed!");

    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    // 1) predicate: flag nonzero entries with 1
    hipLaunchKernelGGL(kernMapToBoolean, fullBlocksPerGrid, dim3(blockSize), 0, 0, n, dev_boolData);
    int *boolOutData = new int[n];
    hipMemcpy(boolOutData, dev_boolData, sizeof(int) * n, hipMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");
    // 2) exclusive scan of the flags gives each kept element's output slot
    int *scanOutData = new int[n];
    scan(n, scanOutData, boolOutData);
    // exclusive scan: total kept = last scan value + last flag
    int compact_size = scanOutData[n-1] + boolOutData[n-1];
    hipMemcpy(dev_scanData, scanOutData, sizeof(int) * n, hipMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    // 3) scatter kept elements to their slots
    hipLaunchKernelGGL(kernScatter, fullBlocksPerGrid, dim3(blockSize), 0, 0, n, dev_odata, dev_idata, dev_scanData, dev_boolData);
    hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");

    // fixed: release host staging buffers (previously leaked)
    delete[] boolOutData;
    delete[] scanOutData;
    hipFree(dev_idata);
    hipFree(dev_odata);
    hipFree(dev_boolData);
    hipFree(dev_scanData);
    return compact_size;
}
}
}
| a5e2aca489e5bc27bc59f0695cb64586cbebd8c0.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// One up-sweep (reduce) step of the work-efficient Blelloch scan.
// Thread t handles element k = t*2^(d+1) + 2^(d+1) - 1 and accumulates its
// left partner (offset/2 back) into it. `n` must be the padded power-of-two
// array size. Fixed: removed dead `old_val`/debug printf, made the shift
// precedence explicit, and tightened the tail guard to `k >= n`.
__global__ void kernUpSweepIteration(int n, int d, int *idata) {
    int k = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int offset = 1 << (d + 1);   // stride between active elements at depth d
    k = k * offset + offset - 1;       // index of this thread's element
    if (k >= n || k < 0) { return; }   // guard grid tail / int overflow
    idata[k] += idata[k - (offset / 2)];
}
// One down-sweep step of the Blelloch scan: pass the value at k down to the
// left partner and store the running sum at k. `n` must be the padded
// power-of-two size. Fixed: removed dead `old_val`/debug printf, explicit
// shift parentheses, `k >= n` tail guard.
__global__ void kernDownSweepIteration(int n, int d, int *idata) {
    int k = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int offset = 1 << (d + 1);
    k = k * offset + offset - 1;
    if (k >= n || k < 0) { return; }
    const int left = k - (offset / 2);
    const int sum = idata[k] + idata[left];
    idata[left] = idata[k];   // pass the partial result down the tree
    idata[k] = sum;           // accumulate into the right child
}
// Single-thread helper: writes a zero at idata[n]. Launched with <<<1,1>>>
// and n = arr_size - 1 to clear the scan-tree root (the exclusive-scan
// identity) before the down-sweep phase.
__global__ void kernSetZero(int n, int *idata) {
idata[n] = 0;
}
/**
 * Performs an exclusive prefix-sum (work-efficient Blelloch scan) on idata,
 * storing the result into odata.
 *
 * The working buffer is padded up to the next power of two. Fixed: the
 * padding region [n, arr_size) was previously left uninitialized, so the
 * up-sweep folded garbage into the sums for non-power-of-two n; it is now
 * zero-filled. Also removed an allocated-but-unused scratch buffer.
 */
void scan(int n, int *odata, const int *idata) {
    const int blockSize = 128;
    int log = ilog2ceil(n);
    int arr_size = n;
    if ((n & (n - 1)) != 0) {
        arr_size = 1 << log;   // round up to a power of two
    }

    int* dev_odata;
    cudaMalloc((void**)&dev_odata, arr_size * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
    cudaMemcpy(dev_odata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    if (arr_size > n) {
        // zero the padding so it contributes nothing to the up-sweep sums
        cudaMemset(dev_odata + n, 0, sizeof(int) * (arr_size - n));
        checkCUDAErrorWithLine("memset failed!");
    }

    timer().startGpuTimer();
    // up-sweep (reduce) phase
    log = ilog2ceil(arr_size);
    for (int d = 0; d < log; d++) {
        int off_n = arr_size / (1 << (d + 1));   // active elements at depth d
        dim3 fullBlocksPerGrid((off_n + blockSize - 1) / blockSize);
        kernUpSweepIteration<<<fullBlocksPerGrid, blockSize>>>(arr_size, d, dev_odata);
    }
    // down-sweep phase: clear the root, then propagate partial sums
    kernSetZero<<<1, 1>>>(arr_size - 1, dev_odata);
    for (int d = log - 1; d >= 0; d--) {
        int off_n = arr_size / (1 << (d + 1));
        dim3 fullBlocksPerGrid((off_n + blockSize - 1) / blockSize);
        kernDownSweepIteration<<<fullBlocksPerGrid, blockSize>>>(arr_size, d, dev_odata);
    }
    timer().endGpuTimer();

    cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");
    cudaFree(dev_odata);
}
// Map nonzero entries to 1 in place (the stream-compaction predicate).
// Fixed off-by-one: the original guard `index > n` let the thread with
// index == n read/write one element past the end of the array.
__global__ void kernMapToBoolean(int n, int *idata) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= n) { return; }
    if (idata[index] != 0) {
        idata[index] = 1;
    }
}
// Scatter kept (nonzero-flagged) elements to their scanned destinations.
// Fixed off-by-one: the original guard `index > n` allowed an
// out-of-bounds access at index == n.
__global__ void kernScatter(int n, int *odata, int *idata, int *scanOutData, int *boolOutData) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= n) { return; }
    if (boolOutData[index] != 0) {
        odata[scanOutData[index]] = idata[index];
    }
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * Fixes: the two host staging buffers (predicate and scan results) were
 * leaked; n <= 0 previously indexed scanOutData[-1]; copy-pasted error
 * strings now name the buffer that actually failed.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    if (n <= 0) { return 0; }   // nothing to compact
    const int blockSize = 128;

    // device copies: input, predicate flags, scan result, compacted output
    int* dev_idata;
    cudaMalloc((void**)&dev_idata, n * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
    cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    int* dev_boolData;
    cudaMalloc((void**)&dev_boolData, n * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_boolData failed!");
    cudaMemcpy(dev_boolData, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    int* dev_scanData;
    cudaMalloc((void**)&dev_scanData, n * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_scanData failed!");
    int* dev_odata;
    cudaMalloc((void**)&dev_odata, n * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");

    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    // 1) predicate: flag nonzero entries with 1
    kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n, dev_boolData);
    int *boolOutData = new int[n];
    cudaMemcpy(boolOutData, dev_boolData, sizeof(int) * n, cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");
    // 2) exclusive scan of the flags gives each kept element's output slot
    int *scanOutData = new int[n];
    scan(n, scanOutData, boolOutData);
    // exclusive scan: total kept = last scan value + last flag
    int compact_size = scanOutData[n-1] + boolOutData[n-1];
    cudaMemcpy(dev_scanData, scanOutData, sizeof(int) * n, cudaMemcpyHostToDevice);
    checkCUDAErrorWithLine("memcpy failed!");
    // 3) scatter kept elements to their slots
    kernScatter<<<fullBlocksPerGrid, blockSize>>>(n, dev_odata, dev_idata, dev_scanData, dev_boolData);
    cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
    checkCUDAErrorWithLine("memcpy failed!");

    // fixed: release host staging buffers (previously leaked)
    delete[] boolOutData;
    delete[] scanOutData;
    cudaFree(dev_idata);
    cudaFree(dev_odata);
    cudaFree(dev_boolData);
    cudaFree(dev_scanData);
    return compact_size;
}
}
}
|
647930dac93caa43e15e940267ee1a266dcb80f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlaswp.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "common_magma.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
magmaFloatComplex *dAT;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} claswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
// Apply params.npivots row swaps to one column of the row-wise matrix.
// Each thread owns column `tid`; rows i1 and ipiv[i1] are exchanged in turn.
__global__ void claswp_kernel( claswp_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int lda = params.lda;
// pointer to this thread's column, advanced to the first pivot row (j0)
magmaFloatComplex *dAT = params.dAT + tid + params.j0*lda;
magmaFloatComplex *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
// destination row, already converted to a 0-based offset by the host
int i2 = params.ipiv[i1];
magmaFloatComplex *A2 = dAT + i2*lda;
// swap the elements of rows i1 and i2 within this column
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
// Launch claswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void claswp_launch( claswp_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( claswp_kernel), dim3(blocks), dim3(NTHREADS), 0, queue , params );
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version updates each entry of ipiv by adding ind.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in cgetrf, cgetrf_gpu, cgetrf_mgpu, cgetrf_ooc.
extern "C" void
magmablas_cpermute_long2( magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
claswp_params_t params = { dAT, n, lda, ind + k, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1;
ipiv[ind + k + j] += ind;
}
claswp_launch( params, magma_stream );
}
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version assumes ind has already been added to ipiv.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in cgetrf_mgpu, cgetrf_ooc.
extern "C" void
magmablas_cpermute_long3( magmaFloatComplex *dAT, magma_int_t lda,
                          const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
    for( int k = 0; k < nb; k += MAX_PIVOTS ) {
        int npivots = min( MAX_PIVOTS, nb-k );
        // fields are: dAT n lda j0 npivots
        // NOTE(review): this deprecated entry point has no separate column
        // count, so `n` is set to lda -- confirm against callers.
        claswp_params_t params = { dAT, lda, lda, ind + k, npivots };
        // fixed: iterate only over this batch's pivots; the original looped
        // to MAX_PIVOTS and read past the end of ipiv on the final partial
        // batch (the kernel only ever consumes npivots entries).
        for( int j = 0; j < npivots; ++j ) {
            params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind;
        }
        claswp_launch( params, magma_stream );
    }
}
/**
Purpose:
=============
CLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT COMPLEX array on GPU, stored row-wise, dimension (LDA,N)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A. lda >= n.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1 <= n.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2 <= n.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
// It is used in cgessm, cgetrf_incpiv.
extern "C" void
magmablas_claswp_q(
magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 1 || k1 > n )
info = -4;
else if ( k2 < 1 || k2 > n )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dAT n lda j0 npivots
claswp_params_t params = { dAT+k*lda, n, lda, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
claswp_launch( params, queue );
}
}
/**
@see magmablas_claswp_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claswp( magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
magmablas_claswp_q( n, dAT, lda, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
typedef struct {
magmaFloatComplex *dA;
int n, ldx, ldy, j0, npivots;
int ipiv[MAX_PIVOTS];
} claswpx_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void claswpx_kernel( claswpx_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int ldx = params.ldx;
magmaFloatComplex *dA = params.dA + tid*params.ldy + params.j0*ldx;
magmaFloatComplex *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaFloatComplex *A2 = dA + i2*ldx;
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
// Launch claswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void claswpx( claswpx_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( claswpx_kernel), dim3(blocks), dim3(NTHREADS), 0, queue , params );
}
/**
Purpose:
=============
CLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dA COMPLEX array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
ldx INTEGER
Stride between elements in same column.
\param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=lda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=lda.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claswpx_q(
magma_int_t n, magmaFloatComplex *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n ldx ldy j0 npivots
claswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
claswpx( params, queue );
}
}
/**
@see magmablas_claswpx_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claswpx( magma_int_t n, magmaFloatComplex *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
return magmablas_claswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_claswp
// (including copying pivots to the GPU).
__global__ void claswp2_kernel(
int n, magmaFloatComplex *dAT, int lda, int npivots,
const magma_int_t* d_ipiv, magma_int_t inci )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < n ) {
dAT += tid;
magmaFloatComplex *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index
magmaFloatComplex *A2 = dAT + i2*lda;
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
/**
Purpose:
=============
CLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT COMPLEX array on GPU, stored row-wise, dimension (LDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claswp2_q(
magma_int_t n, magmaFloatComplex* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
int blocks = (n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( claswp2_kernel), dim3(blocks), dim3(NTHREADS), 0, queue ,
n, dAT + (k1-1)*lda, lda, k2-(k1-1), d_ipiv, inci );
}
/**
@see magmablas_claswp2_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claswp2( magma_int_t n, magmaFloatComplex* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci )
{
magmablas_claswp2_q( n, dAT, lda, k1, k2, d_ipiv, inci, magma_stream );
}
| 647930dac93caa43e15e940267ee1a266dcb80f5.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlaswp.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "common_magma.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
// Kernel argument bundle, passed by value at launch: one batch of up to
// MAX_PIVOTS row swaps to apply to the row-stored matrix dAT.
typedef struct {
magmaFloatComplex *dAT;
// n = #columns, lda = row stride, j0 = first row of this batch,
// npivots = number of swaps in this batch
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} claswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
// Each thread owns one column (index tid) and serially applies the batch
// of row swaps in params.ipiv to that column. Pivot indices in params.ipiv
// are relative to row j0 (0-based by the time they reach the kernel).
__global__ void claswp_kernel( claswp_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int lda = params.lda;
// Start of this thread's column, advanced to row j0.
magmaFloatComplex *dAT = params.dAT + tid + params.j0*lda;
magmaFloatComplex *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaFloatComplex *A2 = dAT + i2*lda;
// Swap elements of rows i1 and i2 within this column.
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // advance to the next pivot row (row i1+1)
}
}
}
// Launch claswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void claswp_launch( claswp_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
claswp_kernel<<< blocks, NTHREADS, 0, queue >>>( params );
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version updates each entry of ipiv by adding ind.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in cgetrf, cgetrf_gpu, cgetrf_mgpu, cgetrf_ooc.
// Deprecated. Apply nb row swaps (from host-side ipiv) to row-stored dAT,
// in batches of MAX_PIVOTS, starting at row `ind`. Side effect: each used
// ipiv entry is incremented by `ind` (unlike LAPACK, which updates after).
extern "C" void
magmablas_cpermute_long2( magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
claswp_params_t params = { dAT, n, lda, ind + k, npivots };
for( int j = 0; j < npivots; ++j ) {
// Convert to a 0-based pivot relative to the batch start.
params.ipiv[j] = ipiv[ind + k + j] - k - 1;
ipiv[ind + k + j] += ind;   // mutate caller's ipiv in place
}
claswp_launch( params, magma_stream );
}
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version assumes ind has already been added to ipiv.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in cgetrf_mgpu, cgetrf_ooc.
// Deprecated. Same as cpermute_long2 but assumes `ind` was already added
// to ipiv, and does not modify ipiv.
extern "C" void
magmablas_cpermute_long3( magmaFloatComplex *dAT, magma_int_t lda,
const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
// NOTE(review): the `n` field is set to lda here (no column count is
// passed to this routine) — assumes lda >= actual n; confirm callers.
claswp_params_t params = { dAT, lda, lda, ind + k, npivots };
// NOTE(review): loop bound is MAX_PIVOTS, not npivots, so when nb is
// not a multiple of MAX_PIVOTS this reads ipiv past nb (extra entries
// are copied but unused by the kernel) — confirm ipiv is sized for it.
for( int j = 0; j < MAX_PIVOTS; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind;
}
claswp_launch( params, magma_stream );
}
}
/**
Purpose:
=============
CLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT COMPLEX array on GPU, stored row-wise, dimension (LDA,N)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A. lda >= n.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1 <= n.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2 <= n.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
// It is used in cgessm, cgetrf_incpiv.
// CLASWP on a row-stored matrix: apply row interchanges k1..k2 (Fortran
// 1-based) using host-side ipiv, launching one kernel per MAX_PIVOTS batch.
extern "C" void
magmablas_claswp_q(
magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
// Argument validation, LAPACK-style negative info codes.
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 1 || k1 > n )
info = -4;
else if ( k2 < 1 || k2 > n )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// Process pivots in batches of MAX_PIVOTS (the struct's fixed capacity).
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dAT n lda j0 npivots
claswp_params_t params = { dAT+k*lda, n, lda, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
// Convert the 1-based Fortran pivot to 0-based, batch-relative.
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
claswp_launch( params, queue );
}
}
/**
@see magmablas_claswp_q
@ingroup magma_caux2
********************************************************************/
// Convenience overload of magmablas_claswp_q on the global magma_stream.
extern "C" void
magmablas_claswp( magma_int_t n, magmaFloatComplex *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
magmablas_claswp_q( n, dAT, lda, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
// Like claswp_params_t, but with independent strides in both directions
// (ldx between rows, ldy between columns) so the same kernel handles
// row-wise and column-wise storage.
typedef struct {
magmaFloatComplex *dA;
// n = #columns, ldx = row stride, ldy = column stride,
// j0 = first row of this batch, npivots = swaps in this batch
int n, ldx, ldy, j0, npivots;
int ipiv[MAX_PIVOTS];
} claswpx_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
// One thread per column (index tid); applies the batch of row swaps to
// that column using the generalized strides ldx/ldy.
__global__ void claswpx_kernel( claswpx_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int ldx = params.ldx;
// Start of this thread's column, advanced to row j0.
magmaFloatComplex *dA = params.dA + tid*params.ldy + params.j0*ldx;
magmaFloatComplex *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaFloatComplex *A2 = dA + i2*ldx;
// Swap rows i1 and i2 within this column.
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
// Launch claswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void claswpx( claswpx_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
claswpx_kernel<<< blocks, NTHREADS, 0, queue >>>( params );
}
/**
Purpose:
=============
CLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dA COMPLEX array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
ldx INTEGER
Stride between elements in same column.
\param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=lda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=lda.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
// CLASWPX: row interchanges k1..k2 on a matrix stored with arbitrary
// strides (ldx between rows, ldy between columns); pivots from host ipiv.
extern "C" void
magmablas_claswpx_q(
magma_int_t n, magmaFloatComplex *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
// Argument validation, LAPACK-style negative info codes.
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// Process pivots in batches of MAX_PIVOTS (the struct's fixed capacity).
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n ldx ldy j0 npivots
claswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
// Convert the 1-based Fortran pivot to 0-based, batch-relative.
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
claswpx( params, queue );
}
}
/**
@see magmablas_claswpx_q
@ingroup magma_caux2
********************************************************************/
// Convenience overload of magmablas_claswpx_q on the global magma_stream.
// (The `return` of a void expression is legal C++ and has no effect.)
extern "C" void
magmablas_claswpx( magma_int_t n, magmaFloatComplex *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
return magmablas_claswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_claswp
// (including copying pivots to the GPU).
// Variant of claswp_kernel that reads pivots from GPU memory (d_ipiv),
// so all npivots swaps happen in a single launch with no host-side batching.
__global__ void claswp2_kernel(
int n, magmaFloatComplex *dAT, int lda, int npivots,
const magma_int_t* d_ipiv, magma_int_t inci )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < n ) {
dAT += tid;   // this thread's column
magmaFloatComplex *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index -> 0-based
magmaFloatComplex *A2 = dAT + i2*lda;
// Swap rows i1 and i2 within this column.
magmaFloatComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // advance to the next pivot row
}
}
}
/**
Purpose:
=============
CLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT COMPLEX array on GPU, stored row-wise, dimension (LDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
// Apply row interchanges k1..k2 (Fortran 1-based) to the row-stored matrix
// dAT, with the pivot vector d_ipiv already resident in GPU memory.
// Single kernel launch, one thread per column.
extern "C" void
magmablas_claswp2_q(
magma_int_t n, magmaFloatComplex* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
// Argument validation; negative info encodes the offending argument's
// position, LAPACK style.
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// One thread per column; dAT is advanced so the kernel starts at row k1.
// NOTE(review): d_ipiv is not offset by (k1-1)*inci here, so pivots are
// read from position 0 — confirm callers pass d_ipiv pre-offset when k1 > 1.
int blocks = (n + NTHREADS - 1) / NTHREADS;
claswp2_kernel<<< blocks, NTHREADS, 0, queue >>>(
n, dAT + (k1-1)*lda, lda, k2-(k1-1), d_ipiv, inci );
}
/**
@see magmablas_claswp2_q
@ingroup magma_caux2
********************************************************************/
// Convenience overload of magmablas_claswp2_q on the global magma_stream.
extern "C" void
magmablas_claswp2( magma_int_t n, magmaFloatComplex* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci )
{
magmablas_claswp2_q( n, dAT, lda, k1, k2, d_ipiv, inci, magma_stream );
}
|
dafa3b32e2c672a6c9115720440975d23e081e42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
#include "external_field_test.cuh"
#include "hoomd/GPUPolymorph.cuh"
namespace gpu
{
namespace kernel
{
// Evaluate the polymorphic external field at each of the N positions,
// one thread per element, writing the result into out[idx].
__global__ void test_external_field(Scalar3* out, const mpcd::ExternalField* field, const Scalar3* pos, const unsigned int N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;   // guard the ragged grid edge
out[idx] = field->evaluate(pos[idx]);
}
} // end namespace kernel
//! Host driver: launch kernel::test_external_field over N positions.
/*!
 * \param out    Device buffer receiving one evaluated Scalar3 per position.
 * \param field  Device-side polymorphic field object (vtable must be
 *               device-constructed; see GPUPolymorph.cuh).
 * \param pos    Device buffer of N input positions.
 * \param N      Number of positions.
 * \returns the launch status (the original returned hipSuccess
 *          unconditionally, hiding invalid-configuration errors).
 */
hipError_t test_external_field(Scalar3* out, const mpcd::ExternalField* field, const Scalar3* pos, const unsigned int N)
{
// A zero-block grid is an invalid launch configuration; nothing to do.
if (N == 0)
    return hipSuccess;
const unsigned int block_size = 32;
const unsigned int num_blocks = (N + block_size - 1)/block_size;
hipLaunchKernelGGL(( kernel::test_external_field), dim3(num_blocks),dim3(block_size), 0, 0, out, field, pos, N);
// Surface launch-configuration errors to the caller instead of
// unconditionally reporting success.
return hipGetLastError();
}
} // end namespace gpu
| dafa3b32e2c672a6c9115720440975d23e081e42.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
#include "external_field_test.cuh"
#include "hoomd/GPUPolymorph.cuh"
namespace gpu
{
namespace kernel
{
// Evaluate the polymorphic external field at each of the N positions,
// one thread per element, writing the result into out[idx].
__global__ void test_external_field(Scalar3* out, const mpcd::ExternalField* field, const Scalar3* pos, const unsigned int N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;   // guard the ragged grid edge
out[idx] = field->evaluate(pos[idx]);
}
//! Host driver: launch kernel::test_external_field over N positions.
/*!
 * \param out    Device buffer receiving one evaluated Scalar3 per position.
 * \param field  Device-side polymorphic field object (vtable must be
 *               device-constructed; see GPUPolymorph.cuh).
 * \param pos    Device buffer of N input positions.
 * \param N      Number of positions.
 * \returns the launch status (the original returned cudaSuccess
 *          unconditionally, hiding invalid-configuration errors).
 */
cudaError_t test_external_field(Scalar3* out, const mpcd::ExternalField* field, const Scalar3* pos, const unsigned int N)
{
// A zero-block grid is an invalid launch configuration; nothing to do.
if (N == 0)
    return cudaSuccess;
const unsigned int block_size = 32;
const unsigned int num_blocks = (N + block_size - 1)/block_size;
kernel::test_external_field<<<num_blocks,block_size>>>(out, field, pos, N);
// Surface launch-configuration errors to the caller instead of
// unconditionally reporting success.
return cudaGetLastError();
}
|
a4b5d7c27a5d6d49b40b944e681f7dcfe67578ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
// Sum the elements of A (each element read factor_hilos times in total
// across the grid) into s[0]. s[0] must be zero-initialized by the host
// before launch (main copies h_sum[0] = 0 to the device).
__global__ void vAdd(float* A, int num_elements, int factor_hilos, float* s){
// Per-block partial sum in shared memory, zeroed by thread 0.
__shared__ float a;
if(threadIdx.x == 0) a = 0.0f;
__syncthreads();
// Global thread index.
int i = (blockIdx.x * blockDim.x + threadIdx.x);
if(i < factor_hilos*num_elements){
atomicAdd(&a, A[i%num_elements]);
}
// Wait until every thread of the block has contributed, then let a single
// thread publish the block's partial sum. The original wrote `s[0] = a`
// from every thread with no barrier: with gridDim.x > 1 the blocks raced
// and s[0] ended up holding one arbitrary block's (possibly incomplete)
// partial sum instead of the total.
__syncthreads();
if(threadIdx.x == 0) atomicAdd(&s[0], a);
}
// Print a diagnostic for a failed HIP call, tagged with the call-site
// identifier i; does nothing when err indicates success.
void fError(hipError_t err, int i){
if(err == hipSuccess) return;
printf("%d Ha ocurrido un error con codigo: %s\n", i, hipGetErrorString(err));
}
// Driver: sums the series 0..1023 on the GPU via the vAdd kernel and
// prints the result plus kernel timing.
int main(){
//hipSetDevice(1);
int num_elements = 1024;
int factor_hilos = 1;
// Reserve host memory
float * h_A = (float*)malloc(num_elements * sizeof(float));
if(h_A == NULL ){
printf("Error al reservar memoria para los vectores HOST");
exit(1);
}
// Single-float accumulator; must start at 0 for the device-side sum.
float * h_sum = (float*)malloc(sizeof(float));
h_sum[0] = 0;
// Initialize the input vector: h_A[i] = i
for(int i=0; i<num_elements; i++){
h_A[i] = (float)i;
}
hipError_t err;
int size = num_elements * sizeof(float);
float * d_A = NULL;
err = hipMalloc((void **)&d_A, size);
fError(err,1);
float * d_sum = NULL;
err = hipMalloc((void **)&d_sum, sizeof(float));
fError(err, 3);
// Copy inputs host -> device
// NOTE(review): the result of this memcpy is stored in err but never
// checked (no fError call), and the d_sum copy is not checked at all.
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_sum, h_sum, sizeof(float), hipMemcpyHostToDevice);
int HilosPorBloque = 256;
// ceil(total threads / block size)
int BloquesPorGrid = (factor_hilos*num_elements + HilosPorBloque -1) / HilosPorBloque;
hipError_t Err;
// Launch the kernel and time it with events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( vAdd), dim3(BloquesPorGrid), dim3(HilosPorBloque), 0, 0, d_A, num_elements, factor_hilos, d_sum);
Err = hipGetLastError();
fError(Err,2);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float tiempo_reserva_host;
hipEventElapsedTime(&tiempo_reserva_host, start, stop);
printf("Tiempo de suma vectores DEVICE: %f\n", tiempo_reserva_host);
hipEventDestroy(start);
hipEventDestroy(stop);
// Copy results device -> host (again unchecked)
err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
hipMemcpy(h_sum, d_sum, sizeof(float), hipMemcpyDeviceToHost);
/*for(int i=0; i<20; i++){
printf("%f ", h_A[i]);
//printf("\n");
}*/
// NOTE(review): h_A/h_sum/d_A/d_sum are never freed — leaked at exit.
printf("La suma es: %f", h_sum[0]);
}
| a4b5d7c27a5d6d49b40b944e681f7dcfe67578ba.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
// Sum the elements of A (each element read factor_hilos times in total
// across the grid) into s[0]. s[0] must be zero-initialized by the host
// before launch (main copies h_sum[0] = 0 to the device).
__global__ void vAdd(float* A, int num_elements, int factor_hilos, float* s){
// Per-block partial sum in shared memory, zeroed by thread 0.
__shared__ float a;
if(threadIdx.x == 0) a = 0.0f;
__syncthreads();
// Global thread index.
int i = (blockIdx.x * blockDim.x + threadIdx.x);
if(i < factor_hilos*num_elements){
atomicAdd(&a, A[i%num_elements]);
}
// Wait until every thread of the block has contributed, then let a single
// thread publish the block's partial sum. The original wrote `s[0] = a`
// from every thread with no barrier: with gridDim.x > 1 the blocks raced
// and s[0] ended up holding one arbitrary block's (possibly incomplete)
// partial sum instead of the total.
__syncthreads();
if(threadIdx.x == 0) atomicAdd(&s[0], a);
}
// Print a diagnostic for a failed CUDA call, tagged with the call-site
// identifier i; does nothing when err indicates success.
void fError(cudaError_t err, int i){
if(err == cudaSuccess) return;
printf("%d Ha ocurrido un error con codigo: %s\n", i, cudaGetErrorString(err));
}
// Driver: sums the series 0..1023 on the GPU via the vAdd kernel and
// prints the result plus kernel timing.
int main(){
//cudaSetDevice(1);
int num_elements = 1024;
int factor_hilos = 1;
// Reserve host memory
float * h_A = (float*)malloc(num_elements * sizeof(float));
if(h_A == NULL ){
printf("Error al reservar memoria para los vectores HOST");
exit(1);
}
// Single-float accumulator; must start at 0 for the device-side sum.
float * h_sum = (float*)malloc(sizeof(float));
h_sum[0] = 0;
// Initialize the input vector: h_A[i] = i
for(int i=0; i<num_elements; i++){
h_A[i] = (float)i;
}
cudaError_t err;
int size = num_elements * sizeof(float);
float * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
fError(err,1);
float * d_sum = NULL;
err = cudaMalloc((void **)&d_sum, sizeof(float));
fError(err, 3);
// Copy inputs host -> device
// NOTE(review): the result of this memcpy is stored in err but never
// checked (no fError call), and the d_sum copy is not checked at all.
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sum, h_sum, sizeof(float), cudaMemcpyHostToDevice);
int HilosPorBloque = 256;
// ceil(total threads / block size)
int BloquesPorGrid = (factor_hilos*num_elements + HilosPorBloque -1) / HilosPorBloque;
cudaError_t Err;
// Launch the kernel and time it with events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
vAdd<<<BloquesPorGrid, HilosPorBloque>>>(d_A, num_elements, factor_hilos, d_sum);
Err = cudaGetLastError();
fError(Err,2);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float tiempo_reserva_host;
cudaEventElapsedTime(&tiempo_reserva_host, start, stop);
printf("Tiempo de suma vectores DEVICE: %f\n", tiempo_reserva_host);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy results device -> host (again unchecked)
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
/*for(int i=0; i<20; i++){
printf("%f ", h_A[i]);
//printf("\n");
}*/
// NOTE(review): h_A/h_sum/d_A/d_sum are never freed — leaked at exit.
printf("La suma es: %f", h_sum[0]);
}
|
b5939d77d612702faf53ecf53e1247d07b9cafa1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//Definicion del kernel
// Naive dense multiply of two n x n row-major int matrices: each thread
// computes one output element ans[row][col] = dot(m1 row, m2 column).
__global__ void gpuMatmult(int* m1, int* m2, int* ans, int n){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= n || row >= n) return;   // guard the ragged grid edge
int acc = 0;
for (int t = 0; t < n; ++t) {
acc += m1[row * n + t] * m2[t * n + col];
}
ans[row * n + col] = acc;
}
// Multiply two square integer matrices read from CSV-style files
// (argv[1], argv[2]) on the GPU and write the product to matres.txt.
int main(int argc, char const *argv[])
{
// Variable declarations
double timeGPU;
FILE *f1, *f2, *f3;
int *h_m1, *h_m2, *h_ans, *d_m1, *d_m2, *d_ans;
int m1Row, m1Col, m2Row, m2Col;
// Parameter check
if (argc != 3){
printf("Cantidad de parametros incorrecta!!\n");
}else{
// Open the two input matrices and the output file
f1 = fopen(argv[1],"r");
f2 = fopen(argv[2],"r");
f3 = fopen("matres.txt","w");
// Read matrix dimensions
fscanf(f1, "%d", &m1Row); fscanf(f1, "%d", &m1Col);
fscanf(f2, "%d", &m2Row); fscanf(f2, "%d", &m2Col);
// Allocation sizes
size_t m1_size = m1Row * m1Col * sizeof(int);
size_t m2_size = m2Row * m2Col * sizeof(int);
size_t ans_size = m1Col * m2Row * sizeof(int);
// Host allocations
h_m1 = (int *)malloc(m1_size);
h_m2 = (int *)malloc(m2_size);
h_ans = (int *)malloc(ans_size);
// Read the files into row-major host buffers. The row stride is the
// column count (the original used m1Row/m2Row, which is only correct
// for square matrices).
for (int i = 0; i < m1Row; i++){
for (int j = 0; j < m1Col; j++){
fscanf(f1, "%d", &h_m1[i * m1Col + j]);
getc(f1); // skip the comma separators
}
}
for (int k = 0; k < m2Row; k++){
for (int l = 0; l < m2Col; l++){
fscanf(f2, "%d", &h_m2[k * m2Col + l]);
getc(f2); // skip the comma separators
}
}
// Device allocations
if (hipSuccess != hipMalloc((void **) &d_m1, m1_size))
printf("Error asignando memoria para d_m1\n");
if (hipSuccess != hipMalloc((void **) &d_m2, m2_size))
printf("Error asignando memoria para d_m2\n");
if (hipSuccess != hipMalloc((void **) &d_ans, ans_size))
printf("Error asignando memoria para d_ans\n");
// Copy inputs host -> device
if (hipSuccess != hipMemcpy(d_m1, h_m1, m1_size, hipMemcpyHostToDevice))
printf("Error copiando datos a d_m1\n");
if (hipSuccess != hipMemcpy(d_m2, h_m2, m2_size, hipMemcpyHostToDevice))
printf("Error copiando datos a d_m2\n");
int size = m1Row; // matrix order (both matrices assumed square)
// Launch configuration: 32x32 thread tiles covering the whole matrix
dim3 blockDim(32,32);
dim3 gridDim((int)ceil((float)size/blockDim.x), (int)ceil((float)size/blockDim.y));
clock_t startGPU = clock();
// Kernel launch
hipLaunchKernelGGL(( gpuMatmult), dim3(gridDim), dim3(blockDim), 0, 0, d_m1, d_m2, d_ans, m1Row);
if (hipSuccess != hipGetLastError())
printf("Error en el llamado al kernel\n");
// Copy the result device -> host (blocking, so it also synchronizes)
if (hipSuccess != hipMemcpy(h_ans, d_ans, ans_size, hipMemcpyDeviceToHost))
printf("Error copiando datos desde d_ans a h_ans\n");
timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
printf("m1(%d x %d), m2(%d x %d)\n",m1Row,m1Col,m2Row,m2Col);
printf("GPU tiempo = %.6f segundos\n",timeGPU);
// Write the result to the output file
for (int i = 0; i < m1Row; i++) {
for (int j = 0; j < m2Col; j++) {
fprintf(f3, "%d," ,h_ans[i * m2Col + j]);
}
fseek(f3, -1, SEEK_END);
fprintf(f3, "\n");
}
// Close the streams (the original leaked all three)
fclose(f1); fclose(f2); fclose(f3);
// Release device memory, then host memory.
// Bug fix: the original called hipFree(h_ans) on a host pointer that had
// already been free()d, and never freed d_ans.
hipFree(d_m1); hipFree(d_m2); hipFree(d_ans);
free(h_m1); free(h_m2); free(h_ans);
}
return 0;
} | b5939d77d612702faf53ecf53e1247d07b9cafa1.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
//Definicion del kernel
// Naive dense multiply of two n x n row-major int matrices: each thread
// computes one output element ans[j][i] = dot(m1 row j, m2 column i).
__global__ void gpuMatmult(int* m1, int* m2, int* ans, int n){
int k, sum = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;   // column index
int j = blockIdx.y * blockDim.y + threadIdx.y;   // row index
if (i < n && j < n) {   // guard the ragged grid edge
for (k = 0; k < n; k++) {
sum += m1[j * n + k] * m2[k * n + i];
}
ans[j * n + i] = sum;
}
}
// Multiply two square integer matrices read from CSV-style files
// (argv[1], argv[2]) on the GPU and write the product to matres.txt.
int main(int argc, char const *argv[])
{
// Variable declarations
double timeGPU;
FILE *f1, *f2, *f3;
int *h_m1, *h_m2, *h_ans, *d_m1, *d_m2, *d_ans;
int m1Row, m1Col, m2Row, m2Col;
// Parameter check
if (argc != 3){
printf("Cantidad de parametros incorrecta!!\n");
}else{
// Open the two input matrices and the output file
f1 = fopen(argv[1],"r");
f2 = fopen(argv[2],"r");
f3 = fopen("matres.txt","w");
// Read matrix dimensions
fscanf(f1, "%d", &m1Row); fscanf(f1, "%d", &m1Col);
fscanf(f2, "%d", &m2Row); fscanf(f2, "%d", &m2Col);
// Allocation sizes
size_t m1_size = m1Row * m1Col * sizeof(int);
size_t m2_size = m2Row * m2Col * sizeof(int);
size_t ans_size = m1Col * m2Row * sizeof(int);
// Host allocations
h_m1 = (int *)malloc(m1_size);
h_m2 = (int *)malloc(m2_size);
h_ans = (int *)malloc(ans_size);
// Read the files into row-major host buffers. The row stride is the
// column count (the original used m1Row/m2Row, which is only correct
// for square matrices).
for (int i = 0; i < m1Row; i++){
for (int j = 0; j < m1Col; j++){
fscanf(f1, "%d", &h_m1[i * m1Col + j]);
getc(f1); // skip the comma separators
}
}
for (int k = 0; k < m2Row; k++){
for (int l = 0; l < m2Col; l++){
fscanf(f2, "%d", &h_m2[k * m2Col + l]);
getc(f2); // skip the comma separators
}
}
// Device allocations
if (cudaSuccess != cudaMalloc((void **) &d_m1, m1_size))
printf("Error asignando memoria para d_m1\n");
if (cudaSuccess != cudaMalloc((void **) &d_m2, m2_size))
printf("Error asignando memoria para d_m2\n");
if (cudaSuccess != cudaMalloc((void **) &d_ans, ans_size))
printf("Error asignando memoria para d_ans\n");
// Copy inputs host -> device
if (cudaSuccess != cudaMemcpy(d_m1, h_m1, m1_size, cudaMemcpyHostToDevice))
printf("Error copiando datos a d_m1\n");
if (cudaSuccess != cudaMemcpy(d_m2, h_m2, m2_size, cudaMemcpyHostToDevice))
printf("Error copiando datos a d_m2\n");
int size = m1Row; // matrix order (both matrices assumed square)
// Launch configuration: 32x32 thread tiles covering the whole matrix
dim3 blockDim(32,32);
dim3 gridDim((int)ceil((float)size/blockDim.x), (int)ceil((float)size/blockDim.y));
clock_t startGPU = clock();
// Kernel launch
gpuMatmult<<<gridDim, blockDim>>>(d_m1, d_m2, d_ans, m1Row);
if (cudaSuccess != cudaGetLastError())
printf("Error en el llamado al kernel\n");
// Copy the result device -> host (blocking, so it also synchronizes)
if (cudaSuccess != cudaMemcpy(h_ans, d_ans, ans_size, cudaMemcpyDeviceToHost))
printf("Error copiando datos desde d_ans a h_ans\n");
timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
printf("m1(%d x %d), m2(%d x %d)\n",m1Row,m1Col,m2Row,m2Col);
printf("GPU tiempo = %.6f segundos\n",timeGPU);
// Write the result to the output file
for (int i = 0; i < m1Row; i++) {
for (int j = 0; j < m2Col; j++) {
fprintf(f3, "%d," ,h_ans[i * m2Col + j]);
}
fseek(f3, -1, SEEK_END);
fprintf(f3, "\n");
}
// Close the streams (the original leaked all three)
fclose(f1); fclose(f2); fclose(f3);
// Release device memory, then host memory.
// Bug fix: the original called cudaFree(h_ans) on a host pointer that
// had already been free()d, and never freed d_ans.
cudaFree(d_m1); cudaFree(d_m2); cudaFree(d_ans);
free(h_m1); free(h_m2); free(h_ans);
}
return 0;
} |
57ddd58301a932d5ad4369a323fde448d9b7b600.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ChangeOutputWeightsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes (rows of matrices_)
// against 20 block shapes (rows of blocks_), launching
// ChangeOutputWeightsKernel 1000 times per configuration and printing
// [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is read without checking argc, and matrix_len is
// not clamped to the 7 rows of matrices_ — values > 7 read out of bounds.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations are XSIZE*YSIZE *bytes*, not floats
// (missing sizeof(float)); buffers are also never freed, so each
// configuration leaks device memory.
float *outputWeights = NULL;
hipMalloc(&outputWeights, XSIZE*YSIZE);
float *outputWeightDeltas = NULL;
hipMalloc(&outputWeightDeltas, XSIZE*YSIZE);
float *outputDeltas = NULL;
hipMalloc(&outputDeltas, XSIZE*YSIZE);
float *hiddenActivations = NULL;
hipMalloc(&hiddenActivations, XSIZE*YSIZE);
float trainingRate = 1;
float momentum = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up: one launch + sync, then 10 unsynchronized launches.
hipFree(0);hipLaunchKernelGGL((
ChangeOutputWeightsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ChangeOutputWeightsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
}
// Timed section: 1000 launches.
// NOTE(review): there is no synchronize before reading the end time,
// so this appears to measure enqueue cost plus whatever execution
// overlaps it — confirm intended semantics.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ChangeOutputWeightsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 57ddd58301a932d5ad4369a323fde448d9b7b600.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ChangeOutputWeightsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes (rows of matrices_)
// against 20 block shapes (rows of blocks_), launching
// ChangeOutputWeightsKernel 1000 times per configuration and printing
// [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
// NOTE(review): argv[1] is read without checking argc, and matrix_len is
// not clamped to the 7 rows of matrices_ — values > 7 read out of bounds.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations are XSIZE*YSIZE *bytes*, not floats
// (missing sizeof(float)); buffers are also never freed, so each
// configuration leaks device memory.
float *outputWeights = NULL;
cudaMalloc(&outputWeights, XSIZE*YSIZE);
float *outputWeightDeltas = NULL;
cudaMalloc(&outputWeightDeltas, XSIZE*YSIZE);
float *outputDeltas = NULL;
cudaMalloc(&outputDeltas, XSIZE*YSIZE);
float *hiddenActivations = NULL;
cudaMalloc(&hiddenActivations, XSIZE*YSIZE);
float trainingRate = 1;
float momentum = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up: one launch + sync, then 10 unsynchronized launches.
cudaFree(0);
ChangeOutputWeightsKernel<<<gridBlock,threadBlock>>>(outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ChangeOutputWeightsKernel<<<gridBlock,threadBlock>>>(outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
}
// Timed section: 1000 launches.
// NOTE(review): there is no synchronize before reading the end time,
// so this appears to measure enqueue cost plus whatever execution
// overlaps it — confirm intended semantics.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ChangeOutputWeightsKernel<<<gridBlock,threadBlock>>>(outputWeights,outputWeightDeltas,outputDeltas,hiddenActivations,trainingRate,momentum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
44950917491ea440cd4d6113a5e7e1e00723b1c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
// Element-wise vector add, c = a + b over n floats, using a grid-stride
// loop so any launch configuration covers the whole array.
__global__
void vecAdd(int n, float *a, float *b, float *c)
{
const int step = blockDim.x * gridDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
c[idx] = a[idx] + b[idx];
}
}
// Fill a with 1.0f and b with 2.0f on the device (grid-stride loop).
__global__ void init(int n, float *a, float *b) {
const int step = blockDim.x * gridDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
a[idx] = 1.0f;
b[idx] = 2.0f;
}
}
// Vector-add driver: fills x=1, y=2 on the GPU, times z = x + y with HIP
// events, then verifies on the host that every element of z is 3.0f.
int main(void)
{
  int N = 1<<20;
  float *x, *y, *z;
  float msec;
  hipEvent_t start, stop;
  // Allocate Unified Memory -- accessible from CPU or GPU
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  hipMallocManaged(&z, N*sizeof(float));
  hipEventCreate(&start);
  hipEventCreate(&stop);
  int blockSize = 256;
  int numBlocks = 12; // good enough for P620
  // Initialize x and y on the device; grid-stride loops in the kernels
  // cover all N elements despite the modest launch size.
  hipLaunchKernelGGL(( init), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
  hipEventRecord(start);
  hipLaunchKernelGGL(( vecAdd), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y, z);
  hipEventRecord(stop);
  // Launches are asynchronous and report configuration errors lazily;
  // check now instead of silently validating stale data below.
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    std::cerr << "Kernel launch failed: " << hipGetErrorString(err) << std::endl;
    return 1;
  }
  // Wait for GPU to finish before accessing on host
  hipEventSynchronize(stop);
  hipEventElapsedTime(&msec, start, stop);
  printf("Kernel time: %f ms\n", msec);
  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Release GPU resources -- the original leaked both timing events.
  hipEventDestroy(start);
  hipEventDestroy(stop);
  hipFree(x);
  hipFree(y);
  hipFree(z);
  return 0;
}
| 44950917491ea440cd4d6113a5e7e1e00723b1c8.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, n).
// Grid-stride loop, so any launch configuration covers all n elements.
__global__
void vecAdd(int n, float *a, float *b, float *c)
{
  const int first = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = gridDim.x * blockDim.x;
  for (int i = first; i < n; i += step) {
    c[i] = a[i] + b[i];
  }
}
// Device-side initialization: sets a[i] = 1.0f and b[i] = 2.0f for all i < n.
// Grid-stride loop, so the launch size need not match n.
__global__ void init(int n, float *a, float *b) {
  const int first = threadIdx.x + blockIdx.x * blockDim.x;
  const int step = gridDim.x * blockDim.x;
  for (int i = first; i < n; i += step) {
    a[i] = 1.0f;
    b[i] = 2.0f;
  }
}
// Vector-add driver: fills x=1, y=2 on the GPU, times z = x + y with CUDA
// events, then verifies on the host that every element of z is 3.0f.
int main(void)
{
  int N = 1<<20;
  float *x, *y, *z;
  float msec;
  cudaEvent_t start, stop;
  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  cudaMallocManaged(&z, N*sizeof(float));
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  int blockSize = 256;
  int numBlocks = 12; // good enough for P620
  // Initialize x and y on the device; grid-stride loops in the kernels
  // cover all N elements despite the modest launch size.
  init<<<numBlocks, blockSize>>>(N, x, y);
  cudaEventRecord(start);
  vecAdd<<<numBlocks, blockSize>>>(N, x, y, z);
  cudaEventRecord(stop);
  // Launches are asynchronous and report configuration errors lazily;
  // check now instead of silently validating stale data below.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    return 1;
  }
  // Wait for GPU to finish before accessing on host
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&msec, start, stop);
  printf("Kernel time: %f ms\n", msec);
  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Release GPU resources -- the original leaked both timing events.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(x);
  cudaFree(y);
  cudaFree(z);
  return 0;
}
|
d54826d9f099ab152f1f43158f471b5dc5b14b78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <utility>
#include <memory.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include "../Settings.h"
#include "../BitHelper.h"
#include "../LZSSInterface.h"
// Minor difference to the implementation of BlockDecompress.
// LZSS block decompressor: each thread fully decodes one compressed data
// block into a private buffer, then writes the result back to global memory.
// Flag bit j == 0 -> the next input byte is a literal; bit j == 1 -> the next
// sizeof(PairType) bytes encode an (offset, length) back-reference.
// NOTE(review): outBuf is a per-thread local array of DataBlockSize bytes;
// confirm the local-memory footprint/occupancy impact is acceptable.
__global__ void DecompressKernel(CompressFlagBlock* deviceFlagIn, int nFlagBlocks,
    const uint8_t* deviceInBuf, uint8_t* deviceOutBuf)
{
    uint8_t outBuf[DataBlockSize];
    // This is the data blockId. Each thread is responsible for a 4KB data block.
    auto blockId = blockIdx.x * blockDim.x + threadIdx.x;
    if (blockId < nFlagBlocks) {
        CompressFlagBlock* flagBlock = &deviceFlagIn[blockId];
        // Start of this block's compressed bytes within the input stream.
        auto inOffset = flagBlock->CompressedOffset;
        auto outOffset = 0;
        for (int j = 0; j < flagBlock->NumOfFlags; ++j) {
            if (GET_BIT(flagBlock->Flags, j) == 0) {
                // Single character
                outBuf[outOffset] = deviceInBuf[inOffset];
                ++inOffset;
                ++outOffset;
            } else {
                // Replacement pair
                PairType matchPair;
                memcpy(&matchPair, deviceInBuf + inOffset, sizeof(PairType));
                // Plus 1 for the opposite operation in compression
                auto matchOffset = (matchPair >> PairLengthBits) + 1;
                auto matchLength = (matchPair & (MaxEncodeLength - 1)) + 1;
                // May overlap, so manually copy
                for (int k = 0; k < matchLength; ++k) {
                    outBuf[outOffset] = outBuf[outOffset - matchOffset];
                    ++outOffset;
                }
                inOffset += sizeof(PairType);
            }
        }
        // Copy back to global memory
        memcpy(deviceOutBuf + blockId * DataBlockSize, outBuf, outOffset);
    }
}
| d54826d9f099ab152f1f43158f471b5dc5b14b78.cu | #include <utility>
#include <memory.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include "../Settings.h"
#include "../BitHelper.h"
#include "../LZSSInterface.h"
// Minor difference to the implementation of BlockDecompress.
// LZSS block decompressor: each thread fully decodes one compressed data
// block into a private buffer, then writes the result back to global memory.
// Flag bit j == 0 -> the next input byte is a literal; bit j == 1 -> the next
// sizeof(PairType) bytes encode an (offset, length) back-reference.
// NOTE(review): outBuf is a per-thread local array of DataBlockSize bytes;
// confirm the local-memory footprint/occupancy impact is acceptable.
__global__ void DecompressKernel(CompressFlagBlock* deviceFlagIn, int nFlagBlocks,
    const uint8_t* deviceInBuf, uint8_t* deviceOutBuf)
{
    uint8_t outBuf[DataBlockSize];
    // This is the data blockId. Each thread is responsible for a 4KB data block.
    auto blockId = blockIdx.x * blockDim.x + threadIdx.x;
    if (blockId < nFlagBlocks) {
        CompressFlagBlock* flagBlock = &deviceFlagIn[blockId];
        // Start of this block's compressed bytes within the input stream.
        auto inOffset = flagBlock->CompressedOffset;
        auto outOffset = 0;
        for (int j = 0; j < flagBlock->NumOfFlags; ++j) {
            if (GET_BIT(flagBlock->Flags, j) == 0) {
                // Single character
                outBuf[outOffset] = deviceInBuf[inOffset];
                ++inOffset;
                ++outOffset;
            } else {
                // Replacement pair
                PairType matchPair;
                memcpy(&matchPair, deviceInBuf + inOffset, sizeof(PairType));
                // Plus 1 for the opposite operation in compression
                auto matchOffset = (matchPair >> PairLengthBits) + 1;
                auto matchLength = (matchPair & (MaxEncodeLength - 1)) + 1;
                // May overlap, so manually copy
                for (int k = 0; k < matchLength; ++k) {
                    outBuf[outOffset] = outBuf[outOffset - matchOffset];
                    ++outOffset;
                }
                inOffset += sizeof(PairType);
            }
        }
        // Copy back to global memory
        memcpy(deviceOutBuf + blockId * DataBlockSize, outBuf, outOffset);
    }
}
|
94b5127711090a02023234078703f36263443dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Thu Oct 8 23:05:35 2020
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches clacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// Batched matrix add: dB[k] += alpha * dA[k] for matrix k = blockIdx.y.
// Each thread owns one row and walks across all n columns; rows beyond m
// (the bottom partial block) simply return.
__global__ void
cgeadd_batched_kernel(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, int ldda,
    magmaFloatComplex **dBarray, int lddb )
{
    const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
    magmaFloatComplex       *dB = dBarray[ blockIdx.y ];
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    if ( row >= m )
        return;
    // Column-major storage: element (row, col) sits at row + col*ld.
    for ( int col = 0; col < n; ++col ) {
        dB[ row + col*lddb ] = alpha*dA[ row + col*ldda ] + dB[ row + col*lddb ];
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    /* Validate arguments; the failing argument index is reported (negated)
       through magma_xerbla, following LAPACK conventions. */
    magma_int_t info = 0;
    if      ( m < 0 )            info = -1;
    else if ( n < 0 )            info = -2;
    else if ( ldda < max(1,m) )  info = -5;
    else if ( lddb < max(1,m) )  info = -7;
    else if ( batchCount < 0 )   info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    /* Quick return when there is no work. */
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;

    /* Launch in chunks of at most max_batchCount matrices; grid.y indexes
       the matrix within the chunk, grid.x tiles the rows in blocks of NB. */
    dim3 threads( NB );
    const magma_int_t max_batchCount = queue->get_maxBatch();
    for ( magma_int_t first = 0; first < batchCount; first += max_batchCount ) {
        const magma_int_t ibatch = min( max_batchCount, batchCount - first );
        dim3 grid( magma_ceildiv( m, NB ), ibatch );
        hipLaunchKernelGGL(( cgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
            m, n, alpha, dAarray+first, ldda, dBarray+first, lddb );
    }
}
| 94b5127711090a02023234078703f36263443dfa.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Thu Oct 8 23:05:35 2020
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches clacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
// Batched matrix add: dB[k] += alpha * dA[k] for matrix k = blockIdx.y.
// Each thread owns one row and walks across all n columns; rows beyond m
// (the bottom partial block) simply return.
__global__ void
cgeadd_batched_kernel(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, int ldda,
    magmaFloatComplex **dBarray, int lddb )
{
    const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
    magmaFloatComplex       *dB = dBarray[ blockIdx.y ];
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    if ( row >= m )
        return;
    // Column-major storage: element (row, col) sits at row + col*ld.
    for ( int col = 0; col < n; ++col ) {
        dB[ row + col*lddb ] = alpha*dA[ row + col*ldda ] + dB[ row + col*lddb ];
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
    magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
    magma_int_t batchCount,
    magma_queue_t queue )
{
    /* Validate arguments; the failing argument index is reported (negated)
       through magma_xerbla, following LAPACK conventions. */
    magma_int_t info = 0;
    if      ( m < 0 )            info = -1;
    else if ( n < 0 )            info = -2;
    else if ( ldda < max(1,m) )  info = -5;
    else if ( lddb < max(1,m) )  info = -7;
    else if ( batchCount < 0 )   info = -8;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    /* Quick return when there is no work. */
    if ( m == 0 || n == 0 || batchCount == 0 )
        return;

    /* Launch in chunks of at most max_batchCount matrices; grid.y indexes
       the matrix within the chunk, grid.x tiles the rows in blocks of NB. */
    dim3 threads( NB );
    const magma_int_t max_batchCount = queue->get_maxBatch();
    for ( magma_int_t first = 0; first < batchCount; first += max_batchCount ) {
        const magma_int_t ibatch = min( max_batchCount, batchCount - first );
        dim3 grid( magma_ceildiv( m, NB ), ibatch );
        cgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
            ( m, n, alpha, dAarray+first, ldda, dBarray+first, lddb );
    }
}
|
e17f47175a6e68262377828e9a92358c5496ca7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fmaf_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps matrix sizes (argv[1] of them) against 20 block
// shapes, timing 1000 launches of fmaf_kernel per configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate in bytes: the original passed XSIZE*YSIZE (an element
            // count) as the byte size, under-allocating the float buffers 4x.
            float *d_x = NULL;
            hipMalloc(&d_x, XSIZE*YSIZE*sizeof(float));
            float *d_y = NULL;
            hipMalloc(&d_y, XSIZE*YSIZE*sizeof(float));
            float *d_z = NULL;
            hipMalloc(&d_z, XSIZE*YSIZE*sizeof(float));
            int size = XSIZE*YSIZE;
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // establish the context before timing
            // One untimed run, then a short warm-up.
            hipLaunchKernelGGL(( fmaf_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_x,d_y,d_z,size);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( fmaf_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_x,d_y,d_z,size);
            }
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( fmaf_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_x,d_y,d_z,size);
            }
            // Launches are asynchronous: without this sync the timer measured
            // only launch overhead, not kernel execution time.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (originally leaked every iteration).
            hipFree(d_x);
            hipFree(d_y);
            hipFree(d_z);
        }
}} | e17f47175a6e68262377828e9a92358c5496ca7c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fmaf_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps matrix sizes (argv[1] of them) against 20 block
// shapes, timing 1000 launches of fmaf_kernel per configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate in bytes: the original passed XSIZE*YSIZE (an element
            // count) as the byte size, under-allocating the float buffers 4x.
            float *d_x = NULL;
            cudaMalloc(&d_x, XSIZE*YSIZE*sizeof(float));
            float *d_y = NULL;
            cudaMalloc(&d_y, XSIZE*YSIZE*sizeof(float));
            float *d_z = NULL;
            cudaMalloc(&d_z, XSIZE*YSIZE*sizeof(float));
            int size = XSIZE*YSIZE;
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // establish the context before timing
            // One untimed run, then a short warm-up.
            fmaf_kernel<<<gridBlock,threadBlock>>>(d_x,d_y,d_z,size);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                fmaf_kernel<<<gridBlock,threadBlock>>>(d_x,d_y,d_z,size);
            }
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                fmaf_kernel<<<gridBlock,threadBlock>>>(d_x,d_y,d_z,size);
            }
            // Launches are asynchronous: without this sync the timer measured
            // only launch overhead, not kernel execution time.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (originally leaked every iteration).
            cudaFree(d_x);
            cudaFree(d_y);
            cudaFree(d_z);
        }
}} |
a536985b9d9246198b345bec3b302d17c7e1fce6.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation unit: one int8 NCxHWx<4> implicit-GEMM convolution
// configuration (SIMT, SM61, 128x128x32 threadblock tile, bias + HSwish +
// clamp epilogue) so the wrapper template is compiled exactly once here.
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 4, 16, false,
    cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| a536985b9d9246198b345bec3b302d17c7e1fce6.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Explicit instantiation unit: one int8 NCxHWx<4> implicit-GEMM convolution
// configuration (SIMT, SM61, 128x128x32 threadblock tile, bias + HSwish +
// clamp epilogue) so the wrapper template is compiled exactly once here.
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 4, 16, false,
    cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
bd18a04339660920b377f25d5e59f78fdbc6fa24.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "mpi.h"
#include <stdbool.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define XMIN 0.0
#define XMAX 1.0
#define YMIN 0.0
#define YMAX 1.0
#define USE_MATH_DEFINES
#define MAX_PSZ 10
#define TSCALE 1.0
#define VSQR 0.1
#define NINEPTSTENCIL 1
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
void init(double *u, double *pebbles, int n);
void evolve(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t);
void evolve9pt(double *un, double *uc, double *uo, double *pebbles, int n, int rank, double *row, double *col, double *indv, double h, double dt, double t);
void evolve9pt_1(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t);
int tpdt(double *t, double dt, double end_time);
void print_heatmap(char *filename, double *u, int n, double h);
void init_pebbles(double *p, int pn, int n);
void run_cpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time);
void transfer(double *from, double *to, int r, int n, bool dir);
void dest(double *source, double *row, double *col, double *indv, int *hor, int *ver, int *diag, int rank, int size);
extern void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads);
/**************************************
* void __cudaSafeCall(hipError_t err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
/* Abort the program with file/line context when a HIP runtime call
   returned an error. Compiled out entirely unless __DEBUG is defined. */
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#pragma warning( pop )
#endif // __DEBUG
    return;
}
/* Query the HIP runtime's pending error state (e.g. after a kernel launch)
   and abort with file/line context if an error is set.
   Compiled out entirely unless __DEBUG is defined. */
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    hipError_t err = hipGetLastError();
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
    /* More careful checking (hipDeviceSynchronize followed by a re-check)
       would also catch asynchronous execution errors, at a performance cost;
       it is intentionally left disabled here, as in the original. */
#pragma warning( pop )
#endif // __DEBUG
    return;
}
// Pebble forcing term: the initial impulse p decays exponentially in time
// at rate TSCALE. (Note: __expf is the fast single-precision exponential
// applied inside a double expression, matching the original behavior.)
__device__ double f_CUDA(double p, double t)
{
    return -p * __expf(-TSCALE * t);
}
// 9-point stencil update for one quadrant of the lake surface.
// un/uc/uo are the new/current/old wave heights (n x n); row/col/indv hold
// the neighbouring quadrants' boundary row, boundary column and diagonal
// corner value received over MPI. rank (1..4) selects which two sides of
// this quadrant are global boundaries (forced to 0) and which are fringes.
// NOTE(review): the caller in main() passes host-malloc'd row/col/indv
// directly to this kernel -- device code cannot dereference host pointers;
// these should be device allocations. Verify before use.
// NOTE(review): the idx formula uses (blockIdx.x * gridDim.x + blockIdx.y),
// which looks suspect for non-square grids (usually blockIdx.y * gridDim.x
// + blockIdx.x); confirm the intended block-to-cell mapping.
__global__ void evolve9ptCUDA(double *un, double *uc, double *uo, double *pebbles, int n, int rank, double *row, double *col, double *indv, double h, double dt, double t) {
    // Flatten the 2D launch into a linear cell index, then derive (i, j).
    int idx = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
    int i = idx / n;
    int j = idx % n;
    if(!(i == 0 || i == n - 1 || j == 0 || j == n - 1)) {
        // Interior cell: all 8 neighbours are local to this quadrant.
        un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
    }
    else {
        // Code for the fringe regions goes here...
        switch (rank) {
        // Rank 1 = top-left quadrant: top/left are global edges (held at 0);
        // bottom/right edges read the received fringe row/col/diagonal.
        case 1:
            if (i==0 || j==0) {
                un[idx]=0.;
            }
            else if(i==n-1 && j==n-1) { // Bottom right corner
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + row[j] + uc[idx - n] + 0.25*(row[j-1] + *indv + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(j==n-1 && i<n-1) { // Right edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + col[i + 1] + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(i==n-1 && j<n-1) { // Bottom edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(row[j - 1] + row[j + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            break;
        // Rank 2 = top-right quadrant: top/right global, bottom/left fringed.
        case 2:
            if (i==0 || j==n-1){
                un[idx]=0.;
            }
            else if(i==n-1 && j==0) { // Bottom left corner
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(*indv + row[j + 1] + col[i - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(j==0 && i<n-1) { // Left edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(col[i+1] + uc[idx + n + 1] + col[i-1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(i==n-1 && j>0) { // Bottom Edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(row[j - 1] + row[j + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            break;
        // Rank 3 = bottom-left quadrant: bottom/left global, top/right fringed.
        case 3:
            if (i==n-1 || j==0){
                un[idx]=0.;
            }
            else if(i==0 && j==n-1) { // Top right corner
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + col[i+1] + row[j-1] + *indv)- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(j==n-1 && i>0) { // Left edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + col[i+1] + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(i==0 && j<n-1) { // Top Edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + row[j-1] + row[j+ 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            break;
        // Rank 4 = bottom-right quadrant: bottom/right global, top/left fringed.
        case 4:
            if (i==n-1 || j==n-1){
                un[idx]=0.;
            }
            else if(i==0 && j==0) { // Top left corner
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(col[i+1] + uc[idx+n+1] + *indv + row[j+1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(j==0 && i>0) { // Right edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(col[i+1] + uc[idx + n + 1] + col[i-1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            else if(i==0 && j>0) { // Top Edge
                un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + row[j-1] + row[j+1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
            }
            break;
        }
    }}
// Driver: rank 0 runs the serial CPU reference over the full npoints grid
// and scatters per-quadrant pebble data; ranks 1-4 each evolve one
// (npoints/2)^2 quadrant on the GPU, exchanging boundary fringes over MPI
// every timestep, and finally write their quadrant to lake_node_<rank>.dat.
int main(int argc, char *argv[])
{
    int numproc, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Status *status;
    MPI_Request *request;
    request = (MPI_Request *) malloc(numproc * sizeof(MPI_Request));
    status = (MPI_Status *) malloc(numproc * sizeof(MPI_Status));
    // Problem parameters are hard-coded; the commented atoi/atof calls show
    // the originally intended command-line equivalents.
    int npoints = 256;// atoi(argv[1]);
    int npebs = 3;// atoi(argv[2]);
    double end_time = 1.00;// (double)atof(argv[3]);
    int nthreads = 1024;// atoi(argv[4]);
    int narea = npoints * npoints;
    bool once = true;
    // Each worker rank owns one quadrant of the grid.
    int size=(npoints/2)*(npoints/2);
    double t, dt;
    double h = (XMAX - XMIN)/npoints;
    if (rank == 0) {
        double *u_i0, *u_i1;
        double *u_cpu, *u_gpu, *pebs;
        double *peb, *n_cpu; //1, *n_cpu2, *n_cpu3, *n_cpu4;
        double elapsed_cpu, elapsed_gpu;
        struct timeval cpu_start, cpu_end, gpu_start, gpu_end;
        peb = (double*)malloc(sizeof(double) * size);
        u_i0 = (double*)malloc(sizeof(double) * narea);
        u_i1 = (double*)malloc(sizeof(double) * narea);
        u_cpu = (double*)malloc(sizeof(double) * narea);
        u_gpu = (double*)malloc(sizeof(double) * narea);
        n_cpu = (double*)malloc(sizeof(double) * size);
        pebs = (double*)malloc(sizeof(double) * narea);
        printf("Rank0: Running a (%d x %d) grid, until %f, with %d threads\n", npoints, npoints, end_time, nthreads);
        init_pebbles(pebs, npebs, npoints);
        init(u_i0, pebs, npoints);
        init(u_i1, pebs, npoints);
        // Initial: full-grid CPU reference run, saved for comparison.
        run_cpu(u_gpu, u_i0, u_i1, pebs, npoints, h, end_time);
        print_heatmap("lake_cpu_f.dat", u_gpu, npoints, h);
        // Tranfer to MPI nodes: extract each rank's quadrant of the pebble
        // map and send it (message tag == destination rank).
        int i;
        for (i=1; i<numproc; i++) {
            transfer(pebs, peb, i, npoints, true); //get corresponding data
            MPI_Send(peb,size, MPI_DOUBLE, i, i, MPI_COMM_WORLD);
        }
        /*-----------------------------------*/
        /* Stitch individual nodes together */
        /*
        MPI_Recv(n_cpu, size, MPI_DOUBLE, 1, 11, MPI_COMM_WORLD, &status[1]);
        transfer(n_cpu, u_cpu, 1, npoints, false);
        MPI_Recv(n_cpu, size, MPI_DOUBLE, 2, 12, MPI_COMM_WORLD, &status[2]);
        transfer(n_cpu, u_cpu, 2, npoints, false);
        MPI_Recv(n_cpu, size, MPI_DOUBLE, 3, 13, MPI_COMM_WORLD, &status[3]);
        transfer(n_cpu, u_cpu, 3, npoints, false);
        MPI_Recv(n_cpu, size, MPI_DOUBLE, 4, 14, MPI_COMM_WORLD, &status[4]);
        transfer(n_cpu, u_cpu, 4, npoints, false);
        // Save final Image
        print_heatmap("lake_cpu_mpi.dat", u_cpu, npoints, h);
        */
        /*-----------------------------------*/
    }
    else {
        /* For Reference:
        + : Fringe edges
        X : Diagonal fringe point
        |``````````````````+|+````````````````|
        | +|+ |
        | +|+ |
        | Rank 1 +|+ Rank 2 |
        | +|+ |
        |++++++++++++++++++X|X++++++++++++++++|
        |++++++++++++++++++X|X++++++++++++++++|
        | +|+ |
        | +|+ |
        | Rank 3 +|+ Rank 4 |
        | +|+ |
        | +|+ |
        | +|+ |
        ```````````````````````````````````````
        */
        hipEvent_t kstart, kstop;
        float ktime;
        int number_amount;
        double *un , *u0 , *u1 , *uc , *uo, *pebble;
        double *d_un, *d_uc, *d_uo, *d_pebble, *d_temp;
        // Quadrant edge length.
        int n = npoints/2;
        u0 = (double*)malloc(sizeof(double) * n*n);
        u1 = (double*)malloc(sizeof(double) * n*n);
        un = (double*)malloc(sizeof(double) * n*n);
        uc = (double*)malloc(sizeof(double) * n*n);
        uo = (double*)malloc(sizeof(double) * n*n);
        pebble = (double*)malloc(sizeof(double) * n*n);
        // Receive this rank's pebble quadrant from rank 0 (tag == rank).
        MPI_Recv(pebble, size, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /*-----------------------------------*/
        /* Sanity Check!*/ /*
        MPI_Get_count(&status, MPI_INT, &number_amount);
        printf("1 received %d numbers from 0. Message source = %d, "
        "tag = %d\n",
        number_amount, status.MPI_SOURCE, status.MPI_TAG);
        /*-----------------------------------*/
        init(u0, pebble, npoints/2);
        init(u1, pebble, npoints/2);
        // Begin Timer
        t = 0.;
        dt = h / 2.;
        CUDA_CALL(hipSetDevice(0));
        CUDA_CALL(hipEventCreate(&kstart));
        CUDA_CALL(hipEventCreate(&kstop));
        hipMalloc((void **)&d_un, sizeof(double) * n * n);
        hipMalloc((void **)&d_uc, sizeof(double) * n * n);
        hipMalloc((void **)&d_uo, sizeof(double) * n * n);
        hipMalloc((void **)&d_pebble, sizeof(double) * n * n);
        hipMemcpy(d_uo, u0, sizeof(double) * n * n, hipMemcpyHostToDevice);
        hipMemcpy(d_uc, u1, sizeof(double) * n * n, hipMemcpyHostToDevice);
        hipMemcpy(d_pebble, pebble, sizeof(double) * n * n, hipMemcpyHostToDevice);
        // NOTE(review): nthreads=1024 makes a 1024x1024 thread block (2^20
        // threads); HIP/CUDA caps a block at 1024 threads total, so this
        // launch configuration looks invalid -- confirm intended semantics.
        dim3 block_dim(nthreads, nthreads,1);
        // NOTE(review): with n=128 and nthreads=1024, n/nthreads is 0,
        // yielding an empty grid -- verify the launch geometry.
        dim3 grid_dim(n/nthreads, n/nthreads,1);
        /* Start GPU computation timer */
        CUDA_CALL(hipEventRecord(kstart, 0));
        while(1) {
            // What to send where
            // NOTE(review): these per-iteration mallocs are never freed,
            // leaking one set of buffers per timestep.
            double *row, *col, *indv;
            row = (double*)malloc(sizeof(double) * npoints/2);
            col = (double*)malloc(sizeof(double) * npoints/2);
            indv = (double*)malloc(sizeof(double));
            int *hor, *ver, *diag;
            hor = (int*)malloc(sizeof(int));
            ver = (int*)malloc(sizeof(int));
            diag = (int*)malloc(sizeof(int));
            // dest() fills the outgoing fringe (row/col/corner) and the
            // neighbour ranks (hor/ver/diag) for this quadrant.
            dest(un, row, col, indv, hor, ver, diag, rank, npoints/2);
            // Send boundaries to respective neighbours
            MPI_Send(row , npoints/2, MPI_DOUBLE, *ver , rank, MPI_COMM_WORLD);
            MPI_Send(col , npoints/2, MPI_DOUBLE, *hor , rank, MPI_COMM_WORLD);
            MPI_Send(indv, 1, MPI_DOUBLE, *diag, rank, MPI_COMM_WORLD);
            // Compute turbulance: Receive neighbours
            MPI_Recv(row, npoints/2, MPI_DOUBLE, *hor, *hor, MPI_COMM_WORLD, &status[rank]);
            MPI_Recv(col, npoints/2, MPI_DOUBLE, *ver, *ver, MPI_COMM_WORLD, &status[rank]);
            MPI_Recv(indv, 1, MPI_DOUBLE, *diag, *diag, MPI_COMM_WORLD, &status[rank]);
            // Nine point stencil on CUDA cores
            // NOTE(review): row/col/indv are host malloc() buffers passed
            // directly to the kernel; device code cannot dereference host
            // pointers -- these likely need hipMalloc + hipMemcpy.
            hipLaunchKernelGGL(( evolve9ptCUDA), dim3(grid_dim), dim3(block_dim), 0, 0, d_un, d_uc, d_uo, d_pebble, n, rank, row, col, indv, h, dt, t);
            // Rotate the new/current/old buffers for the next timestep.
            d_temp = d_uc;
            d_uc = d_un;
            d_un = d_uo;
            d_uo = d_temp;
            if(!tpdt(&t,dt,end_time)) {
                break;
            }
        }
        hipMemcpy(un, d_un, sizeof(double) * n * n, hipMemcpyDeviceToHost);
        /*-----------------------------------*/
        // // Send final results to Rank 0.
        // MPI_Isend(un, size, MPI_DOUBLE, 0, rank+10, MPI_COMM_WORLD, &request[0]);
        // printf("Done\n");
        /*-----------------------------------*/
        // Initial Output files: each worker writes its own quadrant heatmap.
        char* s;
        s = (char*)malloc(sizeof(char)*17);
        int k = sprintf(s, "lake_node_%d.dat", rank);
        if (k>=0)
            print_heatmap(s, un, npoints/2, h);
        else {
            printf("Error in filename!\n");
            MPI_Finalize();
            return 0;
        }
    }
    MPI_Finalize();
    return 0;
}
/* Gather the fringe data this rank must send to its neighbours and report
 * who those neighbours are (quadrant layout: 1 2 / 3 4).
 * source       : this rank's (size x size) sub-grid
 * row          : filled with the boundary row facing the interior seam
 * col          : filled with the boundary column facing the interior seam
 * indv         : filled with the single diagonal fringe point
 * hor/ver/diag : set to neighbour ranks used as send/recv partners in main()
 * NOTE(review): *indv uses index (size-1)*(size-1)-1 for every rank, but the
 * corner point facing the diagonal neighbour differs per quadrant (e.g.
 * size*size-1 for rank 1) -- confirm against evolve9ptCUDA's use of *indv.
 * NOTE(review): cases 2 and 4 fill col[] from source[i] (the top row) rather
 * than a column such as source[i*size] -- confirm this is intended.
 */
void dest(double *source, double *row, double *col, double *indv, int *hor, int *ver, int *diag, int myrank, int size) {
int i, x, y;
switch (myrank) {
case 1:
/* top-left quadrant: bottom row and right column face the seam */
for(i=0; i<size; i++) {
x=size*(size-1)+i;
y=i*size+(size-1);
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 2;
*diag = 4;
*hor = 3;
break;
case 2:
/* top-right quadrant: bottom row faces the seam */
for(i=0; i<size; i++) {
x=size*(size-1)+i;
y=i;
row[i]=source[x];
col[i]=source[i];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 1;
*diag = 3;
*hor = 4;
break;
case 3:
/* bottom-left quadrant: top row and right column face the seam */
for(i=0; i<size; i++) {
x=i;
y=i*size+(size-1);
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 4;
*diag = 2;
*hor = 1;
break;
case 4:
/* bottom-right quadrant: top row faces the seam */
for(i=0; i<size; i++) {
x=i;
y=i;
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 3;
*diag = 1;
*hor = 2;
break;
}
}
/* Copy one quadrant between the full n x n grid and a rank's (n/2 x n/2)
 * sub-grid.  r in {1,2,3,4} selects the quadrant (row-major: 1 2 / 3 4).
 * dir == true  : full grid -> sub-grid (to is the sub-grid)
 * dir == false : sub-grid -> full grid (to is the full grid)
 * Fix: an unrecognised rank previously left idx_t/idx_f uninitialised and
 * still performed the copy (undefined behaviour); it now does nothing. */
void transfer(double *from, double *to, int r, int n, bool dir) {
    int x, y;
    const int half = n / 2;
    for (x = 0; x < half; x++) {
        for (y = 0; y < half; y++) {
            int idx_t = x * half + y;   /* sub-grid (quadrant-local) index */
            int idx_f;                  /* full-grid index */
            if (r == 1)
                idx_f = x * n + y;
            else if (r == 2)
                idx_f = x * n + half + y;
            else if (r == 3)
                idx_f = (x + half) * n + y;
            else if (r == 4)
                idx_f = (x + half) * n + y + half;
            else
                return;                 /* invalid rank: copy nothing */
            if (dir)
                to[idx_t] = from[idx_f];
            else
                to[idx_f] = from[idx_t];
        }
    }
}
/* Scatter pn pebbles of random size (0..MAX_PSZ-1) at random interior
 * positions of an n x n grid; every other cell is zeroed.
 * Non-deterministic: seeded from the wall clock. */
void init_pebbles(double *p, int pn, int n)
{
    int k;
    srand( time(NULL) );
    memset(p, 0, sizeof(double) * n * n);
    for (k = 0; k < pn; k++)
    {
        /* keep a 2-cell margin from every border */
        int row = rand() % (n - 4) + 2;
        int column = rand() % (n - 4) + 2;
        int pebble_size = rand() % MAX_PSZ;
        p[column + row * n] = (double) pebble_size;
    }
}
/* Pebble forcing term: the initial pebble impulse p decaying
 * exponentially in time t (rate TSCALE).
 * Fix: the original called the single-precision expf() on double
 * arguments, silently truncating to float precision; use exp(). */
double f(double p, double t)
{
    return -exp(-TSCALE * t) * p;
}
/* Advance simulation time *t by one step dt, never overshooting the
 * final time tf.  Returns 1 if the step was taken, 0 otherwise. */
int tpdt(double *t, double dt, double tf)
{
    double stepped = (*t) + dt;
    if (stepped > tf)
        return 0;
    *t = stepped;
    return 1;
}
/* Initialise grid u from the pebble map: u = f(pebbles, t=0) per cell. */
void init(double *u, double *pebbles, int n)
{
    int r, c;
    for (r = 0; r < n; r++)
    {
        double *urow = u + r * n;
        double *prow = pebbles + r * n;
        for (c = 0; c < n; c++)
        {
            urow[c] = f(prow[c], 0.0);
        }
    }
}
/* Reference CPU solver: leap-frog time stepping with the 9-point stencil
 * until end_time, writing the final field into u.
 * u0/u1 : the two initial time levels; h : grid spacing (dt = h/2).
 * Fix: the three scratch buffers were leaked on every call. */
void run_cpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time)
{
    double *un, *uc, *uo;
    double t, dt;
    un = (double*)malloc(sizeof(double) * n * n);
    uc = (double*)malloc(sizeof(double) * n * n);
    uo = (double*)malloc(sizeof(double) * n * n);
    memcpy(uo, u0, sizeof(double) * n * n);
    memcpy(uc, u1, sizeof(double) * n * n);
    t = 0.;
    dt = h / 2.;
    while(1)
    {
        evolve9pt_1(un, uc, uo, pebbles, n, h, dt, t);
        /* rotate time levels: old <- current <- new */
        memcpy(uo, uc, sizeof(double) * n * n);
        memcpy(uc, un, sizeof(double) * n * n);
        if(!tpdt(&t, dt, end_time)) break;
    }
    memcpy(u, un, sizeof(double) * n * n);
    free(un);
    free(uc);
    free(uo);
}
/* One 9-point-stencil leap-frog step of the wave equation on an n x n
 * grid: compute un (new) from uc (current) and uo (old), with pebble
 * forcing f() and wave-speed constant VSQR.  Boundary cells are clamped
 * to zero (Dirichlet). */
void evolve9pt_1(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t)
{
int i, j, idx;
for( i = 0; i < n; i++)
{
for( j = 0; j < n; j++)
{
idx = j + i * n;
if( i == 0 || i == n - 1 || j == 0 || j == n - 1)
{
/* outer boundary: held at zero */
un[idx] = 0.;
}
else
{
/* axial neighbours weight 1, diagonals 0.25, centre -5 */
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f(pebbles[idx],t));
}
}
}
}
/* Dump field u (n x n, spacing h) to filename as "x y value" triples,
 * one grid point per line.
 * Fix: check fopen() for failure instead of dereferencing a NULL FILE*. */
void print_heatmap(char *filename, double *u, int n, double h)
{
    int i, j, idx;
    FILE *fp = fopen(filename, "w");
    if (fp == NULL)
    {
        fprintf(stderr, "print_heatmap: cannot open '%s' for writing\n", filename);
        return;
    }
    for( i = 0; i < n; i++ )
    {
        for( j = 0; j < n; j++ )
        {
            idx = j + i * n;
            fprintf(fp, "%f %f %0.2e\n", i*h, j*h, u[idx]);
        }
    }
    fclose(fp);
}
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "mpi.h"
#include <stdbool.h>
#include <cuda_runtime.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define XMIN 0.0
#define XMAX 1.0
#define YMIN 0.0
#define YMAX 1.0
#define USE_MATH_DEFINES
#define MAX_PSZ 10
#define TSCALE 1.0
#define VSQR 0.1
#define NINEPTSTENCIL 1
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
void init(double *u, double *pebbles, int n);
void evolve(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t);
void evolve9pt(double *un, double *uc, double *uo, double *pebbles, int n, int rank, double *row, double *col, double *indv, double h, double dt, double t);
void evolve9pt_1(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t);
int tpdt(double *t, double dt, double end_time);
void print_heatmap(char *filename, double *u, int n, double h);
void init_pebbles(double *p, int pn, int n);
void run_cpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time);
void transfer(double *from, double *to, int r, int n, bool dir);
void dest(double *source, double *row, double *col, double *indv, int *hor, int *ver, int *diag, int rank, int size);
extern void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads);
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
/* Abort with file/line context if a CUDA runtime call returned an error.
 * Compiled in only under __DEBUG; invoked through the CUDA_CALL() macro. */
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
/* Check for a pending (sticky) CUDA error -- e.g. from a kernel launch --
 * and abort with file/line context if one is found.  Compiled in only
 * under __DEBUG; invoked through the CUDA_CHK_ERR() macro. */
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = cudaThreadSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
/* Device-side pebble forcing term (mirrors the host f()).
 * Fix: __expf is a single-precision fast-math intrinsic; both arguments
 * are double, so use exp() to keep double precision and to match the
 * host reference implementation. */
__device__ double f_CUDA(double p, double t)
{
    return -exp(-TSCALE * t) * p;
}
/* One 9-point leap-frog step on an n x n quadrant for MPI rank `rank`.
 * Interior points use the plain stencil; points on the interior seam of
 * each quadrant read halo data from row[], col[] and *indv, while the
 * outer physical boundary stays clamped to zero.
 * NOTE(review): the flattened idx formula mixes gridDim.x with blockIdx.y
 * and blockDim.x with threadIdx.y, so it is only consistent for a square
 * launch (gridDim.x == gridDim.y, blockDim.x == blockDim.y) -- verify
 * against the launch configuration in main().
 * NOTE(review): row/col/indv must be device-accessible; main() currently
 * passes host-malloc'd buffers -- confirm. */
__global__ void evolve9ptCUDA(double *un, double *uc, double *uo, double *pebbles, int n, int rank, double *row, double *col, double *indv, double h, double dt, double t) {
int idx = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
int i = idx / n;
int j = idx % n;
if(!(i == 0 || i == n - 1 || j == 0 || j == n - 1)) {
/* interior point: standard 9-point stencil */
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else {
// Code for the fringe regions goes here...
switch (rank) {
case 1:
/* top-left quadrant: physical boundary on top/left, halos on bottom/right */
if (i==0 || j==0) {
un[idx]=0.;
}
else if(i==n-1 && j==n-1) { // Bottom right corner
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + row[j] + uc[idx - n] + 0.25*(row[j-1] + *indv + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(j==n-1 && i<n-1) { // Right edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + col[i + 1] + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(i==n-1 && j<n-1) { // Bottom edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(row[j - 1] + row[j + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
break;
case 2:
/* top-right quadrant: physical boundary on top/right, halos on bottom/left */
if (i==0 || j==n-1){
un[idx]=0.;
}
else if(i==n-1 && j==0) { // Bottom left corner
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(*indv + row[j + 1] + col[i - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(j==0 && i<n-1) { // Left edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(col[i+1] + uc[idx + n + 1] + col[i-1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(i==n-1 && j>0) { // Bottom Edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + row[j] + uc[idx - n] + 0.25*(row[j - 1] + row[j + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
break;
case 3:
/* bottom-left quadrant: physical boundary on bottom/left, halos on top/right */
if (i==n-1 || j==0){
un[idx]=0.;
}
else if(i==0 && j==n-1) { // Top right corner
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + col[i+1] + row[j-1] + *indv)- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(j==n-1 && i>0) { // Left edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + col[i] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + col[i+1] + uc[idx - n - 1] + col[i-1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(i==0 && j<n-1) { // Top Edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + row[j-1] + row[j+ 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
break;
case 4:
/* bottom-right quadrant: physical boundary on bottom/right, halos on top/left */
if (i==n-1 || j==n-1){
un[idx]=0.;
}
else if(i==0 && j==0) { // Top left corner
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(col[i+1] + uc[idx+n+1] + *indv + row[j+1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(j==0 && i>0) { // Right edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((col[i] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(col[i+1] + uc[idx + n + 1] + col[i-1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
else if(i==0 && j>0) { // Top Edge
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + row[j] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + row[j-1] + row[j+1])- 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
}
break;
}
}}
/* Driver: rank 0 runs the CPU reference solve, scatters pebble quadrants
 * to ranks 1-4; each worker rank iterates halo exchange (MPI) + a CUDA
 * 9-point stencil kernel on its quadrant, then writes lake_node_<r>.dat.
 * NOTE(review): argv parsing is commented out; sizes are hard-coded. */
int main(int argc, char *argv[])
{
int numproc, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Status *status;
MPI_Request *request;
request = (MPI_Request *) malloc(numproc * sizeof(MPI_Request));
status = (MPI_Status *) malloc(numproc * sizeof(MPI_Status));
int npoints = 256;// atoi(argv[1]);
int npebs = 3;// atoi(argv[2]);
double end_time = 1.00;// (double)atof(argv[3]);
int nthreads = 1024;// atoi(argv[4]);
int narea = npoints * npoints;
/* NOTE(review): `once` is never used. */
bool once = true;
int size=(npoints/2)*(npoints/2);
double t, dt;
double h = (XMAX - XMIN)/npoints;
if (rank == 0) {
double *u_i0, *u_i1;
double *u_cpu, *u_gpu, *pebs;
double *peb, *n_cpu; //1, *n_cpu2, *n_cpu3, *n_cpu4;
double elapsed_cpu, elapsed_gpu;
struct timeval cpu_start, cpu_end, gpu_start, gpu_end;
peb = (double*)malloc(sizeof(double) * size);
u_i0 = (double*)malloc(sizeof(double) * narea);
u_i1 = (double*)malloc(sizeof(double) * narea);
u_cpu = (double*)malloc(sizeof(double) * narea);
u_gpu = (double*)malloc(sizeof(double) * narea);
n_cpu = (double*)malloc(sizeof(double) * size);
pebs = (double*)malloc(sizeof(double) * narea);
printf("Rank0: Running a (%d x %d) grid, until %f, with %d threads\n", npoints, npoints, end_time, nthreads);
init_pebbles(pebs, npebs, npoints);
init(u_i0, pebs, npoints);
init(u_i1, pebs, npoints);
// Initial
run_cpu(u_gpu, u_i0, u_i1, pebs, npoints, h, end_time);
print_heatmap("lake_cpu_f.dat", u_gpu, npoints, h);
// Tranfer to MPI nodes
int i;
for (i=1; i<numproc; i++) {
transfer(pebs, peb, i, npoints, true); //get corresponding data
MPI_Send(peb,size, MPI_DOUBLE, i, i, MPI_COMM_WORLD);
}
/*-----------------------------------*/
/* Stitch individual nodes together */
/*
MPI_Recv(n_cpu, size, MPI_DOUBLE, 1, 11, MPI_COMM_WORLD, &status[1]);
transfer(n_cpu, u_cpu, 1, npoints, false);
MPI_Recv(n_cpu, size, MPI_DOUBLE, 2, 12, MPI_COMM_WORLD, &status[2]);
transfer(n_cpu, u_cpu, 2, npoints, false);
MPI_Recv(n_cpu, size, MPI_DOUBLE, 3, 13, MPI_COMM_WORLD, &status[3]);
transfer(n_cpu, u_cpu, 3, npoints, false);
MPI_Recv(n_cpu, size, MPI_DOUBLE, 4, 14, MPI_COMM_WORLD, &status[4]);
transfer(n_cpu, u_cpu, 4, npoints, false);
// Save final Image
print_heatmap("lake_cpu_mpi.dat", u_cpu, npoints, h);
*/
/*-----------------------------------*/
}
else {
/* For Reference:
+ : Fringe edges
X : Diagonal fringe point
|``````````````````+|+````````````````|
| +|+ |
| +|+ |
| Rank 1 +|+ Rank 2 |
| +|+ |
|++++++++++++++++++X|X++++++++++++++++|
|++++++++++++++++++X|X++++++++++++++++|
| +|+ |
| +|+ |
| Rank 3 +|+ Rank 4 |
| +|+ |
| +|+ |
| +|+ |
```````````````````````````````````````
*/
cudaEvent_t kstart, kstop;
float ktime;
int number_amount;
double *un , *u0 , *u1 , *uc , *uo, *pebble;
double *d_un, *d_uc, *d_uo, *d_pebble, *d_temp;
int n = npoints/2;
u0 = (double*)malloc(sizeof(double) * n*n);
u1 = (double*)malloc(sizeof(double) * n*n);
un = (double*)malloc(sizeof(double) * n*n);
uc = (double*)malloc(sizeof(double) * n*n);
uo = (double*)malloc(sizeof(double) * n*n);
pebble = (double*)malloc(sizeof(double) * n*n);
MPI_Recv(pebble, size, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
/*-----------------------------------*/
/* Sanity Check!*/ /*
MPI_Get_count(&status, MPI_INT, &number_amount);
printf("1 received %d numbers from 0. Message source = %d, "
"tag = %d\n",
number_amount, status.MPI_SOURCE, status.MPI_TAG);
/*-----------------------------------*/
init(u0, pebble, npoints/2);
init(u1, pebble, npoints/2);
// Begin Timer
t = 0.;
dt = h / 2.;
CUDA_CALL(cudaSetDevice(0));
CUDA_CALL(cudaEventCreate(&kstart));
CUDA_CALL(cudaEventCreate(&kstop));
/* NOTE(review): none of the cudaMalloc/cudaMemcpy calls below are
 * error-checked with CUDA_CALL. */
cudaMalloc((void **)&d_un, sizeof(double) * n * n);
cudaMalloc((void **)&d_uc, sizeof(double) * n * n);
cudaMalloc((void **)&d_uo, sizeof(double) * n * n);
cudaMalloc((void **)&d_pebble, sizeof(double) * n * n);
cudaMemcpy(d_uo, u0, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_uc, u1, sizeof(double) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_pebble, pebble, sizeof(double) * n * n, cudaMemcpyHostToDevice);
/* NOTE(review): with nthreads=1024 this requests a 1024x1024-thread
 * block (hardware limit is 1024 threads per block total) and
 * grid_dim becomes 0 when n < nthreads -- launch config needs rework. */
dim3 block_dim(nthreads, nthreads,1);
dim3 grid_dim(n/nthreads, n/nthreads,1);
/* Start GPU computation timer */
CUDA_CALL(cudaEventRecord(kstart, 0));
/* NOTE(review): row/col/indv/hor/ver/diag are malloc'd on every loop
 * iteration and never freed (leak), and dest() reads un[] before it
 * has ever been written on the first iteration -- verify. */
while(1) {
// What to send where
double *row, *col, *indv;
row = (double*)malloc(sizeof(double) * npoints/2);
col = (double*)malloc(sizeof(double) * npoints/2);
indv = (double*)malloc(sizeof(double));
int *hor, *ver, *diag;
hor = (int*)malloc(sizeof(int));
ver = (int*)malloc(sizeof(int));
diag = (int*)malloc(sizeof(int));
dest(un, row, col, indv, hor, ver, diag, rank, npoints/2);
// Send boundaries to respective neighbours
MPI_Send(row , npoints/2, MPI_DOUBLE, *ver , rank, MPI_COMM_WORLD);
MPI_Send(col , npoints/2, MPI_DOUBLE, *hor , rank, MPI_COMM_WORLD);
MPI_Send(indv, 1, MPI_DOUBLE, *diag, rank, MPI_COMM_WORLD);
// Compute turbulance: Receive neighbours
MPI_Recv(row, npoints/2, MPI_DOUBLE, *hor, *hor, MPI_COMM_WORLD, &status[rank]);
MPI_Recv(col, npoints/2, MPI_DOUBLE, *ver, *ver, MPI_COMM_WORLD, &status[rank]);
MPI_Recv(indv, 1, MPI_DOUBLE, *diag, *diag, MPI_COMM_WORLD, &status[rank]);
// Nine point stencil on CUDA cores
/* NOTE(review): row/col/indv are host pointers; device code cannot
 * dereference them -- they should be copied to device memory first.
 * No cudaGetLastError() follows the launch. */
evolve9ptCUDA<<<grid_dim, block_dim>>>(d_un, d_uc, d_uo, d_pebble, n, rank, row, col, indv, h, dt, t);
d_temp = d_uc;
d_uc = d_un;
d_un = d_uo;
d_uo = d_temp;
if(!tpdt(&t,dt,end_time)) {
break;
}
}
/* NOTE(review): kstop is created but never recorded and ktime is never
 * computed; the events are also never destroyed. */
cudaMemcpy(un, d_un, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
/*-----------------------------------*/
// // Send final results to Rank 0.
// MPI_Isend(un, size, MPI_DOUBLE, 0, rank+10, MPI_COMM_WORLD, &request[0]);
// printf("Done\n");
/*-----------------------------------*/
// Initial Output files
char* s;
s = (char*)malloc(sizeof(char)*17);
int k = sprintf(s, "lake_node_%d.dat", rank);
if (k>=0)
print_heatmap(s, un, npoints/2, h);
else {
printf("Error in filename!\n");
MPI_Finalize();
return 0;
}
}
MPI_Finalize();
return 0;
}
/* Gather the fringe data this rank must send to its neighbours and report
 * who those neighbours are (quadrant layout: 1 2 / 3 4).
 * source       : this rank's (size x size) sub-grid
 * row          : filled with the boundary row facing the interior seam
 * col          : filled with the boundary column facing the interior seam
 * indv         : filled with the single diagonal fringe point
 * hor/ver/diag : set to neighbour ranks used as send/recv partners in main()
 * NOTE(review): *indv uses index (size-1)*(size-1)-1 for every rank, but the
 * corner point facing the diagonal neighbour differs per quadrant (e.g.
 * size*size-1 for rank 1) -- confirm against evolve9ptCUDA's use of *indv.
 * NOTE(review): cases 2 and 4 fill col[] from source[i] (the top row) rather
 * than a column such as source[i*size] -- confirm this is intended.
 */
void dest(double *source, double *row, double *col, double *indv, int *hor, int *ver, int *diag, int myrank, int size) {
int i, x, y;
switch (myrank) {
case 1:
/* top-left quadrant: bottom row and right column face the seam */
for(i=0; i<size; i++) {
x=size*(size-1)+i;
y=i*size+(size-1);
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 2;
*diag = 4;
*hor = 3;
break;
case 2:
/* top-right quadrant: bottom row faces the seam */
for(i=0; i<size; i++) {
x=size*(size-1)+i;
y=i;
row[i]=source[x];
col[i]=source[i];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 1;
*diag = 3;
*hor = 4;
break;
case 3:
/* bottom-left quadrant: top row and right column face the seam */
for(i=0; i<size; i++) {
x=i;
y=i*size+(size-1);
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 4;
*diag = 2;
*hor = 1;
break;
case 4:
/* bottom-right quadrant: top row faces the seam */
for(i=0; i<size; i++) {
x=i;
y=i;
row[i]=source[x];
col[i]=source[y];
}
*indv = source[(size-1)*(size-1)-1];
*ver = 3;
*diag = 1;
*hor = 2;
break;
}
}
/* Copy one quadrant between the full n x n grid and a rank's (n/2 x n/2)
 * sub-grid.  r in {1,2,3,4} selects the quadrant (row-major: 1 2 / 3 4).
 * dir == true  : full grid -> sub-grid (to is the sub-grid)
 * dir == false : sub-grid -> full grid (to is the full grid)
 * Fix: an unrecognised rank previously left idx_t/idx_f uninitialised and
 * still performed the copy (undefined behaviour); it now does nothing. */
void transfer(double *from, double *to, int r, int n, bool dir) {
    int x, y;
    const int half = n / 2;
    for (x = 0; x < half; x++) {
        for (y = 0; y < half; y++) {
            int idx_t = x * half + y;   /* sub-grid (quadrant-local) index */
            int idx_f;                  /* full-grid index */
            if (r == 1)
                idx_f = x * n + y;
            else if (r == 2)
                idx_f = x * n + half + y;
            else if (r == 3)
                idx_f = (x + half) * n + y;
            else if (r == 4)
                idx_f = (x + half) * n + y + half;
            else
                return;                 /* invalid rank: copy nothing */
            if (dir)
                to[idx_t] = from[idx_f];
            else
                to[idx_f] = from[idx_t];
        }
    }
}
/* Scatter pn pebbles of random size (0..MAX_PSZ-1) at random interior
 * positions of an n x n grid; every other cell is zeroed.
 * Non-deterministic: seeded from the wall clock. */
void init_pebbles(double *p, int pn, int n)
{
    int k;
    srand( time(NULL) );
    memset(p, 0, sizeof(double) * n * n);
    for (k = 0; k < pn; k++)
    {
        /* keep a 2-cell margin from every border */
        int row = rand() % (n - 4) + 2;
        int column = rand() % (n - 4) + 2;
        int pebble_size = rand() % MAX_PSZ;
        p[column + row * n] = (double) pebble_size;
    }
}
/* Pebble forcing term: the initial pebble impulse p decaying
 * exponentially in time t (rate TSCALE).
 * Fix: the original called the single-precision expf() on double
 * arguments, silently truncating to float precision; use exp(). */
double f(double p, double t)
{
    return -exp(-TSCALE * t) * p;
}
/* Advance simulation time *t by one step dt, never overshooting the
 * final time tf.  Returns 1 if the step was taken, 0 otherwise. */
int tpdt(double *t, double dt, double tf)
{
    double stepped = (*t) + dt;
    if (stepped > tf)
        return 0;
    *t = stepped;
    return 1;
}
/* Initialise grid u from the pebble map: u = f(pebbles, t=0) per cell. */
void init(double *u, double *pebbles, int n)
{
    int r, c;
    for (r = 0; r < n; r++)
    {
        double *urow = u + r * n;
        double *prow = pebbles + r * n;
        for (c = 0; c < n; c++)
        {
            urow[c] = f(prow[c], 0.0);
        }
    }
}
/* Reference CPU solver: leap-frog time stepping with the 9-point stencil
 * until end_time, writing the final field into u.
 * u0/u1 : the two initial time levels; h : grid spacing (dt = h/2).
 * Fix: the three scratch buffers were leaked on every call. */
void run_cpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time)
{
    double *un, *uc, *uo;
    double t, dt;
    un = (double*)malloc(sizeof(double) * n * n);
    uc = (double*)malloc(sizeof(double) * n * n);
    uo = (double*)malloc(sizeof(double) * n * n);
    memcpy(uo, u0, sizeof(double) * n * n);
    memcpy(uc, u1, sizeof(double) * n * n);
    t = 0.;
    dt = h / 2.;
    while(1)
    {
        evolve9pt_1(un, uc, uo, pebbles, n, h, dt, t);
        /* rotate time levels: old <- current <- new */
        memcpy(uo, uc, sizeof(double) * n * n);
        memcpy(uc, un, sizeof(double) * n * n);
        if(!tpdt(&t, dt, end_time)) break;
    }
    memcpy(u, un, sizeof(double) * n * n);
    free(un);
    free(uc);
    free(uo);
}
/* One 9-point-stencil leap-frog step of the wave equation on an n x n
 * grid: compute un (new) from uc (current) and uo (old), with pebble
 * forcing f() and wave-speed constant VSQR.  Boundary cells are clamped
 * to zero (Dirichlet). */
void evolve9pt_1(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t)
{
int i, j, idx;
for( i = 0; i < n; i++)
{
for( j = 0; j < n; j++)
{
idx = j + i * n;
if( i == 0 || i == n - 1 || j == 0 || j == n - 1)
{
/* outer boundary: held at zero */
un[idx] = 0.;
}
else
{
/* axial neighbours weight 1, diagonals 0.25, centre -5 */
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n] + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])- 5 * uc[idx])/(h * h) + f(pebbles[idx],t));
}
}
}
}
/* Dump field u (n x n, spacing h) to filename as "x y value" triples,
 * one grid point per line.
 * Fix: check fopen() for failure instead of dereferencing a NULL FILE*. */
void print_heatmap(char *filename, double *u, int n, double h)
{
    int i, j, idx;
    FILE *fp = fopen(filename, "w");
    if (fp == NULL)
    {
        fprintf(stderr, "print_heatmap: cannot open '%s' for writing\n", filename);
        return;
    }
    for( i = 0; i < n; i++ )
    {
        for( j = 0; j < n; j++ )
        {
            idx = j + i * n;
            fprintf(fp, "%f %f %0.2e\n", i*h, j*h, u[idx]);
        }
    }
    fclose(fp);
}
// !!! This is a file automatically generated by hipify!!!
#include "header_hip.cuh"
#include "gpu_memory.cuh"
/* Linearly interpolate the H2 adiabatic index gammaH2_1 and its
 * temperature derivative from the tabulated rate data at temperature T.
 * Tables are log-spaced in T; the bin index is clamped to the table range.
 * Cleanup: dropped unused locals (tid, zbin_id, no_photo). */
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
    int bin_id;
    double t1, t2;
    double Tdef, log_temp_out;
    double lb = log(rate_data->bounds[0]);
    log_temp_out = log(T);
    /* locate the table bin and clamp it to [0, nbins-1] */
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    Tdef = (log_temp_out - t1)/(t2 - t1);   /* fractional position within the bin */
    *gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
    *dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
/* Newton-iterate the gas temperature to consistency with the internal
 * energy y[INDEX(9)] (ge) and the species abundances y[0..8], accounting
 * for the temperature-dependent H2 gamma.  On exit *T holds the converged
 * temperature and *dTs_ge = 1/(dge/dT) for reuse by the caller.
 * mdensity : mass density; rate_data : tabulated gamma data. */
__device__ void evaluate_temperature( double* T, double* dTs_ge, const double *y, const double mdensity, cvklu_data *rate_data )
{
// iterate temperature to convergence
double t, tnew, tdiff;
double dge, dge_dT;
double gammaH2, dgammaH2_dT, _gammaH2_m1;
int count = 0;
int MAX_ITERATION = 100;
double gamma = 5./3.;
double _gamma_m1 = 1.0 / (gamma - 1.0);
double kb = 1.3806504e-16; // Boltzamann constant [erg/K]
// prepare t, tnew for the newton's iteration;
t = *T;
/* NaN guard: t != t only when t is NaN; restart from 1000 K */
if (t != t) t = 1000.0;
tnew = 1.1*t;
tdiff = tnew - t;
/* converge to 0.1% relative change */
while ( tdiff/ tnew > 0.001 ){
// We do Newton's Iteration to calculate the temperature
// Since gammaH2 is dependent on the temperature too!
interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
_gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
/* dge/dT: species 0,1 (H2) use the T-dependent gammaH2; the rest use 5/3 */
dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
+ kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
+ y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
/* residual: ge(t) - ge_target */
dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
+ y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
//This is the change in ge for each iteration
tnew = t - dge/dge_dT;
count += 1;
tdiff = fabs(t - tnew);
t = tnew;
if (count > MAX_ITERATION){
printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
}
/* NaN recovery: dump state from thread 0 and bail out of the loop */
if ( t!= t && T_ID == 0){
printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
t = 1000.0;
for (int i = 0; i < 10; i++){
printf("y[INDEX(%d)] = %0.5g \n", i, y[INDEX(i)]);
}
break;
}
}
// update the temperature;
*T = t;
*dTs_ge = 1.0 / dge_dT;
// printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", tid, t, count, y[INDEX(9)], gammaH2);
}
/* Linearly interpolate the chemical reaction rates k01..k22 at
 * temperature temp_out from log-spaced lookup tables into
 * reaction_rates_out (one slot per rate via the INDEX macro).
 * NOTE(review): slots 19 and 22 (k20, k23) are commented out and never
 * written -- confirm downstream code does not read them. */
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, invTs, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
/* locate the table bin and clamp it to [0, nbins-1] */
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
invTs = 1.0 / temp_out;
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
reaction_rates_out[INDEX( 0)] = rate_data->r_k01[bin_id] + Tdef * (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]);
reaction_rates_out[INDEX( 1)] = rate_data->r_k02[bin_id] + Tdef * (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]);
reaction_rates_out[INDEX( 2)] = rate_data->r_k03[bin_id] + Tdef * (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]);
reaction_rates_out[INDEX( 3)] = rate_data->r_k04[bin_id] + Tdef * (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]);
reaction_rates_out[INDEX( 4)] = rate_data->r_k05[bin_id] + Tdef * (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]);
reaction_rates_out[INDEX( 5)] = rate_data->r_k06[bin_id] + Tdef * (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]);
reaction_rates_out[INDEX( 6)] = rate_data->r_k07[bin_id] + Tdef * (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]);
reaction_rates_out[INDEX( 7)] = rate_data->r_k08[bin_id] + Tdef * (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]);
reaction_rates_out[INDEX( 8)] = rate_data->r_k09[bin_id] + Tdef * (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]);
reaction_rates_out[INDEX( 9)] = rate_data->r_k10[bin_id] + Tdef * (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]);
reaction_rates_out[INDEX(10)] = rate_data->r_k11[bin_id] + Tdef * (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]);
reaction_rates_out[INDEX(11)] = rate_data->r_k12[bin_id] + Tdef * (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]);
reaction_rates_out[INDEX(12)] = rate_data->r_k13[bin_id] + Tdef * (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]);
reaction_rates_out[INDEX(13)] = rate_data->r_k14[bin_id] + Tdef * (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]);
reaction_rates_out[INDEX(14)] = rate_data->r_k15[bin_id] + Tdef * (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]);
reaction_rates_out[INDEX(15)] = rate_data->r_k16[bin_id] + Tdef * (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]);
reaction_rates_out[INDEX(16)] = rate_data->r_k17[bin_id] + Tdef * (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]);
reaction_rates_out[INDEX(17)] = rate_data->r_k18[bin_id] + Tdef * (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]);
reaction_rates_out[INDEX(18)] = rate_data->r_k19[bin_id] + Tdef * (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]);
//reaction_rates_out[INDEX(19)] = rate_data->r_k20[bin_id] + Tdef * (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]);
reaction_rates_out[INDEX(20)] = rate_data->r_k21[bin_id] + Tdef * (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]);
reaction_rates_out[INDEX(21)] = rate_data->r_k22[bin_id] + Tdef * (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]);
//reaction_rates_out[INDEX(22)] = rate_data->r_k23[bin_id] + Tdef * (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]);
}
/* Linearly interpolate the 27 cooling/heating rate coefficients at
 * temperature temp_out from log-spaced lookup tables into
 * cooling_rates_out (indexed via the INDEX macro); slot 26 is a fixed
 * placeholder of 1.0. */
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
/* locate the table bin and clamp it to [0, nbins-1] */
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
// Interpolate the temperature derivative dRate/dT of every cooling-rate
// table at temperature `temp_out`.
//
// The tables are sampled on a uniform grid in ln(T), so the derivative of
// the linearly-interpolated value is
//     (rate[b+1] - rate[b]) / d(lnT) * d(lnT)/dT,
// with d(lnT)/dT = 1/T: each finite difference is divided first by the
// log-space bin width and then by T.
//
// dcr_dT    : output array, one slot per cooling rate (addressed via INDEX()).
// temp_out  : temperature [K] at which to evaluate the derivatives.
// rate_data : tabulated chemistry data (grid bounds, bin layout, rate tables).
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
    int bin_id;
    double t1, t2, dT, Tval, log_temp_out;
    double lb = log(rate_data->bounds[0]);
    log_temp_out = log(temp_out);
    // Locate the ln(T) bin and clamp to the table range.
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    dT   = t2 - t1;   // bin width in ln(T)
    Tval = temp_out;  // dividing by T applies the d(lnT)/dT = 1/T chain rule
    // Finite difference across the bin / d(lnT) / T. Divisions are written
    // left-to-right so the floating-point evaluation order matches the
    // original  x = diff; x /= dT; x /= T;  sequence exactly.
#define DCR_DT_ENTRY(i, field) \
    dcr_dT[INDEX(i)] = (rate_data->field[bin_id+1] - rate_data->field[bin_id]) / dT / Tval
    DCR_DT_ENTRY( 0, c_ceHI_ceHI);
    DCR_DT_ENTRY( 1, c_ceHeI_ceHeI);
    DCR_DT_ENTRY( 2, c_ceHeII_ceHeII);
    DCR_DT_ENTRY( 3, c_ciHeIS_ciHeIS);
    DCR_DT_ENTRY( 4, c_ciHI_ciHI);
    DCR_DT_ENTRY( 5, c_ciHeI_ciHeI);
    DCR_DT_ENTRY( 6, c_ciHeII_ciHeII);
    DCR_DT_ENTRY( 7, c_reHII_reHII);
    DCR_DT_ENTRY( 8, c_reHeII1_reHeII1);
    DCR_DT_ENTRY( 9, c_reHeII2_reHeII2);
    DCR_DT_ENTRY(10, c_reHeIII_reHeIII);
    DCR_DT_ENTRY(11, c_brem_brem);
    DCR_DT_ENTRY(12, c_gloverabel08_gaHI);
    DCR_DT_ENTRY(13, c_gloverabel08_gaH2);
    DCR_DT_ENTRY(14, c_gloverabel08_gaHe);
    DCR_DT_ENTRY(15, c_gloverabel08_gaHp);
    DCR_DT_ENTRY(16, c_gloverabel08_gael);
    DCR_DT_ENTRY(17, c_gloverabel08_h2lte);
    DCR_DT_ENTRY(18, c_compton_comp_);
    DCR_DT_ENTRY(19, c_gammah_gammah);
    DCR_DT_ENTRY(20, c_h2formation_h2mheat);
    DCR_DT_ENTRY(21, c_h2formation_h2mcool);
    DCR_DT_ENTRY(22, c_h2formation_ncrn);
    DCR_DT_ENTRY(23, c_h2formation_ncrd1);
    DCR_DT_ENTRY(24, c_h2formation_ncrd2);
    DCR_DT_ENTRY(25, c_cie_cooling_cieco);
#undef DCR_DT_ENTRY
    // cie_optical_depth_approx (slot 26) is held constant at 1.0 by
    // interpolate_cooling_rates, so its temperature derivative is zero.
    dcr_dT[INDEX(26)] = 0.0;
}
// Interpolate the temperature derivative dRate/dT of every reaction-rate
// table at temperature `temp_out`.
//
// Like the cooling tables, the reaction rates are sampled on a uniform
// ln(T) grid, so each finite difference is divided by the log-space bin
// width and then by T (chain rule d(lnT)/dT = 1/T).
//
// drr_dT    : output array, one slot per reaction rate (addressed via INDEX()).
// temp_out  : temperature [K] at which to evaluate the derivatives.
// rate_data : tabulated chemistry data (grid bounds, bin layout, rate tables).
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
    int bin_id;
    double t1, t2, dT, Tval, log_temp_out;
    double lb = log(rate_data->bounds[0]);
    log_temp_out = log(temp_out);
    // Locate the ln(T) bin and clamp to the table range.
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    dT   = t2 - t1;   // bin width in ln(T)
    Tval = temp_out;  // dividing by T applies the d(lnT)/dT = 1/T chain rule
    // Finite difference across the bin / d(lnT) / T; left-to-right division
    // matches the original  x = diff; x /= dT; x /= T;  evaluation order.
#define DRR_DT_ENTRY(i, field) \
    drr_dT[INDEX(i)] = (rate_data->field[bin_id+1] - rate_data->field[bin_id]) / dT / Tval
    DRR_DT_ENTRY( 0, r_k01);
    DRR_DT_ENTRY( 1, r_k02);
    DRR_DT_ENTRY( 2, r_k03);
    DRR_DT_ENTRY( 3, r_k04);
    DRR_DT_ENTRY( 4, r_k05);
    DRR_DT_ENTRY( 5, r_k06);
    DRR_DT_ENTRY( 6, r_k07);
    DRR_DT_ENTRY( 7, r_k08);
    DRR_DT_ENTRY( 8, r_k09);
    DRR_DT_ENTRY( 9, r_k10);
    DRR_DT_ENTRY(10, r_k11);
    DRR_DT_ENTRY(11, r_k12);
    DRR_DT_ENTRY(12, r_k13);
    DRR_DT_ENTRY(13, r_k14);
    DRR_DT_ENTRY(14, r_k15);
    DRR_DT_ENTRY(15, r_k16);
    DRR_DT_ENTRY(16, r_k17);
    DRR_DT_ENTRY(17, r_k18);
    DRR_DT_ENTRY(18, r_k19);
    // k20 (slot 19) and k23 (slot 22) are excluded from this network (their
    // interpolation was commented out upstream). Zero the slots so downstream
    // consumers never read uninitialized memory.
    drr_dT[INDEX(19)] = 0.0;
    DRR_DT_ENTRY(20, r_k21);
    DRR_DT_ENTRY(21, r_k22);
    drr_dT[INDEX(22)] = 0.0;
#undef DRR_DT_ENTRY
}
// Right-hand side dy/dt of the 10-species primordial-chemistry network.
// Species order (from the dy[] assignments below):
//   0: H2_1  1: H2_2  2: H_1  3: H_2  4: H_m0  5: He_1  6: He_2  7: He_3
//   8: de (electrons)  9: ge (gas energy)
// t     : current integration time (used only in the diagnostic printfs).
// pres  : pressure argument required by the integrator interface (unused here).
// y_in  : state vector, possibly in scaled code units (see SCALE_INPUT).
// dy    : output derivative vector.
// d_mem : per-thread workspace, rate buffers, and chemistry tables.
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
// NOTE(review): tid is unused below (indexing goes through the T_ID macro).
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// int NSPECIES = 10;
// Table sizes; not referenced in this body, kept for documentation.
int NRATE = 23;
int NCOOL = 26;
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retrieved from the d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
double mdensity = d_mem->density[T_ID];
double inv_mdensity = 1.0 / mdensity;
double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
double * __restrict__ scale = d_mem->scale;
double * __restrict__ inv_scale = d_mem->inv_scale;
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)];
}
#endif
// Update T_local/Tge from the current state, then refresh the interpolated
// reaction and cooling rates at that temperature.
evaluate_temperature ( &T_local, &Tge, y, mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] - local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] + local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] + local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] - local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] + local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] + local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)];
//# 9: ge
// Gas-energy equation: sum of cooling/heating terms (CIE cooling, H2 line
// cooling with optical-depth correction, collisional excitation/ionization,
// recombination, bremsstrahlung, Compton, and H2 formation heating).
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*( T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
// Convert energy rate from per-volume to specific (per-mass) units.
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
// scaling the dydt vector back to code units
for (int i = 0; i< 10; i++){
dy[INDEX(i)] *= inv_scale[INDEX(i)];
}
#endif
// NOTE(review): the two blocks below are debug diagnostics that run on every
// RHS evaluation; device printf serializes output and will dominate runtime —
// consider compiling them out for production.
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
// printf(" \n");
// }
}
| bdc4b40c668d74c42b32bd6df1ab6331b5a55021.cu | #include "header.cuh"
#include "gpu_memory.cuh"
// Linearly interpolate the H2 adiabatic index gamma(T) and its temperature
// derivative from tables sampled on a uniform ln(T) grid.
//
// rate_data : tabulated chemistry data (grid bounds, bin layout, gamma tables).
// T         : temperature [K] at which to evaluate.
// gamma     : output, interpolated g_gammaH2_1 value.
// dgamma_dT : output, interpolated g_dgammaH2_1_dT value.
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
    int bin_id;
    double t1, t2, Tdef, log_temp_out;
    double lb = log(rate_data->bounds[0]);
    log_temp_out = log(T);
    // Locate the ln(T) bin and clamp to the table range.
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    // Fractional position of ln(T) inside the bin.
    Tdef = (log_temp_out - t1)/(t2 - t1);
    *gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
    *dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
// Solve for the gas temperature consistent with the specific energy y[9]
// using Newton's iteration (gammaH2 itself depends on T, so the energy
// relation is nonlinear in T).
//
// T        : in/out, temperature estimate [K]; NaN input is reset to 1000 K.
// dTs_ge   : out, dT/d(ge) = 1 / (d(ge)/dT) at the converged temperature.
// y        : species state vector in cgs units.
// mdensity : mass density.
// rate_data: tabulated chemistry data (for the gammaH2 interpolation).
__device__ void evaluate_temperature( double* T, double* dTs_ge, const double *y, const double mdensity, cvklu_data *rate_data )
{
    // iterate temperature to convergence
    double t, tnew, tdiff;
    double dge, dge_dT;
    double gammaH2, dgammaH2_dT, _gammaH2_m1;
    int count = 0;
    int MAX_ITERATION = 100;
    double gamma = 5./3.;
    double _gamma_m1 = 1.0 / (gamma - 1.0);
    double kb = 1.3806504e-16; // Boltzmann constant [erg/K]
    // Prepare t, tnew for the Newton iteration; guard against a NaN input.
    t = *T;
    if (t != t) t = 1000.0;
    tnew = 1.1*t;
    tdiff = tnew - t; // guarantees at least one iteration below
    while ( tdiff/ tnew > 0.001 ){
        // Newton's iteration on ge(T) - y[9] = 0.
        // gammaH2 is temperature dependent, so it is re-interpolated each step.
        interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
        _gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
        // d(ge)/dT: the gammaH2 terms carry an extra chain-rule piece.
        dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
               + kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
               + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
        // Residual: ge(t) minus the target specific energy.
        dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
            + y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
        // Newton update.
        tnew = t - dge/dge_dT;
        count += 1;
        tdiff = fabs(t - tnew);
        t = tnew;
        if (count > MAX_ITERATION){
            printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
            // Bail out with the current estimate: without this break a
            // non-converging thread (e.g. t gone NaN on tid != 0) would
            // spin in this loop forever.
            break;
        }
        if ( t!= t && T_ID == 0){
            printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
            t = 1000.0;
            for (int i = 0; i < 10; i++){
                printf("y[INDEX(%d)] = %0.5g \n", i, y[INDEX(i)]);
            }
            break;
        }
    }
    // Publish the converged temperature and dT/d(ge).
    *T = t;
    *dTs_ge = 1.0 / dge_dT;
}
// Linearly interpolate every reaction-rate table at temperature `temp_out`.
//
// Tables are sampled on a uniform ln(T) grid; each output is
//     rate[b] + Tdef * (rate[b+1] - rate[b])
// where Tdef is the fractional position of ln(T) inside bin b.
//
// reaction_rates_out : output array, one slot per rate (addressed via INDEX()).
// temp_out           : temperature [K] at which to evaluate.
// rate_data          : tabulated chemistry data (bounds, bin layout, tables).
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
    int bin_id;
    double t1, t2, Tdef, log_temp_out;
    double lb = log(rate_data->bounds[0]);
    log_temp_out = log(temp_out);
    // Locate the ln(T) bin and clamp to the table range.
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    // Fractional position of ln(T) inside the bin.
    Tdef = (log_temp_out - t1)/(t2 - t1);
#define RXN_RATE_ENTRY(i, field) \
    reaction_rates_out[INDEX(i)] = rate_data->field[bin_id] + Tdef * (rate_data->field[bin_id+1] - rate_data->field[bin_id])
    RXN_RATE_ENTRY( 0, r_k01);
    RXN_RATE_ENTRY( 1, r_k02);
    RXN_RATE_ENTRY( 2, r_k03);
    RXN_RATE_ENTRY( 3, r_k04);
    RXN_RATE_ENTRY( 4, r_k05);
    RXN_RATE_ENTRY( 5, r_k06);
    RXN_RATE_ENTRY( 6, r_k07);
    RXN_RATE_ENTRY( 7, r_k08);
    RXN_RATE_ENTRY( 8, r_k09);
    RXN_RATE_ENTRY( 9, r_k10);
    RXN_RATE_ENTRY(10, r_k11);
    RXN_RATE_ENTRY(11, r_k12);
    RXN_RATE_ENTRY(12, r_k13);
    RXN_RATE_ENTRY(13, r_k14);
    RXN_RATE_ENTRY(14, r_k15);
    RXN_RATE_ENTRY(15, r_k16);
    RXN_RATE_ENTRY(16, r_k17);
    RXN_RATE_ENTRY(17, r_k18);
    RXN_RATE_ENTRY(18, r_k19);
    // k20 (slot 19) and k23 (slot 22) are excluded from this network (their
    // interpolation was commented out upstream). Zero the slots so downstream
    // consumers never read uninitialized memory.
    reaction_rates_out[INDEX(19)] = 0.0;
    RXN_RATE_ENTRY(20, r_k21);
    RXN_RATE_ENTRY(21, r_k22);
    reaction_rates_out[INDEX(22)] = 0.0;
#undef RXN_RATE_ENTRY
}
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
    // Finite-difference temperature derivative of every tabulated cooling
    // rate at temp_out.  The tables are sampled on a uniform grid in
    // log(T), so for each table c the two-point difference over the
    // enclosing bin gives
    //     dc/dT ~= (c[bin_id+1] - c[bin_id]) / dlogT / T .
    // Slot order in dcr_dT matches interpolate_cooling_rates(); slot 26
    // (cie_optical_depth_approx) has no table and is set to zero.
    // Note: the unused locals of the original generated code (tid, zbin_id,
    // no_photo, Tdef) have been removed; behavior is unchanged.
    int bin_id;
    double t1, t2, dT;
    double lb = log(rate_data->bounds[0]);
    double log_temp_out = log(temp_out);

    // Locate the bin containing log(temp_out), clamped to the table range.
    // bin_id+1 is read below, so the table arrays must hold at least
    // nbins+1 samples (as produced by the rate-table generator).
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    dT = t2 - t1;   // log-space bin width (== rate_data->dbin up to rounding)

    dcr_dT[INDEX( 0)] = (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]) / dT / temp_out;                         // ceHI_ceHI
    dcr_dT[INDEX( 1)] = (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]) / dT / temp_out;                     // ceHeI_ceHeI
    dcr_dT[INDEX( 2)] = (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]) / dT / temp_out;                 // ceHeII_ceHeII
    dcr_dT[INDEX( 3)] = (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]) / dT / temp_out;                 // ciHeIS_ciHeIS
    dcr_dT[INDEX( 4)] = (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]) / dT / temp_out;                         // ciHI_ciHI
    dcr_dT[INDEX( 5)] = (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]) / dT / temp_out;                     // ciHeI_ciHeI
    dcr_dT[INDEX( 6)] = (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]) / dT / temp_out;                 // ciHeII_ciHeII
    dcr_dT[INDEX( 7)] = (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]) / dT / temp_out;                     // reHII_reHII
    dcr_dT[INDEX( 8)] = (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]) / dT / temp_out;             // reHeII1_reHeII1
    dcr_dT[INDEX( 9)] = (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]) / dT / temp_out;             // reHeII2_reHeII2
    dcr_dT[INDEX(10)] = (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]) / dT / temp_out;             // reHeIII_reHeIII
    dcr_dT[INDEX(11)] = (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]) / dT / temp_out;                         // brem_brem
    dcr_dT[INDEX(12)] = (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]) / dT / temp_out;         // gloverabel08_gaHI
    dcr_dT[INDEX(13)] = (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]) / dT / temp_out;         // gloverabel08_gaH2
    dcr_dT[INDEX(14)] = (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]) / dT / temp_out;         // gloverabel08_gaHe
    dcr_dT[INDEX(15)] = (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]) / dT / temp_out;         // gloverabel08_gaHp
    dcr_dT[INDEX(16)] = (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]) / dT / temp_out;         // gloverabel08_gael
    dcr_dT[INDEX(17)] = (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]) / dT / temp_out;       // gloverabel08_h2lte
    dcr_dT[INDEX(18)] = (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]) / dT / temp_out;                 // compton_comp_
    dcr_dT[INDEX(19)] = (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]) / dT / temp_out;                 // gammah_gammah
    dcr_dT[INDEX(20)] = (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]) / dT / temp_out;     // h2formation_h2mheat
    dcr_dT[INDEX(21)] = (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]) / dT / temp_out;     // h2formation_h2mcool
    dcr_dT[INDEX(22)] = (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]) / dT / temp_out;           // h2formation_ncrn
    dcr_dT[INDEX(23)] = (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]) / dT / temp_out;         // h2formation_ncrd1
    dcr_dT[INDEX(24)] = (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]) / dT / temp_out;         // h2formation_ncrd2
    dcr_dT[INDEX(25)] = (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]) / dT / temp_out;         // cie_cooling_cieco
    // cie_optical_depth_approx is a constant (see interpolate_cooling_rates,
    // which stores 1.0 in slot 26), so its temperature derivative is zero.
    dcr_dT[INDEX(26)] = 0.0;
}
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
    // Finite-difference temperature derivative of every tabulated reaction
    // rate at temp_out.  Tables are sampled on a uniform log(T) grid, so
    //     dk/dT ~= (k[bin_id+1] - k[bin_id]) / dlogT / T .
    // Slot order in drr_dT matches interpolate_reaction_rates().
    // Slots 19 (k20) and 22 (k23) have no tables in this network and are
    // intentionally left unwritten, exactly as in the generated original.
    // Note: the unused locals of the original generated code (tid, zbin_id,
    // no_photo, Tdef) have been removed; behavior is unchanged.
    int bin_id;
    double t1, t2, dT;
    double lb = log(rate_data->bounds[0]);
    double log_temp_out = log(temp_out);

    // Locate the bin containing log(temp_out), clamped to the table range.
    // bin_id+1 is read below, so the table arrays must hold at least
    // nbins+1 samples (as produced by the rate-table generator).
    bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
    if ( bin_id <= 0) {
        bin_id = 0;
    } else if ( bin_id >= rate_data->nbins) {
        bin_id = rate_data->nbins - 1;
    }
    t1 = (lb + (bin_id ) * rate_data->dbin);
    t2 = (lb + (bin_id + 1) * rate_data->dbin);
    dT = t2 - t1;   // log-space bin width (== rate_data->dbin up to rounding)

    drr_dT[INDEX( 0)] = (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]) / dT / temp_out;   // k01
    drr_dT[INDEX( 1)] = (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]) / dT / temp_out;   // k02
    drr_dT[INDEX( 2)] = (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]) / dT / temp_out;   // k03
    drr_dT[INDEX( 3)] = (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]) / dT / temp_out;   // k04
    drr_dT[INDEX( 4)] = (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]) / dT / temp_out;   // k05
    drr_dT[INDEX( 5)] = (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]) / dT / temp_out;   // k06
    drr_dT[INDEX( 6)] = (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]) / dT / temp_out;   // k07
    drr_dT[INDEX( 7)] = (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]) / dT / temp_out;   // k08
    drr_dT[INDEX( 8)] = (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]) / dT / temp_out;   // k09
    drr_dT[INDEX( 9)] = (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]) / dT / temp_out;   // k10
    drr_dT[INDEX(10)] = (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]) / dT / temp_out;   // k11
    drr_dT[INDEX(11)] = (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]) / dT / temp_out;   // k12
    drr_dT[INDEX(12)] = (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]) / dT / temp_out;   // k13
    drr_dT[INDEX(13)] = (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]) / dT / temp_out;   // k14
    drr_dT[INDEX(14)] = (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]) / dT / temp_out;   // k15
    drr_dT[INDEX(15)] = (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]) / dT / temp_out;   // k16
    drr_dT[INDEX(16)] = (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]) / dT / temp_out;   // k17
    drr_dT[INDEX(17)] = (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]) / dT / temp_out;   // k18
    drr_dT[INDEX(18)] = (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]) / dT / temp_out;   // k19
    // slot 19 (k20): not tabulated; left unwritten on purpose
    drr_dT[INDEX(20)] = (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]) / dT / temp_out;   // k21
    drr_dT[INDEX(21)] = (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]) / dT / temp_out;   // k22
    // slot 22 (k23): not tabulated; left unwritten on purpose
}
// Right-hand side of the 10-component chemistry ODE system for one cell:
// y[0..8] are species abundances (H2_1, H2_2, H_1, H_2, H_m0, He_1, He_2,
// He_3, de) and y[9] is the gas energy 'ge'.  Reaction and cooling rates
// are interpolated at the current temperature and combined into dy.
// 'pres' is accepted for interface compatibility but not used here.
// NOTE(review): the two printf blocks at the bottom fire on *every* RHS
// evaluation from thread T_ID == 0 -- debugging aids that should be
// disabled for production runs (device printf serializes execution).
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;   // flat thread id (unused below; indexing uses the T_ID macro)
// int NSPECIES = 10;
int NRATE = 23;   // number of reaction-rate slots (unused in this body)
int NCOOL = 26;   // number of cooling-rate slots (unused in this body)
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retrieved from the d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
double mdensity = d_mem->density[T_ID];
double inv_mdensity = 1.0 / mdensity;
double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
double * __restrict__ scale = d_mem->scale;
double * __restrict__ inv_scale = d_mem->inv_scale;
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
for (int i = 0; i < 10; i++){
y[INDEX(i)] = y_in[INDEX(i)];
}
#endif
// Update T from the (possibly rescaled) state, then refresh the
// interpolated reaction/cooling rate tables at that temperature.
evaluate_temperature ( &T_local, &Tge, y, mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] - local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] + local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] + local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] - local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] + local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y[INDEX(0)]*y[INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y[INDEX(0)]*y[INDEX(2)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)] + local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y[INDEX(0)]*y[INDEX(2)]*y[INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y[INDEX(2)]*y[INDEX(2)]*y[INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] - local_reaction_rates[INDEX(8)]*y[INDEX(2)]*y[INDEX(3)] + local_reaction_rates[INDEX(9)]*y[INDEX(1)]*y[INDEX(2)] - local_reaction_rates[INDEX(10)]*y[INDEX(0)]*y[INDEX(3)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] - local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] - local_reaction_rates[INDEX(15)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(18)]*y[INDEX(1)]*y[INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] + local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y[INDEX(2)]*y[INDEX(8)] - local_reaction_rates[INDEX(1)]*y[INDEX(3)]*y[INDEX(8)] + local_reaction_rates[INDEX(2)]*y[INDEX(5)]*y[INDEX(8)] - local_reaction_rates[INDEX(3)]*y[INDEX(6)]*y[INDEX(8)] + local_reaction_rates[INDEX(4)]*y[INDEX(6)]*y[INDEX(8)] - local_reaction_rates[INDEX(5)]*y[INDEX(7)]*y[INDEX(8)] - local_reaction_rates[INDEX(6)]*y[INDEX(2)]*y[INDEX(8)] + local_reaction_rates[INDEX(7)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(13)]*y[INDEX(4)]*y[INDEX(8)] + local_reaction_rates[INDEX(14)]*y[INDEX(2)]*y[INDEX(4)] + local_reaction_rates[INDEX(16)]*y[INDEX(3)]*y[INDEX(4)] - local_reaction_rates[INDEX(17)]*y[INDEX(1)]*y[INDEX(8)];
//# 9: ge  (net heating/cooling; divided by mass density below)
dy[INDEX(9)] = -2.01588*y[INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y[INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y[INDEX(0)]*local_cooling_rates[INDEX(13)] + y[INDEX(2)]*local_cooling_rates[INDEX(12)] + y[INDEX(3)]*local_cooling_rates[INDEX(15)] + y[INDEX(5)]*local_cooling_rates[INDEX(14)] + y[INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y[INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(3)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(7)] - y[INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y[INDEX(8)], 2) - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(8)] - y[INDEX(6)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(9)] - y[INDEX(7)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y[INDEX(8)]*(y[INDEX(3)] + y[INDEX(6)] + 4.0*y[INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y[INDEX(8)]*( T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y[INDEX(0)]*local_cooling_rates[INDEX(24)] + y[INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y[INDEX(0)]*y[INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y[INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
// (a byte-for-byte duplicate of the dy[INDEX(9)] expression, previously kept
// here inside a block comment, was removed for readability)
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
// scaling the dydt vector back to code units
for (int i = 0; i< 10; i++){
dy[INDEX(i)] *= inv_scale[INDEX(i)];
}
#endif
// Debug bookkeeping: count RHS evaluations (single writer, T_ID == 0,
// so the non-atomic increment is safe) and dump the state vector.
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
}
|
05b57e4590a84bc74d9109b95bd04091a2121910.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include "cuda_utils.h"
#include "parameters.h"
#include "seed_filter.h"
#include "seed_filter_interface.h"
#include "store.h"
#include "store_gpu.h"
// Each segment is 16B
// With 64MB for the HSPs array per 1GB GPU memory
// With higher GPU memory, the size just linearly increases
#define MAX_HITS_PER_GB 4194304
// Capacity limits for the seed/hit buffers; assigned elsewhere
// (presumably sized from available GPU memory at initialization -- confirm).
int MAX_SEEDS;
int MAX_HITS;
// Seed (k-mer) length; added to table positions to point past the seed.
uint32_t seed_size;
// Substitution score matrix on the device (outer index looks per-GPU -- confirm).
int **d_sub_mat;
// X-drop termination threshold for gap-free extension.
int xdrop;
// Minimum HSP score to keep a segment pair.
int hspthresh;
// When true, skip the entropy adjustment of HSP scores in find_hsps().
bool noentropy;
// Device buffers; the double pointers appear to be indexed by GPU id,
// with the thrust vectors owning the storage -- TODO confirm in setup code.
char** d_seq_rc;
uint64_t** d_seed_offsets;
uint32_t** d_hit_num_array;
std::vector<thrust::device_vector<uint32_t> > d_hit_num_vec;
uint32_t** d_done;
std::vector<thrust::device_vector<uint32_t> > d_done_vec;
segmentPair** d_hsp;
std::vector<thrust::device_vector<segmentPair> > d_hsp_vec;
segmentPair** d_hsp_reduced;
std::vector<thrust::device_vector<segmentPair> > d_hsp_reduced_vec;
// Equality predicate used to merge redundant HSPs: two segment pairs are
// considered duplicates when they lie on the same diagonal
// (ref_start - query_start) and one interval fully contains the other.
struct hspDiagEqual{
    __host__ __device__
    bool operator()(segmentPair x, segmentPair y){
        bool same_diagonal = (x.ref_start - x.query_start) == (y.ref_start - y.query_start);
        bool x_within_y = (x.ref_start >= y.ref_start) && ((x.ref_start + x.len) <= (y.ref_start + y.len));
        bool y_within_x = (y.ref_start >= x.ref_start) && ((y.ref_start + y.len) <= (x.ref_start + x.len));
        return same_diagonal && (x_within_y || y_within_x);
    }
};
// Strict weak ordering for diagonal-based deduplication: sort by diagonal
// (ref_start - query_start), then ref_start, then query_start ascending,
// and finally by score descending so the best-scoring duplicate comes first.
struct hspDiagComp{
    __host__ __device__
    bool operator()(segmentPair x, segmentPair y){
        auto diag_x = x.ref_start - x.query_start;
        auto diag_y = y.ref_start - y.query_start;
        if(diag_x != diag_y)
            return diag_x < diag_y;
        if(x.ref_start != y.ref_start)
            return x.ref_start < y.ref_start;
        if(x.query_start != y.query_start)
            return x.query_start < y.query_start;
        return x.score > y.score;
    }
};
// Exact equality of two segment pairs: all four fields must match.
struct hspEqual{
    __host__ __device__
    bool operator()(segmentPair x, segmentPair y){
        if(x.ref_start != y.ref_start)
            return false;
        if(x.query_start != y.query_start)
            return false;
        if(x.len != y.len)
            return false;
        return x.score == y.score;
    }
};
// Final output ordering: query_start ascending, then score descending,
// then ref_start descending as the last tiebreak.
struct hspFinalComp{
    __host__ __device__
    bool operator()(segmentPair x, segmentPair y){
        if(x.query_start != y.query_start)
            return x.query_start < y.query_start;
        if(x.score != y.score)
            return x.score > y.score;
        return x.ref_start > y.ref_start;
    }
};
// Intermediate ordering: query_start ascending, longer segments first,
// then ref_start ascending, then score descending.
struct hspComp{
    __host__ __device__
    bool operator()(segmentPair x, segmentPair y){
        if(x.query_start != y.query_start)
            return x.query_start < y.query_start;
        if(x.len != y.len)
            return x.len > y.len;
        if(x.ref_start != y.ref_start)
            return x.ref_start < y.ref_start;
        return x.score > y.score;
    }
};
__global__
void rev_comp_string (uint32_t len, char* src_seq, char* dst_seq){
    // Writes the reverse complement of src_seq into dst_seq:
    // dst_seq[len-1-i] = complement(src_seq[i]); non-ACGT codes pass through.
    // Grid-stride loop, so any launch configuration covers the sequence.
    uint32_t stride = blockDim.x * gridDim.x;
    for (uint32_t i = blockDim.x * blockIdx.x + threadIdx.x; i < len; i += stride) {
        char base = src_seq[i];
        char comp = (base == A_NT) ? T_NT
                  : (base == C_NT) ? G_NT
                  : (base == G_NT) ? C_NT
                  : (base == T_NT) ? A_NT
                  : base;   // anything else (N, X, ...) is copied unchanged
        dst_seq[len - 1 - i] = comp;
    }
}
__global__
void find_num_hits (int num_seeds, const uint32_t* __restrict__ d_index_table, uint64_t* seed_offsets, uint32_t* seed_hit_num){
    // For every queried seed, computes how many reference positions match it.
    // d_index_table holds an inclusive prefix sum of per-seed position counts,
    // so the count for seed s is d_index_table[s] - d_index_table[s-1]
    // (with an implicit 0 before the first entry).
    uint32_t stride = blockDim.x * gridDim.x;
    for (uint32_t id = blockDim.x * blockIdx.x + threadIdx.x; id < num_seeds; id += stride) {
        uint32_t seed = (seed_offsets[id] >> 32);   // seed value lives in the high 32 bits
        uint32_t lower = (seed > 0) ? d_index_table[seed - 1] : 0;
        seed_hit_num[id] = d_index_table[seed] - lower;
    }
}
// Expands each queried seed into its anchor hits.  One thread-block per seed
// (block b handles d_seed_offsets[b + start_seed_index]); within the block,
// warp w's lane 0 materialises one hit per loop iteration.  Output addresses
// count backwards from this seed's entry in seed_hit_num, which appears to be
// an inclusive prefix sum of per-seed hit counts -- confirm against the host
// caller.  Hits whose reference locus falls outside [ref_start, ref_end] are
// tagged with score = -1 so downstream stages skip them.
__global__
void find_hits (const uint32_t* __restrict__ d_index_table, const uint32_t* __restrict__ d_pos_table, uint64_t* d_seed_offsets, uint32_t seed_size, uint32_t* seed_hit_num, int num_hits, segmentPair* d_hsp, uint32_t start_seed_index, uint32_t start_hit_index, uint32_t ref_start, uint32_t ref_end){
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int warp_size = warpSize;
int lane_id = thread_id%warp_size;
int warp_id = (thread_id-lane_id)/warp_size;
// Per-block seed metadata, decoded once by thread 0 and shared.
__shared__ uint32_t start, end;
__shared__ uint32_t seed;
__shared__ uint64_t seed_offset;
__shared__ uint32_t ref_loc[NUM_WARPS];
__shared__ uint32_t query_loc;
__shared__ uint32_t seed_hit_prefix;
if(thread_id == 0){
// Packed layout: high 32 bits = seed value, low 32 bits = query offset.
// seed_size is added so locations point just past the matched seed.
seed_offset = d_seed_offsets[block_id+start_seed_index];
seed = (seed_offset >> 32);
query_loc = ((seed_offset << 32) >> 32) + seed_size;
// start and end from the seed block_id table
end = d_index_table[seed];
start = 0;
if (seed > 0){
start = d_index_table[seed-1];
}
seed_hit_prefix = seed_hit_num[block_id+start_seed_index];
}
__syncthreads();   // all threads need start/end/query_loc/seed_hit_prefix
// Position-table entries [start, end) are this seed's reference matches;
// warps stride across them, NUM_WARPS entries per iteration.
for (int id1 = start; id1 < end; id1 += NUM_WARPS) {
if(id1+warp_id < end){
if(lane_id == 0){
ref_loc[warp_id] = d_pos_table[id1+warp_id] + seed_size;
// Backward-counted slot within this seed's reserved output range,
// shifted by start_hit_index for multi-batch processing.
int dram_address = seed_hit_prefix -id1 - warp_id+start-1-start_hit_index;
d_hsp[dram_address].ref_start = ref_loc[warp_id];
d_hsp[dram_address].query_start = query_loc;
d_hsp[dram_address].len = 0;
if(ref_loc[warp_id] >= ref_start && ref_loc[warp_id] <= ref_end){
d_hsp[dram_address].score = 0;
}
else{
d_hsp[dram_address].score = -1;
}
}
}
}
}
// Gap-free X-drop extension of seed hits into HSPs (high-scoring segment
// pairs), one warp per hit.  Each hit is extended to the right and then to
// the left of its anchor; a warp-wide inclusive scan (via __shfl_up_sync)
// accumulates running scores 32 positions at a time until the score drops
// more than 'xdrop' below the running maximum or a sequence edge is reached.
// Optionally an entropy factor (from per-base match counts) downweights
// low-complexity segments.  Surviving HSPs (adjusted score >= hspthresh)
// are written back into d_hsp with d_done[hid] = 1, others are zeroed with
// d_done[hid] = 0, so a later prefix-sum + compaction can pack the output.
__global__
void find_hsps (const char* __restrict__ d_ref_seq, const char* __restrict__ d_query_seq, uint32_t ref_len, uint32_t query_len, int *d_sub_mat, bool noentropy, int xdrop, int hspthresh, int num_hits, segmentPair* d_hsp, uint32_t* d_done){
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int num_blocks = gridDim.x;
int warp_size = warpSize;
int lane_id = thread_id%warp_size;
int warp_id = (thread_id-lane_id)/warp_size;
// Per-warp extension state, indexed by warp_id.
__shared__ uint32_t ref_loc[NUM_WARPS];
__shared__ uint32_t query_loc[NUM_WARPS];
__shared__ int total_score[NUM_WARPS];
__shared__ int prev_score[NUM_WARPS];
__shared__ int prev_max_score[NUM_WARPS];
__shared__ int prev_max_pos[NUM_WARPS];
__shared__ bool find_hsp[NUM_WARPS];
__shared__ bool edge_found[NUM_WARPS];
__shared__ bool xdrop_found[NUM_WARPS];
__shared__ bool new_max_found[NUM_WARPS];
__shared__ uint32_t left_extent[NUM_WARPS];
__shared__ int extent[NUM_WARPS];
__shared__ uint32_t tile[NUM_WARPS];
__shared__ double entropy[NUM_WARPS];
int thread_score;
int max_thread_score;
int max_pos;
int temp_pos;
bool xdrop_done;
bool temp_xdrop_done;
int temp;
// Per-lane A/C/G/T match counters for the entropy estimate; count_del holds
// matches past the current maximum, folded in only when the maximum advances.
short count[4];
short count_del[4];
char r_chr;
char q_chr;
uint32_t ref_pos;
uint32_t query_pos;
int pos_offset;
// Substitution matrix staged in shared memory for fast repeated lookups.
__shared__ int sub_mat[NUC2];
if(thread_id < NUC2){
sub_mat[thread_id] = d_sub_mat[thread_id];
}
__syncthreads();
// Grid-stride over hits, NUM_WARPS hits per block per iteration.
for(int hid0 = block_id*NUM_WARPS; hid0 < num_hits; hid0 += NUM_WARPS*num_blocks){
int hid = hid0 + warp_id;
if(hid < num_hits){
if(lane_id == 0){
ref_loc[warp_id] = d_hsp[hid].ref_start;
query_loc[warp_id] = d_hsp[hid].query_start;
total_score[warp_id] = 0;
// score < 0 marks hits filtered out by find_hits; skip extension.
if(d_hsp[hid].score < 0){
find_hsp[warp_id] = false;
}
else{
find_hsp[warp_id] = true;
}
}
}
else{
// Padding warp past the last hit: re-extends hid0's anchor so the warp
// stays in step; its result is discarded (hid < num_hits check below).
// NOTE(review): find_hsp[warp_id] is not set on this path, so it keeps
// its value from the previous iteration (uninitialized on the first) --
// harmless for output but worth confirming.
if(lane_id == 0){
ref_loc[warp_id] = d_hsp[hid0].ref_start;
query_loc[warp_id] = d_hsp[hid0].query_start;
total_score[warp_id] = 0;
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
//Right extension
if(lane_id ==0){
if(find_hsp[warp_id]){
edge_found[warp_id] = false;
}
else{
// Skipped hit: pretend the edge was already reached so the loop exits.
edge_found[warp_id] = true;
}
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
new_max_found[warp_id] = false;
entropy[warp_id] = 1.0f;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = -1;
extent[warp_id] = 0;
}
count[0] = 0;
count[1] = 0;
count[2] = 0;
count[3] = 0;
count_del[0] = 0;
count_del[1] = 0;
count_del[2] = 0;
count_del[3] = 0;
max_pos = 0;
__syncwarp();
// Process one 32-wide tile of positions per loop pass.
while(!xdrop_found[warp_id] && !edge_found[warp_id]){
pos_offset = lane_id + tile[warp_id];
ref_pos = ref_loc[warp_id] + pos_offset;
query_pos = query_loc[warp_id] + pos_offset;
thread_score = 0;
if(ref_pos < ref_len && query_pos < query_len){
r_chr = d_ref_seq[ref_pos];
q_chr = d_query_seq[query_pos];
thread_score = sub_mat[r_chr*NUC+q_chr];
}
__syncwarp();
// Inclusive prefix sum of per-position scores across the warp.
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if(lane_id >= offset){
thread_score += temp;
}
}
thread_score += prev_score[warp_id];
if(thread_score > prev_max_score[warp_id]){
max_thread_score = thread_score;
max_pos = pos_offset;
}
else{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
// Scan the running maximum (and its position) across the warp.
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
// X-drop test: once any lane drops more than xdrop below the maximum,
// every lane to its right is also considered terminated.
xdrop_done = ((max_thread_score-thread_score) > xdrop);
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if(lane_id >= offset){
xdrop_done |= temp_xdrop_done;
}
}
// Terminated lanes fall back to the previous tile's maximum, then the
// warp maximum is re-scanned over the surviving candidates.
if(xdrop_done == 1){
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
__syncwarp();
// Lane 31 holds the warp-wide result; it updates the shared state and
// decides whether extension stops (x-drop or edge) or continues.
if(lane_id == warp_size-1){
if(max_pos > prev_max_pos[warp_id])
new_max_found[warp_id] = true;
else
new_max_found[warp_id] = false;
if(xdrop_done){
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
extent[warp_id] = max_pos;
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if(ref_pos >= ref_len || query_pos >= query_len){
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
extent[warp_id] = max_pos;
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
prev_max_pos[warp_id] = max_pos;
tile[warp_id]+= warp_size;
}
}
__syncwarp();
// A new maximum commits the provisionally-counted matches (count_del).
if(new_max_found[warp_id]){
for(int i = 0; i < 4; i++){
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if(r_chr == q_chr){
if(pos_offset <= prev_max_pos[warp_id]){
count[r_chr] += 1;
}
else{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
__syncwarp();
////////////////////////////////////////////////////////////////
//Left extension
// Mirrors the right extension but walks backwards from the anchor;
// positions here are offsets *before* (ref_loc, query_loc).
if(lane_id ==0){
if(find_hsp[warp_id]){
edge_found[warp_id] = false;
}
else{
edge_found[warp_id] = true;
}
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
new_max_found[warp_id] = false;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = 0;
left_extent[warp_id] = 0;
}
count_del[0] = 0;
count_del[1] = 0;
count_del[2] = 0;
count_del[3] = 0;
max_pos = 0;
__syncwarp();
while(!xdrop_found[warp_id] && !edge_found[warp_id]){
pos_offset = lane_id+1+tile[warp_id];
thread_score = 0;
if(ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset){
ref_pos = ref_loc[warp_id] - pos_offset;
query_pos = query_loc[warp_id] - pos_offset;
r_chr = d_ref_seq[ref_pos];
q_chr = d_query_seq[query_pos];
thread_score = sub_mat[r_chr*NUC+q_chr];
}
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if(lane_id >= offset){
thread_score += temp;
}
}
thread_score += prev_score[warp_id];
if(thread_score > prev_max_score[warp_id]){
max_thread_score = thread_score;
max_pos = pos_offset;
}
else{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
xdrop_done = ((max_thread_score-thread_score) > xdrop);
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if(lane_id >= offset){
xdrop_done |= temp_xdrop_done;
}
}
if(xdrop_done == 1){
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
__syncwarp();
if(lane_id == warp_size-1){
if(max_pos > prev_max_pos[warp_id])
new_max_found[warp_id] = true;
else
new_max_found[warp_id] = false;
if(xdrop_done){
total_score[warp_id]+=max_thread_score;
xdrop_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if(ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset){
total_score[warp_id]+=max_thread_score;
edge_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
prev_max_pos[warp_id] = max_pos;
tile[warp_id]+=warp_size;
}
}
__syncwarp();
if(new_max_found[warp_id]){
for(int i = 0; i < 4; i++){
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if(r_chr == q_chr){
if(pos_offset <= prev_max_pos[warp_id]){
count[r_chr] += 1;
}
else{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
//////////////////////////////////////////////////////////////////
// Entropy adjustment: only for borderline scores (between hspthresh and
// 3*hspthresh) and only when at least 20 matched bases were counted.
// The warp first reduces the per-lane base counts, then lane 31 computes
// the normalized Shannon entropy (base 4) of the match composition.
if(total_score[warp_id] >= hspthresh && total_score[warp_id] <= 3*hspthresh && !noentropy){
for(int i = 0; i < 4; i++){
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset);
}
}
__syncwarp();
if(lane_id == warp_size-1 && ((count[0]+count[1]+count[2]+count[3]) >= 20)){
entropy[warp_id] = 0.f;
for(int i = 0; i < 4; i++){
entropy[warp_id] += ((double) count[i])/((double) (extent[warp_id]+1)) * ((count[i] != 0) ? log(((double) count[i]) / ((double) (extent[warp_id]+1))): 0.f);
}
entropy[warp_id] = -entropy[warp_id]/log(4.0f);
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
// Write-back (real hits only): keep the HSP if the entropy-scaled score
// still clears hspthresh, otherwise zero it; d_done flags survivors for
// the subsequent prefix-sum compaction.
if(hid < num_hits){
if(lane_id == 0){
if( ((int) (((float) total_score[warp_id]) * entropy[warp_id])) >= hspthresh){
d_hsp[hid].ref_start = ref_loc[warp_id] - left_extent[warp_id];
d_hsp[hid].query_start = query_loc[warp_id] - left_extent[warp_id];
d_hsp[hid].len = extent[warp_id];
if(entropy[warp_id] > 0)
d_hsp[hid].score = total_score[warp_id]*entropy[warp_id];
d_done[hid] = 1;
}
else{
d_hsp[hid].ref_start = ref_loc[warp_id];
d_hsp[hid].query_start = query_loc[warp_id];
d_hsp[hid].len = 0;
d_hsp[hid].score = 0;
d_done[hid] = 0;
}
}
}
__syncwarp();
}
}
__global__
void compress_output (uint32_t* d_done, segmentPair* d_hsp, segmentPair* d_hsp_reduced, int num_hits, bool rev, uint32_t len){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
int index = 0;
for (uint32_t id = start; id < num_hits; id += stride) {
index = d_done[id];
segmentPair output = d_hsp[id];
if(rev)
output.query_start = len - 1 - (output.query_start + output.len);
if(id > 0){
if(index > d_done[id-1]){
d_hsp_reduced[index-1] = output;
}
}
else{
if(index == 1){
d_hsp_reduced[0] = output;
}
}
}
}
std::vector<segmentPair> SeedAndFilter (std::vector<uint64_t> seed_offset_vector, bool rev, uint32_t ref_start, uint32_t ref_end){
uint32_t num_hits = 0;
uint32_t total_anchors = 0;
uint32_t num_seeds = seed_offset_vector.size();
assert(num_seeds <= MAX_SEEDS);
uint64_t* tmp_offset = (uint64_t*) malloc(num_seeds*sizeof(uint64_t));
for (uint32_t i = 0; i < num_seeds; i++) {
tmp_offset[i] = seed_offset_vector[i];
}
int g;
std::unique_lock<std::mutex> locker(mu);
if (available_gpus.empty()) {
cv.wait(locker, [](){return !available_gpus.empty();});
}
g = available_gpus.back();
available_gpus.pop_back();
locker.unlock();
check_cuda_setDevice(g, "SeedAndFilter");
check_cuda_memcpy((void*)d_seed_offsets[g], (void*)tmp_offset, num_seeds*sizeof(uint64_t), hipMemcpyHostToDevice, "seed_offsets");
hipLaunchKernelGGL(( find_num_hits) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, num_seeds, d_index_table[g], d_seed_offsets[g], d_hit_num_array[g]);
thrust::inclusive_scan(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin() + num_seeds, d_hit_num_vec[g].begin());
check_cuda_memcpy((void*)&num_hits, (void*)(d_hit_num_array[g]+num_seeds-1), sizeof(uint32_t), hipMemcpyDeviceToHost, "num_hits");
int num_iter = num_hits/MAX_HITS+1;
uint32_t iter_hit_limit = MAX_HITS;
thrust::device_vector<uint32_t> limit_pos (num_iter);
for(int i = 0; i < num_iter-1; i++){
thrust::device_vector<uint32_t>::iterator result_end = thrust::lower_bound(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin()+num_seeds, iter_hit_limit);
uint32_t pos = thrust::distance(d_hit_num_vec[g].begin(), result_end)-1;
iter_hit_limit = d_hit_num_vec[g][pos]+MAX_HITS;
limit_pos[i] = pos;
}
limit_pos[num_iter-1] = num_seeds-1;
segmentPair** h_hsp = (segmentPair**) malloc(num_iter*sizeof(segmentPair*));
uint32_t* num_anchors = (uint32_t*) calloc(num_iter, sizeof(uint32_t));
uint32_t start_seed_index = 0;
uint32_t start_hit_val = 0;
uint32_t iter_num_seeds, iter_num_hits;
if(num_hits > 0){
for(int i = 0; i < num_iter; i++){
iter_num_seeds = limit_pos[i] + 1 - start_seed_index;
iter_num_hits = d_hit_num_vec[g][limit_pos[i]] - start_hit_val;
hipLaunchKernelGGL(( find_hits) , dim3(iter_num_seeds), dim3(BLOCK_SIZE), 0, 0, d_index_table[g], d_pos_table[g], d_seed_offsets[g], seed_size, d_hit_num_array[g], iter_num_hits, d_hsp[g], start_seed_index, start_hit_val, ref_start, ref_end);
if(rev){
hipLaunchKernelGGL(( find_hsps) , dim3(1024), dim3(BLOCK_SIZE), 0, 0, d_ref_seq[g], d_seq_rc[g], ref_len, ref_len, d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]);
}
else{
hipLaunchKernelGGL(( find_hsps) , dim3(1024), dim3(BLOCK_SIZE), 0, 0, d_ref_seq[g], d_ref_seq[g], ref_len, ref_len, d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]);
}
thrust::inclusive_scan(d_done_vec[g].begin(), d_done_vec[g].begin() + iter_num_hits, d_done_vec[g].begin());
check_cuda_memcpy((void*)&num_anchors[i], (void*)(d_done[g]+iter_num_hits-1), sizeof(uint32_t), hipMemcpyDeviceToHost, "num_anchors");
if(num_anchors[i] > 0){
hipLaunchKernelGGL(( compress_output) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, d_done[g], d_hsp[g], d_hsp_reduced[g], iter_num_hits, rev, ref_len);
thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspComp());
thrust::device_vector<segmentPair>::iterator result_end = thrust::unique_copy(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], d_hsp_vec[g].begin(), hspEqual());
num_anchors[i] = thrust::distance(d_hsp_vec[g].begin(), result_end), num_anchors[i];
thrust::stable_sort(d_hsp_vec[g].begin(), d_hsp_vec[g].begin()+num_anchors[i], hspDiagComp());
thrust::device_vector<segmentPair>::iterator result_end2 = thrust::unique_copy(d_hsp_vec[g].begin(), d_hsp_vec[g].begin()+num_anchors[i], d_hsp_reduced_vec[g].begin(), hspDiagEqual());
num_anchors[i] = thrust::distance(d_hsp_reduced_vec[g].begin(), result_end2), num_anchors[i];
thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspFinalComp());
total_anchors += num_anchors[i];
h_hsp[i] = (segmentPair*) calloc(num_anchors[i], sizeof(segmentPair));
check_cuda_memcpy((void*)h_hsp[i], (void*)d_hsp_reduced[g], num_anchors[i]*sizeof(segmentPair), hipMemcpyDeviceToHost, "hsp_output");
}
start_seed_index = limit_pos[i] + 1;
start_hit_val = d_hit_num_vec[g][limit_pos[i]];
}
}
limit_pos.clear();
{
std::unique_lock<std::mutex> locker(mu);
available_gpus.push_back(g);
locker.unlock();
cv.notify_one();
}
std::vector<segmentPair> gpu_filter_output;
segmentPair first_el;
first_el.len = total_anchors;
first_el.score = num_hits;
gpu_filter_output.push_back(first_el);
if(total_anchors > 0){
for(int it = 0; it < num_iter; it++){
for(int i = 0; i < num_anchors[it]; i++){
gpu_filter_output.push_back(h_hsp[it][i]);
}
if(num_anchors[it] > 0){
free(h_hsp[it]);
}
}
}
free(h_hsp);
free(num_anchors);
free(tmp_offset);
return gpu_filter_output;
}
void InitializeProcessor (bool transition, uint32_t WGA_CHUNK, uint32_t input_seed_size, int* sub_mat, int input_xdrop, int input_hspthresh, bool input_noentropy){
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
float global_mem_gb = static_cast<float>(deviceProp.totalGlobalMem / 1073741824.0f);
if(transition)
MAX_SEEDS = 13*WGA_CHUNK;
else
MAX_SEEDS = WGA_CHUNK;
MAX_HITS = MAX_HITS_PER_GB*global_mem_gb;
seed_size = input_seed_size;
xdrop = input_xdrop;
hspthresh = input_hspthresh;
noentropy = input_noentropy;
d_sub_mat = (int**) malloc(NUM_DEVICES*sizeof(int*));
d_seq_rc = (char**) malloc(NUM_DEVICES*sizeof(char*));
d_seed_offsets = (uint64_t**) malloc(NUM_DEVICES*sizeof(uint64_t*));
d_hit_num_array = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_hit_num_vec.reserve(NUM_DEVICES);
d_done = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_done_vec.reserve(NUM_DEVICES);
d_hsp = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*));
d_hsp_vec.reserve(NUM_DEVICES);
d_hsp_reduced = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*));
d_hsp_reduced_vec.reserve(NUM_DEVICES);
segmentPair zeroHsp;
zeroHsp.ref_start = 0;
zeroHsp.query_start = 0;
zeroHsp.len = 0;
zeroHsp.score = 0;
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "InitializeProcessor");
check_cuda_malloc((void**)&d_sub_mat[g], NUC2*sizeof(int), "sub_mat");
check_cuda_memcpy((void*)d_sub_mat[g], (void*)sub_mat, NUC2*sizeof(int), hipMemcpyHostToDevice, "sub_mat");
check_cuda_malloc((void**)&d_seed_offsets[g], MAX_SEEDS*sizeof(uint64_t), "seed_offsets");
d_hit_num_vec.emplace_back(MAX_SEEDS, 0);
d_hit_num_array[g] = thrust::raw_pointer_cast(d_hit_num_vec.at(g).data());
d_done_vec.emplace_back(MAX_HITS, 0);
d_done[g] = thrust::raw_pointer_cast(d_done_vec.at(g).data());
d_hsp_vec.emplace_back(MAX_HITS, zeroHsp);
d_hsp[g] = thrust::raw_pointer_cast(d_hsp_vec.at(g).data());
d_hsp_reduced_vec.emplace_back(MAX_HITS, zeroHsp);
d_hsp_reduced[g] = thrust::raw_pointer_cast(d_hsp_reduced_vec.at(g).data());
available_gpus.push_back(g);
}
}
void SendQueryWriteRequest (){
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "SendQueryWriteRequest");
check_cuda_malloc((void**)&d_seq_rc[g], ref_len*sizeof(char), "seq_rc");
hipLaunchKernelGGL(( rev_comp_string) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, ref_len, d_ref_seq[g], d_seq_rc[g]);
}
}
void ClearQuery(){
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "ClearQuery");
check_cuda_free((void*)d_seq_rc[g], "d_seq_rc");
}
}
void ShutdownProcessor(){
d_done_vec.clear();
d_hit_num_vec.clear();
d_hsp_vec.clear();
d_hsp_reduced_vec.clear();
hipDeviceReset();
}
InitializeProcessor_ptr g_InitializeProcessor = InitializeProcessor;
SendQueryWriteRequest_ptr g_SendQueryWriteRequest = SendQueryWriteRequest;
SeedAndFilter_ptr g_SeedAndFilter = SeedAndFilter;
ClearQuery_ptr g_ClearQuery = ClearQuery;
ShutdownProcessor_ptr g_ShutdownProcessor = ShutdownProcessor;
| 05b57e4590a84bc74d9109b95bd04091a2121910.cu | #include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include "cuda_utils.h"
#include "parameters.h"
#include "seed_filter.h"
#include "seed_filter_interface.h"
#include "store.h"
#include "store_gpu.h"
// Each segment is 16B
// With 64MB for the HSPs array per 1GB GPU memory
// With higher GPU memory, the size just linearly increases
#define MAX_HITS_PER_GB 4194304
int MAX_SEEDS;
int MAX_HITS;
uint32_t seed_size;
int **d_sub_mat;
int xdrop;
int hspthresh;
bool noentropy;
char** d_seq_rc;
uint64_t** d_seed_offsets;
uint32_t** d_hit_num_array;
std::vector<thrust::device_vector<uint32_t> > d_hit_num_vec;
uint32_t** d_done;
std::vector<thrust::device_vector<uint32_t> > d_done_vec;
segmentPair** d_hsp;
std::vector<thrust::device_vector<segmentPair> > d_hsp_vec;
segmentPair** d_hsp_reduced;
std::vector<thrust::device_vector<segmentPair> > d_hsp_reduced_vec;
struct hspDiagEqual{
__host__ __device__
bool operator()(segmentPair x, segmentPair y){
return ( ( (x.ref_start - x.query_start) == (y.ref_start - y.query_start) ) && ( ( (x.ref_start >= y.ref_start) && ( (x.ref_start + x.len) <= (y.ref_start + y.len) ) ) || ( ( y.ref_start >= x.ref_start ) && ( (y.ref_start + y.len) <= (x.ref_start + x.len) ) ) ) );
}
};
struct hspDiagComp{
__host__ __device__
bool operator()(segmentPair x, segmentPair y){
if((x.ref_start - x.query_start) < (y.ref_start - y.query_start))
return true;
else if((x.ref_start - x.query_start) == (y.ref_start - y.query_start)){
if(x.ref_start < y.ref_start)
return true;
else if(x.ref_start == y.ref_start){
if(x.query_start < y.query_start)
return true;
else if(x.query_start == y.query_start){
if(x.score > y.score)
return true;
else
return false;
}
else
return false;
}
else
return false;
}
else
return false;
}
};
struct hspEqual{
__host__ __device__
bool operator()(segmentPair x, segmentPair y){
return ((x.ref_start == y.ref_start) && (x.query_start == y.query_start) && (x.len == y.len) && (x.score == y.score));
}
};
struct hspFinalComp{
__host__ __device__
bool operator()(segmentPair x, segmentPair y){
if(x.query_start < y.query_start)
return true;
else if(x.query_start == y.query_start){
if(x.score > y.score)
return true;
else if(x.score == y.score){
if(x.ref_start > y.ref_start)
return true;
else
return false;
}
else
return false;
}
else
return false;
}
};
struct hspComp{
__host__ __device__
bool operator()(segmentPair x, segmentPair y){
if(x.query_start < y.query_start)
return true;
else if(x.query_start == y.query_start){
if(x.len > y.len)
return true;
else if(x.len == y.len){
if(x.ref_start < y.ref_start)
return true;
else if(x.ref_start == y.ref_start){
if(x.score > y.score)
return true;
else
return false;
}
else
return false;
}
else
return false;
}
else
return false;
}
};
__global__
void rev_comp_string (uint32_t len, char* src_seq, char* dst_seq){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
for (uint32_t i = start; i < len; i += stride) {
char ch = src_seq[i];
char dst_rc = X_NT;
if (ch == A_NT){
dst_rc = T_NT;
}
else if (ch == C_NT){
dst_rc = G_NT;
}
else if (ch == G_NT){
dst_rc = C_NT;
}
else if (ch == T_NT){
dst_rc = A_NT;
}
else {
dst_rc = ch;
}
dst_seq[len -1 -i] = dst_rc;
}
}
__global__
void find_num_hits (int num_seeds, const uint32_t* __restrict__ d_index_table, uint64_t* seed_offsets, uint32_t* seed_hit_num){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
uint32_t num_seed_hit;
uint32_t seed;
for (uint32_t id = start; id < num_seeds; id += stride) {
seed = (seed_offsets[id] >> 32);
// start and end from the seed block_id table
num_seed_hit = d_index_table[seed];
if (seed > 0){
num_seed_hit -= d_index_table[seed-1];
}
seed_hit_num[id] = num_seed_hit;
}
}
__global__
void find_hits (const uint32_t* __restrict__ d_index_table, const uint32_t* __restrict__ d_pos_table, uint64_t* d_seed_offsets, uint32_t seed_size, uint32_t* seed_hit_num, int num_hits, segmentPair* d_hsp, uint32_t start_seed_index, uint32_t start_hit_index, uint32_t ref_start, uint32_t ref_end){
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int warp_size = warpSize;
int lane_id = thread_id%warp_size;
int warp_id = (thread_id-lane_id)/warp_size;
__shared__ uint32_t start, end;
__shared__ uint32_t seed;
__shared__ uint64_t seed_offset;
__shared__ uint32_t ref_loc[NUM_WARPS];
__shared__ uint32_t query_loc;
__shared__ uint32_t seed_hit_prefix;
if(thread_id == 0){
seed_offset = d_seed_offsets[block_id+start_seed_index];
seed = (seed_offset >> 32);
query_loc = ((seed_offset << 32) >> 32) + seed_size;
// start and end from the seed block_id table
end = d_index_table[seed];
start = 0;
if (seed > 0){
start = d_index_table[seed-1];
}
seed_hit_prefix = seed_hit_num[block_id+start_seed_index];
}
__syncthreads();
for (int id1 = start; id1 < end; id1 += NUM_WARPS) {
if(id1+warp_id < end){
if(lane_id == 0){
ref_loc[warp_id] = d_pos_table[id1+warp_id] + seed_size;
int dram_address = seed_hit_prefix -id1 - warp_id+start-1-start_hit_index;
d_hsp[dram_address].ref_start = ref_loc[warp_id];
d_hsp[dram_address].query_start = query_loc;
d_hsp[dram_address].len = 0;
if(ref_loc[warp_id] >= ref_start && ref_loc[warp_id] <= ref_end){
d_hsp[dram_address].score = 0;
}
else{
d_hsp[dram_address].score = -1;
}
}
}
}
}
__global__
void find_hsps (const char* __restrict__ d_ref_seq, const char* __restrict__ d_query_seq, uint32_t ref_len, uint32_t query_len, int *d_sub_mat, bool noentropy, int xdrop, int hspthresh, int num_hits, segmentPair* d_hsp, uint32_t* d_done){
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int num_blocks = gridDim.x;
int warp_size = warpSize;
int lane_id = thread_id%warp_size;
int warp_id = (thread_id-lane_id)/warp_size;
__shared__ uint32_t ref_loc[NUM_WARPS];
__shared__ uint32_t query_loc[NUM_WARPS];
__shared__ int total_score[NUM_WARPS];
__shared__ int prev_score[NUM_WARPS];
__shared__ int prev_max_score[NUM_WARPS];
__shared__ int prev_max_pos[NUM_WARPS];
__shared__ bool find_hsp[NUM_WARPS];
__shared__ bool edge_found[NUM_WARPS];
__shared__ bool xdrop_found[NUM_WARPS];
__shared__ bool new_max_found[NUM_WARPS];
__shared__ uint32_t left_extent[NUM_WARPS];
__shared__ int extent[NUM_WARPS];
__shared__ uint32_t tile[NUM_WARPS];
__shared__ double entropy[NUM_WARPS];
int thread_score;
int max_thread_score;
int max_pos;
int temp_pos;
bool xdrop_done;
bool temp_xdrop_done;
int temp;
short count[4];
short count_del[4];
char r_chr;
char q_chr;
uint32_t ref_pos;
uint32_t query_pos;
int pos_offset;
__shared__ int sub_mat[NUC2];
if(thread_id < NUC2){
sub_mat[thread_id] = d_sub_mat[thread_id];
}
__syncthreads();
for(int hid0 = block_id*NUM_WARPS; hid0 < num_hits; hid0 += NUM_WARPS*num_blocks){
int hid = hid0 + warp_id;
if(hid < num_hits){
if(lane_id == 0){
ref_loc[warp_id] = d_hsp[hid].ref_start;
query_loc[warp_id] = d_hsp[hid].query_start;
total_score[warp_id] = 0;
if(d_hsp[hid].score < 0){
find_hsp[warp_id] = false;
}
else{
find_hsp[warp_id] = true;
}
}
}
else{
if(lane_id == 0){
ref_loc[warp_id] = d_hsp[hid0].ref_start;
query_loc[warp_id] = d_hsp[hid0].query_start;
total_score[warp_id] = 0;
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
//Right extension
if(lane_id ==0){
if(find_hsp[warp_id]){
edge_found[warp_id] = false;
}
else{
edge_found[warp_id] = true;
}
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
new_max_found[warp_id] = false;
entropy[warp_id] = 1.0f;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = -1;
extent[warp_id] = 0;
}
count[0] = 0;
count[1] = 0;
count[2] = 0;
count[3] = 0;
count_del[0] = 0;
count_del[1] = 0;
count_del[2] = 0;
count_del[3] = 0;
max_pos = 0;
__syncwarp();
while(!xdrop_found[warp_id] && !edge_found[warp_id]){
pos_offset = lane_id + tile[warp_id];
ref_pos = ref_loc[warp_id] + pos_offset;
query_pos = query_loc[warp_id] + pos_offset;
thread_score = 0;
if(ref_pos < ref_len && query_pos < query_len){
r_chr = d_ref_seq[ref_pos];
q_chr = d_query_seq[query_pos];
thread_score = sub_mat[r_chr*NUC+q_chr];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if(lane_id >= offset){
thread_score += temp;
}
}
thread_score += prev_score[warp_id];
if(thread_score > prev_max_score[warp_id]){
max_thread_score = thread_score;
max_pos = pos_offset;
}
else{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
xdrop_done = ((max_thread_score-thread_score) > xdrop);
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if(lane_id >= offset){
xdrop_done |= temp_xdrop_done;
}
}
if(xdrop_done == 1){
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
__syncwarp();
if(lane_id == warp_size-1){
if(max_pos > prev_max_pos[warp_id])
new_max_found[warp_id] = true;
else
new_max_found[warp_id] = false;
if(xdrop_done){
total_score[warp_id] += max_thread_score;
xdrop_found[warp_id] = true;
extent[warp_id] = max_pos;
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if(ref_pos >= ref_len || query_pos >= query_len){
total_score[warp_id] += max_thread_score;
edge_found[warp_id] = true;
extent[warp_id] = max_pos;
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
prev_max_pos[warp_id] = max_pos;
tile[warp_id]+= warp_size;
}
}
__syncwarp();
if(new_max_found[warp_id]){
for(int i = 0; i < 4; i++){
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if(r_chr == q_chr){
if(pos_offset <= prev_max_pos[warp_id]){
count[r_chr] += 1;
}
else{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
__syncwarp();
////////////////////////////////////////////////////////////////
//Left extension
if(lane_id ==0){
if(find_hsp[warp_id]){
edge_found[warp_id] = false;
}
else{
edge_found[warp_id] = true;
}
tile[warp_id] = 0;
xdrop_found[warp_id] = false;
new_max_found[warp_id] = false;
prev_score[warp_id] = 0;
prev_max_score[warp_id] = 0;
prev_max_pos[warp_id] = 0;
left_extent[warp_id] = 0;
}
count_del[0] = 0;
count_del[1] = 0;
count_del[2] = 0;
count_del[3] = 0;
max_pos = 0;
__syncwarp();
while(!xdrop_found[warp_id] && !edge_found[warp_id]){
pos_offset = lane_id+1+tile[warp_id];
thread_score = 0;
if(ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset){
ref_pos = ref_loc[warp_id] - pos_offset;
query_pos = query_loc[warp_id] - pos_offset;
r_chr = d_ref_seq[ref_pos];
q_chr = d_query_seq[query_pos];
thread_score = sub_mat[r_chr*NUC+q_chr];
}
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset);
if(lane_id >= offset){
thread_score += temp;
}
}
thread_score += prev_score[warp_id];
if(thread_score > prev_max_score[warp_id]){
max_thread_score = thread_score;
max_pos = pos_offset;
}
else{
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
xdrop_done = ((max_thread_score-thread_score) > xdrop);
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset);
if(lane_id >= offset){
xdrop_done |= temp_xdrop_done;
}
}
if(xdrop_done == 1){
max_thread_score = prev_max_score[warp_id];
max_pos = prev_max_pos[warp_id];
}
__syncwarp();
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset);
temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset);
if(lane_id >= offset){
if(temp >= max_thread_score){
max_thread_score = temp;
max_pos = temp_pos;
}
}
}
__syncwarp();
if(lane_id == warp_size-1){
if(max_pos > prev_max_pos[warp_id])
new_max_found[warp_id] = true;
else
new_max_found[warp_id] = false;
if(xdrop_done){
total_score[warp_id]+=max_thread_score;
xdrop_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else if(ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset){
total_score[warp_id]+=max_thread_score;
edge_found[warp_id] = true;
left_extent[warp_id] = max_pos;
extent[warp_id] += left_extent[warp_id];
prev_max_pos[warp_id] = max_pos;
tile[warp_id] = max_pos;
}
else{
prev_score[warp_id] = thread_score;
prev_max_score[warp_id] = max_thread_score;
prev_max_pos[warp_id] = max_pos;
tile[warp_id]+=warp_size;
}
}
__syncwarp();
if(new_max_found[warp_id]){
for(int i = 0; i < 4; i++){
count[i] = count[i] + count_del[i];
count_del[i] = 0;
}
}
__syncwarp();
if(r_chr == q_chr){
if(pos_offset <= prev_max_pos[warp_id]){
count[r_chr] += 1;
}
else{
count_del[r_chr] += 1;
}
}
__syncwarp();
}
//////////////////////////////////////////////////////////////////
if(total_score[warp_id] >= hspthresh && total_score[warp_id] <= 3*hspthresh && !noentropy){
for(int i = 0; i < 4; i++){
#pragma unroll
for (int offset = 1; offset < warp_size; offset = offset << 1){
count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset);
}
}
__syncwarp();
if(lane_id == warp_size-1 && ((count[0]+count[1]+count[2]+count[3]) >= 20)){
entropy[warp_id] = 0.f;
for(int i = 0; i < 4; i++){
entropy[warp_id] += ((double) count[i])/((double) (extent[warp_id]+1)) * ((count[i] != 0) ? log(((double) count[i]) / ((double) (extent[warp_id]+1))): 0.f);
}
entropy[warp_id] = -entropy[warp_id]/log(4.0f);
}
}
__syncwarp();
//////////////////////////////////////////////////////////////////
if(hid < num_hits){
if(lane_id == 0){
if( ((int) (((float) total_score[warp_id]) * entropy[warp_id])) >= hspthresh){
d_hsp[hid].ref_start = ref_loc[warp_id] - left_extent[warp_id];
d_hsp[hid].query_start = query_loc[warp_id] - left_extent[warp_id];
d_hsp[hid].len = extent[warp_id];
if(entropy[warp_id] > 0)
d_hsp[hid].score = total_score[warp_id]*entropy[warp_id];
d_done[hid] = 1;
}
else{
d_hsp[hid].ref_start = ref_loc[warp_id];
d_hsp[hid].query_start = query_loc[warp_id];
d_hsp[hid].len = 0;
d_hsp[hid].score = 0;
d_done[hid] = 0;
}
}
}
__syncwarp();
}
}
__global__
void compress_output (uint32_t* d_done, segmentPair* d_hsp, segmentPair* d_hsp_reduced, int num_hits, bool rev, uint32_t len){
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
int grid_dim = gridDim.x;
int block_id = blockIdx.x;
int stride = block_dim * grid_dim;
uint32_t start = block_dim * block_id + thread_id;
int index = 0;
for (uint32_t id = start; id < num_hits; id += stride) {
index = d_done[id];
segmentPair output = d_hsp[id];
if(rev)
output.query_start = len - 1 - (output.query_start + output.len);
if(id > 0){
if(index > d_done[id-1]){
d_hsp_reduced[index-1] = output;
}
}
else{
if(index == 1){
d_hsp_reduced[0] = output;
}
}
}
}
std::vector<segmentPair> SeedAndFilter (std::vector<uint64_t> seed_offset_vector, bool rev, uint32_t ref_start, uint32_t ref_end){
uint32_t num_hits = 0;
uint32_t total_anchors = 0;
uint32_t num_seeds = seed_offset_vector.size();
assert(num_seeds <= MAX_SEEDS);
uint64_t* tmp_offset = (uint64_t*) malloc(num_seeds*sizeof(uint64_t));
for (uint32_t i = 0; i < num_seeds; i++) {
tmp_offset[i] = seed_offset_vector[i];
}
int g;
std::unique_lock<std::mutex> locker(mu);
if (available_gpus.empty()) {
cv.wait(locker, [](){return !available_gpus.empty();});
}
g = available_gpus.back();
available_gpus.pop_back();
locker.unlock();
check_cuda_setDevice(g, "SeedAndFilter");
check_cuda_memcpy((void*)d_seed_offsets[g], (void*)tmp_offset, num_seeds*sizeof(uint64_t), cudaMemcpyHostToDevice, "seed_offsets");
find_num_hits <<<MAX_BLOCKS, MAX_THREADS>>> (num_seeds, d_index_table[g], d_seed_offsets[g], d_hit_num_array[g]);
thrust::inclusive_scan(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin() + num_seeds, d_hit_num_vec[g].begin());
check_cuda_memcpy((void*)&num_hits, (void*)(d_hit_num_array[g]+num_seeds-1), sizeof(uint32_t), cudaMemcpyDeviceToHost, "num_hits");
int num_iter = num_hits/MAX_HITS+1;
uint32_t iter_hit_limit = MAX_HITS;
thrust::device_vector<uint32_t> limit_pos (num_iter);
for(int i = 0; i < num_iter-1; i++){
thrust::device_vector<uint32_t>::iterator result_end = thrust::lower_bound(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin()+num_seeds, iter_hit_limit);
uint32_t pos = thrust::distance(d_hit_num_vec[g].begin(), result_end)-1;
iter_hit_limit = d_hit_num_vec[g][pos]+MAX_HITS;
limit_pos[i] = pos;
}
limit_pos[num_iter-1] = num_seeds-1;
segmentPair** h_hsp = (segmentPair**) malloc(num_iter*sizeof(segmentPair*));
uint32_t* num_anchors = (uint32_t*) calloc(num_iter, sizeof(uint32_t));
uint32_t start_seed_index = 0;
uint32_t start_hit_val = 0;
uint32_t iter_num_seeds, iter_num_hits;
if(num_hits > 0){
for(int i = 0; i < num_iter; i++){
iter_num_seeds = limit_pos[i] + 1 - start_seed_index;
iter_num_hits = d_hit_num_vec[g][limit_pos[i]] - start_hit_val;
find_hits <<<iter_num_seeds, BLOCK_SIZE>>> (d_index_table[g], d_pos_table[g], d_seed_offsets[g], seed_size, d_hit_num_array[g], iter_num_hits, d_hsp[g], start_seed_index, start_hit_val, ref_start, ref_end);
if(rev){
find_hsps <<<1024, BLOCK_SIZE>>> (d_ref_seq[g], d_seq_rc[g], ref_len, ref_len, d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]);
}
else{
find_hsps <<<1024, BLOCK_SIZE>>> (d_ref_seq[g], d_ref_seq[g], ref_len, ref_len, d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]);
}
thrust::inclusive_scan(d_done_vec[g].begin(), d_done_vec[g].begin() + iter_num_hits, d_done_vec[g].begin());
check_cuda_memcpy((void*)&num_anchors[i], (void*)(d_done[g]+iter_num_hits-1), sizeof(uint32_t), cudaMemcpyDeviceToHost, "num_anchors");
if(num_anchors[i] > 0){
compress_output <<<MAX_BLOCKS, MAX_THREADS>>>(d_done[g], d_hsp[g], d_hsp_reduced[g], iter_num_hits, rev, ref_len);
thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspComp());
thrust::device_vector<segmentPair>::iterator result_end = thrust::unique_copy(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], d_hsp_vec[g].begin(), hspEqual());
num_anchors[i] = thrust::distance(d_hsp_vec[g].begin(), result_end), num_anchors[i];
thrust::stable_sort(d_hsp_vec[g].begin(), d_hsp_vec[g].begin()+num_anchors[i], hspDiagComp());
thrust::device_vector<segmentPair>::iterator result_end2 = thrust::unique_copy(d_hsp_vec[g].begin(), d_hsp_vec[g].begin()+num_anchors[i], d_hsp_reduced_vec[g].begin(), hspDiagEqual());
num_anchors[i] = thrust::distance(d_hsp_reduced_vec[g].begin(), result_end2), num_anchors[i];
thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspFinalComp());
total_anchors += num_anchors[i];
h_hsp[i] = (segmentPair*) calloc(num_anchors[i], sizeof(segmentPair));
check_cuda_memcpy((void*)h_hsp[i], (void*)d_hsp_reduced[g], num_anchors[i]*sizeof(segmentPair), cudaMemcpyDeviceToHost, "hsp_output");
}
start_seed_index = limit_pos[i] + 1;
start_hit_val = d_hit_num_vec[g][limit_pos[i]];
}
}
limit_pos.clear();
{
std::unique_lock<std::mutex> locker(mu);
available_gpus.push_back(g);
locker.unlock();
cv.notify_one();
}
std::vector<segmentPair> gpu_filter_output;
segmentPair first_el;
first_el.len = total_anchors;
first_el.score = num_hits;
gpu_filter_output.push_back(first_el);
if(total_anchors > 0){
for(int it = 0; it < num_iter; it++){
for(int i = 0; i < num_anchors[it]; i++){
gpu_filter_output.push_back(h_hsp[it][i]);
}
if(num_anchors[it] > 0){
free(h_hsp[it]);
}
}
}
free(h_hsp);
free(num_anchors);
free(tmp_offset);
return gpu_filter_output;
}
void InitializeProcessor (bool transition, uint32_t WGA_CHUNK, uint32_t input_seed_size, int* sub_mat, int input_xdrop, int input_hspthresh, bool input_noentropy){
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
float global_mem_gb = static_cast<float>(deviceProp.totalGlobalMem / 1073741824.0f);
if(transition)
MAX_SEEDS = 13*WGA_CHUNK;
else
MAX_SEEDS = WGA_CHUNK;
MAX_HITS = MAX_HITS_PER_GB*global_mem_gb;
seed_size = input_seed_size;
xdrop = input_xdrop;
hspthresh = input_hspthresh;
noentropy = input_noentropy;
d_sub_mat = (int**) malloc(NUM_DEVICES*sizeof(int*));
d_seq_rc = (char**) malloc(NUM_DEVICES*sizeof(char*));
d_seed_offsets = (uint64_t**) malloc(NUM_DEVICES*sizeof(uint64_t*));
d_hit_num_array = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_hit_num_vec.reserve(NUM_DEVICES);
d_done = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*));
d_done_vec.reserve(NUM_DEVICES);
d_hsp = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*));
d_hsp_vec.reserve(NUM_DEVICES);
d_hsp_reduced = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*));
d_hsp_reduced_vec.reserve(NUM_DEVICES);
segmentPair zeroHsp;
zeroHsp.ref_start = 0;
zeroHsp.query_start = 0;
zeroHsp.len = 0;
zeroHsp.score = 0;
for(int g = 0; g < NUM_DEVICES; g++){
check_cuda_setDevice(g, "InitializeProcessor");
check_cuda_malloc((void**)&d_sub_mat[g], NUC2*sizeof(int), "sub_mat");
check_cuda_memcpy((void*)d_sub_mat[g], (void*)sub_mat, NUC2*sizeof(int), cudaMemcpyHostToDevice, "sub_mat");
check_cuda_malloc((void**)&d_seed_offsets[g], MAX_SEEDS*sizeof(uint64_t), "seed_offsets");
d_hit_num_vec.emplace_back(MAX_SEEDS, 0);
d_hit_num_array[g] = thrust::raw_pointer_cast(d_hit_num_vec.at(g).data());
d_done_vec.emplace_back(MAX_HITS, 0);
d_done[g] = thrust::raw_pointer_cast(d_done_vec.at(g).data());
d_hsp_vec.emplace_back(MAX_HITS, zeroHsp);
d_hsp[g] = thrust::raw_pointer_cast(d_hsp_vec.at(g).data());
d_hsp_reduced_vec.emplace_back(MAX_HITS, zeroHsp);
d_hsp_reduced[g] = thrust::raw_pointer_cast(d_hsp_reduced_vec.at(g).data());
available_gpus.push_back(g);
}
}
// Allocates a reverse-complement buffer on every GPU and launches a kernel
// that fills it from the device-resident reference sequence.
// NOTE(review): d_seq_rc[g] is allocated on every call; calling this twice
// without an intervening ClearQuery() would leak the previous buffer.
void SendQueryWriteRequest (){
    for(int g = 0; g < NUM_DEVICES; g++){
        check_cuda_setDevice(g, "SendQueryWriteRequest");
        check_cuda_malloc((void**)&d_seq_rc[g], ref_len*sizeof(char), "seq_rc");
        // launch result is not checked here; errors surface at the next
        // checked CUDA call
        rev_comp_string <<<MAX_BLOCKS, MAX_THREADS>>> (ref_len, d_ref_seq[g], d_seq_rc[g]);
    }
}
// Releases the per-device reverse-complement buffers created by
// SendQueryWriteRequest (the host pointer table itself stays allocated).
void ClearQuery(){
    for(int g = 0; g < NUM_DEVICES; g++){
        check_cuda_setDevice(g, "ClearQuery");
        check_cuda_free((void*)d_seq_rc[g], "d_seq_rc");
    }
}
// Releases everything InitializeProcessor() acquired: the thrust device
// vectors, the host-side per-device pointer tables, and finally the GPU
// contexts themselves.
void ShutdownProcessor(){
    // Destroying the vectors frees their device allocations; do this before
    // any context is reset.
    d_done_vec.clear();
    d_hit_num_vec.clear();
    d_hsp_vec.clear();
    d_hsp_reduced_vec.clear();
    // Free the host-side pointer tables malloc'd in InitializeProcessor
    // (previously leaked).
    free(d_sub_mat);
    free(d_seq_rc);
    free(d_seed_offsets);
    free(d_hit_num_array);
    free(d_done);
    free(d_hsp);
    free(d_hsp_reduced);
    // cudaDeviceReset() only affects the current device, so reset each GPU
    // explicitly rather than just whichever one happens to be selected.
    for(int g = 0; g < NUM_DEVICES; g++){
        cudaSetDevice(g);
        cudaDeviceReset();
    }
}
// Export the GPU implementations through dispatch function pointers so the
// frontend can call them without binding to this translation unit directly.
InitializeProcessor_ptr g_InitializeProcessor = InitializeProcessor;
SendQueryWriteRequest_ptr g_SendQueryWriteRequest = SendQueryWriteRequest;
SeedAndFilter_ptr g_SeedAndFilter = SeedAndFilter;
ClearQuery_ptr g_ClearQuery = ClearQuery;
ShutdownProcessor_ptr g_ShutdownProcessor = ShutdownProcessor;
|
f0c77093f38c5b2839f7b432a579c1f2e07d0821.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <torch/cuda.h>
#include "hip_noise.cuh"
#include "bindings.h"
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
using namespace cudaNoise;
// MAKE_KERNEL_FUN(f) generates `f_kernel`: a one-thread-per-point wrapper
// that evaluates the cudaNoise device function `f` at every (x,y,z) row of
// `points` and writes the scalar result into `out`; trailing arguments are
// forwarded verbatim to `f`. (No comments inside the macro body — a `//`
// before a continuation backslash would truncate the definition.)
#define MAKE_KERNEL_FUN(noiseFunc)\
template<typename scalar_t, typename... Args>\
__global__ void noiseFunc##_kernel (\
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> points,\
    torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> out,\
    Args... args) {\
\
    const int x = blockIdx.x * blockDim.x + threadIdx.x;\
    if (x < points.size(0)) {\
        out[x] = noiseFunc (make_float3(points[x][0],points[x][1],points[x][2]), args...);\
    }\
}
// Instantiate a kernel wrapper for every noise primitive bound below.
MAKE_KERNEL_FUN(simplexNoise)
MAKE_KERNEL_FUN(checker)
MAKE_KERNEL_FUN(spots)
MAKE_KERNEL_FUN(worleyNoise)
MAKE_KERNEL_FUN(worleyNoise_five)
MAKE_KERNEL_FUN(discreteNoise)
MAKE_KERNEL_FUN(linearValue)
MAKE_KERNEL_FUN(fadedValue)
MAKE_KERNEL_FUN(cubicValue)
MAKE_KERNEL_FUN(perlinNoise)
MAKE_KERNEL_FUN(repeaterPerlin)
MAKE_KERNEL_FUN(repeaterPerlinAbs)
MAKE_KERNEL_FUN(repeaterSimplex)
MAKE_KERNEL_FUN(repeaterSimplexAbs)
MAKE_KERNEL_FUN(repeater)
MAKE_KERNEL_FUN(fractalSimplex)
MAKE_KERNEL_FUN(turbulence)
MAKE_KERNEL_FUN(repeaterTurbulence)
// evalPoints(f, points, ...) expands inside an `eval_f` binding body:
// validates `points` (CUDA + contiguous), allocates a matching 1-D CUDA
// output tensor, launches f_kernel over ceil(|points|/1024) blocks for each
// floating dtype, and supplies the function's `return out;`.
#define evalPoints(noiseFunc, points, ...)\
    CHECK_INPUT(points);\
    auto out = torch::empty({points.size(0)}, at::kCUDA);\
\
    const int threads = 1024;\
    const int blocks = (points.size(0)+threads-1)/threads;\
\
    AT_DISPATCH_FLOATING_TYPES(points.type(), "noise", ([&] {\
\
    hipLaunchKernelGGL(( noiseFunc##_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, \
        points.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),\
        out.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), __VA_ARGS__);\
\
    }));\
\
    return out;
// evalPoints_(f, out, points, ...) is the in-place variant: the caller
// provides the output tensor, which is validated, filled and returned.
#define evalPoints_(noiseFunc, out, points, ...)\
    CHECK_INPUT(points);\
    CHECK_INPUT(out);\
\
    const int threads = 1024;\
    const int blocks = (points.size(0)+threads-1)/threads;\
\
    AT_DISPATCH_FLOATING_TYPES(points.type(), "noise", ([&] {\
\
    hipLaunchKernelGGL(( noiseFunc##_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, \
        points.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),\
        out.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), __VA_ARGS__);\
\
    }));\
\
    return out;
// Python-facing bindings. Each `eval_<noise>_` fills a caller-provided
// tensor via evalPoints_; the 64-bit `seed` is narrowed to int because the
// underlying cudaNoise device API takes int seeds.
torch::Tensor eval_simplexNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(simplexNoise, out, points, scale, (int) seed);
}
torch::Tensor eval_checker_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(checker, out, points, scale, (int) seed);
}
torch::Tensor eval_spots_(torch::Tensor out, torch::Tensor points, float scale, long long seed, float size, int minNum, int maxNum, float jitter, int shape) {
    evalPoints_(spots, out, points, scale, (int) seed, size, minNum, maxNum, jitter, profileShape(shape));
}
torch::Tensor eval_worleyNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int minNum, int maxNum, float jitter) {
    evalPoints_(worleyNoise, out, points, scale, (int) seed, minNum, maxNum, jitter);
}
torch::Tensor eval_worleyNoise_five_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int minNum, int maxNum, float jitter) {
    evalPoints_(worleyNoise_five, out, points, scale, (int) seed, minNum, maxNum, jitter);
}
torch::Tensor eval_discreteNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(discreteNoise, out, points, scale, (int) seed);
}
torch::Tensor eval_linearValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(linearValue, out, points, scale, (int) seed);
}
torch::Tensor eval_fadedValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(fadedValue, out, points, scale, (int) seed);
}
torch::Tensor eval_cubicValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(cubicValue, out, points, scale, (int) seed);
}
torch::Tensor eval_perlinNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(perlinNoise, out, points, scale, (int) seed);
}
// Repeater variants take octave count `n`, frequency `lacunarity` and
// amplitude `decay` forwarded straight to the device function.
torch::Tensor eval_repeaterPerlin_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterPerlin, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterPerlinAbs_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterPerlinAbs, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterSimplex_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterSimplex, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterSimplexAbs_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterSimplexAbs, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeater_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay, int basis) {
    evalPoints_(repeater, out, points, scale, (int) seed, n, lacunarity, decay, basisFunction(basis));
}
torch::Tensor eval_fractalSimplex_(torch::Tensor out, torch::Tensor points, float scale, long long seed, float du, int n, float lacunarity, float decay) {
    evalPoints_(fractalSimplex, out, points, scale, (int) seed, du, n, lacunarity, decay);
}
torch::Tensor eval_turbulence_(torch::Tensor out, torch::Tensor points, float scaleIn, float scaleOut, long long seed, float strength, int inFunc, int outFunc) {
    evalPoints_(turbulence, out, points, scaleIn, scaleOut, (int) seed, strength, basisFunction(inFunc), basisFunction(outFunc));
}
torch::Tensor eval_repeaterTurbulence_(torch::Tensor out, torch::Tensor points, float scaleIn, float scaleOut, long long seed, float strength, int n, int inFunc, int outFunc) {
    evalPoints_(repeaterTurbulence, out, points, scaleIn, scaleOut, (int) seed, strength, n, basisFunction(inFunc), basisFunction(outFunc));
}
// Fills out[x][y][z][:] with the 3-D point start + (x,y,z) * step_size — a
// GPU meshgrid, one thread per grid cell. Bounds come from the accessor
// sizes; the `steps` argument is not read in the body.
template<typename scalar_t>
__global__
void kernel_range3D(
    torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> out,
    float3 start,
    float3 step_size,
    dim3 steps) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int z = blockIdx.z * blockDim.z + threadIdx.z;
    // tail guard: block dims rarely divide the grid evenly
    if (x < out.size(0) && y < out.size(1) && z < out.size(2)) {
        out[x][y][z][0] = start.x + x*step_size.x;
        out[x][y][z][1] = start.y + y*step_size.y;
        out[x][y][z][2] = start.z + z*step_size.z;
    }
}
// Builds a (stepX, stepY, stepZ, 3) float tensor on the GPU whose [x][y][z]
// entry is the point start + (x,y,z) * stepSize.
torch::Tensor eval_range3D(
    float startX, float startY, float startZ,
    float stepSizeX, float stepSizeY, float stepSizeZ,
    int stepX, int stepY, int stepZ) {
    dim3 steps(stepX, stepY, stepZ);
    const dim3 threads(32,16,1);
    // ceil-division so partial blocks cover the grid tail
    const dim3 blocks((steps.x+threads.x-1)/threads.x,(steps.y+threads.y-1)/threads.y,(steps.z+threads.z-1)/threads.z);
    auto out = torch::empty({steps.x,steps.y,steps.z,3}, at::kCUDA);
    AT_DISPATCH_FLOATING_TYPES(out.type(), "simplex3_forward_cuda", ([&] {
    hipLaunchKernelGGL(( kernel_range3D<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        out. packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(),
        make_float3(startX, startY, startZ ),
        make_float3(stepSizeX, stepSizeY, stepSizeZ),
        steps
    );
    // NOTE(review): the synchronize result is discarded — launch or runtime
    // errors are silently ignored here.
    hipDeviceSynchronize();
    }));
    return out;
}
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/cuda.h>
#include "cuda_noise.cuh"
#include "bindings.h"
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
using namespace cudaNoise;
// MAKE_KERNEL_FUN(f) generates `f_kernel`: a one-thread-per-point wrapper
// that evaluates the cudaNoise device function `f` at every (x,y,z) row of
// `points` and writes the scalar result into `out`; trailing arguments are
// forwarded verbatim to `f`. (No comments inside the macro body — a `//`
// before a continuation backslash would truncate the definition.)
#define MAKE_KERNEL_FUN(noiseFunc)\
template<typename scalar_t, typename... Args>\
__global__ void noiseFunc##_kernel (\
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> points,\
    torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> out,\
    Args... args) {\
\
    const int x = blockIdx.x * blockDim.x + threadIdx.x;\
    if (x < points.size(0)) {\
        out[x] = noiseFunc (make_float3(points[x][0],points[x][1],points[x][2]), args...);\
    }\
}
// Instantiate a kernel wrapper for every noise primitive bound below.
MAKE_KERNEL_FUN(simplexNoise)
MAKE_KERNEL_FUN(checker)
MAKE_KERNEL_FUN(spots)
MAKE_KERNEL_FUN(worleyNoise)
MAKE_KERNEL_FUN(worleyNoise_five)
MAKE_KERNEL_FUN(discreteNoise)
MAKE_KERNEL_FUN(linearValue)
MAKE_KERNEL_FUN(fadedValue)
MAKE_KERNEL_FUN(cubicValue)
MAKE_KERNEL_FUN(perlinNoise)
MAKE_KERNEL_FUN(repeaterPerlin)
MAKE_KERNEL_FUN(repeaterPerlinAbs)
MAKE_KERNEL_FUN(repeaterSimplex)
MAKE_KERNEL_FUN(repeaterSimplexAbs)
MAKE_KERNEL_FUN(repeater)
MAKE_KERNEL_FUN(fractalSimplex)
MAKE_KERNEL_FUN(turbulence)
MAKE_KERNEL_FUN(repeaterTurbulence)
// evalPoints(f, points, ...) expands inside an `eval_f` binding body:
// validates `points` (CUDA + contiguous), allocates a matching 1-D CUDA
// output tensor, launches f_kernel over ceil(|points|/1024) blocks for each
// floating dtype, and supplies the function's `return out;`.
#define evalPoints(noiseFunc, points, ...)\
    CHECK_INPUT(points);\
    auto out = torch::empty({points.size(0)}, at::kCUDA);\
\
    const int threads = 1024;\
    const int blocks = (points.size(0)+threads-1)/threads;\
\
    AT_DISPATCH_FLOATING_TYPES(points.type(), "noise", ([&] {\
\
    noiseFunc##_kernel<scalar_t><<<blocks, threads>>>(\
        points.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),\
        out.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), __VA_ARGS__);\
\
    }));\
\
    return out;
// evalPoints_(f, out, points, ...) is the in-place variant: the caller
// provides the output tensor, which is validated, filled and returned.
#define evalPoints_(noiseFunc, out, points, ...)\
    CHECK_INPUT(points);\
    CHECK_INPUT(out);\
\
    const int threads = 1024;\
    const int blocks = (points.size(0)+threads-1)/threads;\
\
    AT_DISPATCH_FLOATING_TYPES(points.type(), "noise", ([&] {\
\
    noiseFunc##_kernel<scalar_t><<<blocks, threads>>>(\
        points.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),\
        out.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>(), __VA_ARGS__);\
\
    }));\
\
    return out;
// Python-facing bindings. Each `eval_<noise>_` fills a caller-provided
// tensor via evalPoints_; the 64-bit `seed` is narrowed to int because the
// underlying cudaNoise device API takes int seeds.
torch::Tensor eval_simplexNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(simplexNoise, out, points, scale, (int) seed);
}
torch::Tensor eval_checker_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(checker, out, points, scale, (int) seed);
}
torch::Tensor eval_spots_(torch::Tensor out, torch::Tensor points, float scale, long long seed, float size, int minNum, int maxNum, float jitter, int shape) {
    evalPoints_(spots, out, points, scale, (int) seed, size, minNum, maxNum, jitter, profileShape(shape));
}
torch::Tensor eval_worleyNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int minNum, int maxNum, float jitter) {
    evalPoints_(worleyNoise, out, points, scale, (int) seed, minNum, maxNum, jitter);
}
torch::Tensor eval_worleyNoise_five_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int minNum, int maxNum, float jitter) {
    evalPoints_(worleyNoise_five, out, points, scale, (int) seed, minNum, maxNum, jitter);
}
torch::Tensor eval_discreteNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(discreteNoise, out, points, scale, (int) seed);
}
torch::Tensor eval_linearValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(linearValue, out, points, scale, (int) seed);
}
torch::Tensor eval_fadedValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(fadedValue, out, points, scale, (int) seed);
}
torch::Tensor eval_cubicValue_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(cubicValue, out, points, scale, (int) seed);
}
torch::Tensor eval_perlinNoise_(torch::Tensor out, torch::Tensor points, float scale, long long seed) {
    evalPoints_(perlinNoise, out, points, scale, (int) seed);
}
// Repeater variants take octave count `n`, frequency `lacunarity` and
// amplitude `decay` forwarded straight to the device function.
torch::Tensor eval_repeaterPerlin_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterPerlin, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterPerlinAbs_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterPerlinAbs, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterSimplex_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterSimplex, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeaterSimplexAbs_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay) {
    evalPoints_(repeaterSimplexAbs, out, points, scale, (int) seed, n, lacunarity, decay);
}
torch::Tensor eval_repeater_(torch::Tensor out, torch::Tensor points, float scale, long long seed, int n, float lacunarity, float decay, int basis) {
    evalPoints_(repeater, out, points, scale, (int) seed, n, lacunarity, decay, basisFunction(basis));
}
torch::Tensor eval_fractalSimplex_(torch::Tensor out, torch::Tensor points, float scale, long long seed, float du, int n, float lacunarity, float decay) {
    evalPoints_(fractalSimplex, out, points, scale, (int) seed, du, n, lacunarity, decay);
}
torch::Tensor eval_turbulence_(torch::Tensor out, torch::Tensor points, float scaleIn, float scaleOut, long long seed, float strength, int inFunc, int outFunc) {
    evalPoints_(turbulence, out, points, scaleIn, scaleOut, (int) seed, strength, basisFunction(inFunc), basisFunction(outFunc));
}
torch::Tensor eval_repeaterTurbulence_(torch::Tensor out, torch::Tensor points, float scaleIn, float scaleOut, long long seed, float strength, int n, int inFunc, int outFunc) {
    evalPoints_(repeaterTurbulence, out, points, scaleIn, scaleOut, (int) seed, strength, n, basisFunction(inFunc), basisFunction(outFunc));
}
// Meshgrid kernel: each thread owns one (x,y,z) cell of `out` and stores the
// 3-D point start + (x,y,z) * step_size into its last dimension. Bounds come
// from the accessor sizes; the `steps` argument is not read in the body.
template<typename scalar_t>
__global__
void kernel_range3D(
    torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> out,
    float3 start,
    float3 step_size,
    dim3 steps) {
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    // early-out for threads past the grid tail
    if (ix >= out.size(0) || iy >= out.size(1) || iz >= out.size(2))
        return;
    out[ix][iy][iz][0] = start.x + ix * step_size.x;
    out[ix][iy][iz][1] = start.y + iy * step_size.y;
    out[ix][iy][iz][2] = start.z + iz * step_size.z;
}
// Builds a (stepX, stepY, stepZ, 3) float tensor on the GPU whose [x][y][z]
// entry is the point start + (x,y,z) * stepSize.
torch::Tensor eval_range3D(
    float startX, float startY, float startZ,
    float stepSizeX, float stepSizeY, float stepSizeZ,
    int stepX, int stepY, int stepZ) {
    dim3 steps(stepX, stepY, stepZ);
    const dim3 threads(32,16,1);
    // ceil-division so partial blocks cover the grid tail
    const dim3 blocks((steps.x+threads.x-1)/threads.x,
                      (steps.y+threads.y-1)/threads.y,
                      (steps.z+threads.z-1)/threads.z);
    auto out = torch::empty({steps.x,steps.y,steps.z,3}, at::kCUDA);
    AT_DISPATCH_FLOATING_TYPES(out.type(), "simplex3_forward_cuda", ([&] {
        kernel_range3D<scalar_t><<<blocks, threads>>>(
            out.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(),
            make_float3(startX, startY, startZ),
            make_float3(stepSizeX, stepSizeY, stepSizeZ),
            steps
        );
        // Surface launch-configuration errors and asynchronous kernel
        // faults instead of silently discarding them (the original ignored
        // both the launch status and the synchronize result).
        AT_ASSERTM(cudaGetLastError() == cudaSuccess,
                   "kernel_range3D launch failed");
        AT_ASSERTM(cudaDeviceSynchronize() == cudaSuccess,
                   "kernel_range3D execution failed");
    }));
    return out;
}
7d3309f11352080d000ed703a8d253d0291db4ac.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <vector>
#include <hip/hip_runtime.h>
#include "reference.h"
// GLU (gated linear unit): X is (M, 2*split_dim_size, N) flattened row-major;
// the first half along the middle axis is the value, the second half the
// gate. Writes Y[i][j][k] = X[i][j][k] * sigmoid(X[i][j+split][k]) into a
// (M, split_dim_size, N) output, one thread per output element.
__global__
void glu_kernel(
    const int M,
    const int split_dim_size,
    const int N,
    const float* Xdata,
    float* Ydata)
{
    const int xOffset = 2 * split_dim_size * N;  // input stride along dim 0
    const int yOffset = split_dim_size * N;      // output stride along dim 0
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // tail guard: the last block may be partially filled
    if (index >= M * split_dim_size * N) return;
    const int i = index / split_dim_size / N;  // outer index
    const int j = index / N % split_dim_size;  // split-axis index
    const int k = index % N;                   // inner index
    const float x1 = Xdata[i * xOffset + j * N + k];
    const float x2 = Xdata[i * xOffset + (j + split_dim_size) * N + k];
    // x1 * sigmoid(x2)
    Ydata[i * yOffset + j * N + k] = x1 * (1.f / (1.f + expf(-x2)));
}
// Benchmark driver: for every legal split axis of a 1024^3 float tensor,
// times `repeat` launches of the GLU kernel and validates the device output
// against the CPU reference (ComputeGlu).
int main(int argc, char* argv[])
{
    if (argc != 2) {
        printf("Usage: %s <repeat>\n", argv[0]);
        return 1;
    }
    const int repeat = atoi(argv[1]);
    // N-dimensional tensor (N is not limited to 3)
    std::vector<int> Xshape = {1024, 1024, 1024};
    std::vector<int> Yshape;
    Yshape.insert(Yshape.end(), Xshape.begin(), Xshape.end());
    const int ndims = Yshape.size() - 1;
    printf("Shape of input tensor: ( ");
    for (int i = 0; i < ndims + 1; i++)
        printf("%d ", Xshape[i]);
    printf(")\n");
    uint64_t nelems = size_from_dim(0, Xshape);
    uint64_t nelems_bytes = nelems * sizeof(float);
    // NOTE(review): each buffer is 4 GiB here and none of the host/device
    // allocations below are checked for failure.
    float *X = (float*) malloc (nelems_bytes);
    float *Y = (float*) malloc (nelems_bytes);
    float *Y_ref = (float*) malloc (nelems_bytes);
    // deterministic input in [-6, 6)
    std::default_random_engine generator(123);
    std::uniform_real_distribution<float> distribution(-6.f,6.f);
    for (uint64_t i = 0; i < nelems; i++) {
        X[i] = distribution(generator);
    }
    float *d_X;
    hipMalloc((void**)&d_X, nelems_bytes);
    hipMemcpy(d_X, X, nelems_bytes, hipMemcpyHostToDevice);
    float *d_Y;
    hipMalloc((void**)&d_Y, nelems_bytes);
    const int block_size = 256;
    // input_dim sweeps [-1, 3*ndims) so each axis is exercised several
    // times; split_index folds it into [0, ndims].
    for (int input_dim = -1; input_dim < 3 * ndims; input_dim++) {
        const int split_index = (input_dim == -1) ? ndims : (input_dim % (ndims+1));
        if (Yshape[split_index] % 2 != 0) {
            printf("Split dimension %d should be divided by two. Skip\n", Yshape[split_index]);
            continue;
        }
        const int split_dim_size = Yshape[split_index] / 2;
        // m = product of dims before the split axis, n = product after it
        const int m = size_to_dim(split_index, Xshape);
        const int n = size_from_dim(split_index + 1, Xshape);
        ComputeGlu(m, split_dim_size, n, X, Y_ref);
        dim3 grids ((m * split_dim_size * n + block_size - 1) / block_size);
        dim3 blocks (block_size);
        hipDeviceSynchronize();
        auto start = std::chrono::steady_clock::now();
        for (int i = 0; i < repeat; i++) {
            hipLaunchKernelGGL(( glu_kernel), dim3(grids), dim3(blocks), 0, 0, m, split_dim_size, n, d_X, d_Y);
        }
        hipDeviceSynchronize();
        auto end = std::chrono::steady_clock::now();
        auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
        printf("Average execution time of GLU kernel (split dimension = %d): %f (us)\n",
               split_index, (time * 1e-3f) / repeat);
        hipMemcpy(Y, d_Y, nelems_bytes, hipMemcpyDeviceToHost);
        // Only the first nelems/2 entries of Y are valid output: the GLU
        // result halves the split axis, so its size is nelems/2 for any m,n.
        bool ok = true;
        for (uint64_t i = 0; i < nelems/2; i++) {
            if (fabsf(Y[i] - Y_ref[i]) > 1e-3f) {
                ok = false;
                break;
            }
        }
        printf("%s\n", ok ? "PASS" : "FAIL");
    }
    free(X);
    free(Y);
    free(Y_ref);
    hipFree(d_X);
    hipFree(d_Y);
    return 0;
}
| 7d3309f11352080d000ed703a8d253d0291db4ac.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <vector>
#include <cuda.h>
#include "reference.h"
// GLU (gated linear unit): X is (M, 2*split_dim_size, N) flattened row-major;
// the first half along the middle axis is the value, the second half the
// gate. Writes Y[i][j][k] = X[i][j][k] * sigmoid(X[i][j+split][k]) into a
// (M, split_dim_size, N) output, one thread per output element.
__global__
void glu_kernel(
    const int M,
    const int split_dim_size,
    const int N,
    const float* Xdata,
    float* Ydata)
{
    const int total = M * split_dim_size * N;
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < total) {
        // unravel the flat output index into (i, j, k)
        const int k = idx % N;
        const int rest = idx / N;
        const int j = rest % split_dim_size;
        const int i = rest / split_dim_size;
        const int in_base = i * (2 * split_dim_size * N);
        const float value = Xdata[in_base + j * N + k];
        const float gate  = Xdata[in_base + (j + split_dim_size) * N + k];
        const float sig = 1.f / (1.f + expf(-gate));
        Ydata[i * (split_dim_size * N) + j * N + k] = value * sig;
    }
}
// Benchmark driver: for every legal split axis of a 1024^3 float tensor,
// times `repeat` launches of the GLU kernel and validates the device output
// against the CPU reference (ComputeGlu).
int main(int argc, char* argv[])
{
    if (argc != 2) {
        printf("Usage: %s <repeat>\n", argv[0]);
        return 1;
    }
    const int repeat = atoi(argv[1]);
    // N-dimensional tensor (N is not limited to 3)
    std::vector<int> Xshape = {1024, 1024, 1024};
    std::vector<int> Yshape;
    Yshape.insert(Yshape.end(), Xshape.begin(), Xshape.end());
    const int ndims = Yshape.size() - 1;
    printf("Shape of input tensor: ( ");
    for (int i = 0; i < ndims + 1; i++)
        printf("%d ", Xshape[i]);
    printf(")\n");
    uint64_t nelems = size_from_dim(0, Xshape);
    uint64_t nelems_bytes = nelems * sizeof(float);
    // NOTE(review): each buffer is 4 GiB here and none of the host/device
    // allocations below are checked for failure.
    float *X = (float*) malloc (nelems_bytes);
    float *Y = (float*) malloc (nelems_bytes);
    float *Y_ref = (float*) malloc (nelems_bytes);
    // deterministic input in [-6, 6)
    std::default_random_engine generator(123);
    std::uniform_real_distribution<float> distribution(-6.f,6.f);
    for (uint64_t i = 0; i < nelems; i++) {
        X[i] = distribution(generator);
    }
    float *d_X;
    cudaMalloc((void**)&d_X, nelems_bytes);
    cudaMemcpy(d_X, X, nelems_bytes, cudaMemcpyHostToDevice);
    float *d_Y;
    cudaMalloc((void**)&d_Y, nelems_bytes);
    const int block_size = 256;
    // input_dim sweeps [-1, 3*ndims) so each axis is exercised several
    // times; split_index folds it into [0, ndims].
    for (int input_dim = -1; input_dim < 3 * ndims; input_dim++) {
        const int split_index = (input_dim == -1) ? ndims : (input_dim % (ndims+1));
        if (Yshape[split_index] % 2 != 0) {
            printf("Split dimension %d should be divided by two. Skip\n", Yshape[split_index]);
            continue;
        }
        const int split_dim_size = Yshape[split_index] / 2;
        // m = product of dims before the split axis, n = product after it
        const int m = size_to_dim(split_index, Xshape);
        const int n = size_from_dim(split_index + 1, Xshape);
        ComputeGlu(m, split_dim_size, n, X, Y_ref);
        dim3 grids ((m * split_dim_size * n + block_size - 1) / block_size);
        dim3 blocks (block_size);
        cudaDeviceSynchronize();
        auto start = std::chrono::steady_clock::now();
        for (int i = 0; i < repeat; i++) {
            glu_kernel<<<grids, blocks>>>(m, split_dim_size, n, d_X, d_Y);
        }
        cudaDeviceSynchronize();
        auto end = std::chrono::steady_clock::now();
        auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
        printf("Average execution time of GLU kernel (split dimension = %d): %f (us)\n",
               split_index, (time * 1e-3f) / repeat);
        cudaMemcpy(Y, d_Y, nelems_bytes, cudaMemcpyDeviceToHost);
        // Only the first nelems/2 entries of Y are valid output: the GLU
        // result halves the split axis, so its size is nelems/2 for any m,n.
        bool ok = true;
        for (uint64_t i = 0; i < nelems/2; i++) {
            if (fabsf(Y[i] - Y_ref[i]) > 1e-3f) {
                ok = false;
                break;
            }
        }
        printf("%s\n", ok ? "PASS" : "FAIL");
    }
    free(X);
    free(Y);
    free(Y_ref);
    cudaFree(d_X);
    cudaFree(d_Y);
    return 0;
}
|
2885e5665674e0b25f3cd0cc41e3f1bd191a3257.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// Each thread writes its flattened id into `matrix`. With a (N, M, 1) block
// the id is threadIdx.x * M + threadIdx.y (blockDim.y == M), matching the
// host's row-major ii * M + jj print indexing.
__global__ void dkernel(unsigned *matrix) {
    unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
    matrix[id] = id;
}
#define N 5
#define M 6
// Launches a single (N x M) block of dkernel, copies the id matrix back and
// prints it as an N x M table. Returns nonzero on any allocation/copy error
// (the original checked nothing and leaked both buffers).
int main() {
    dim3 block(N, M, 1);
    unsigned *matrix, *hmatrix;
    if (hipMalloc(&matrix, N * M * sizeof(unsigned)) != hipSuccess) {
        fprintf(stderr, "hipMalloc failed\n");
        return 1;
    }
    hmatrix = (unsigned *)malloc(N * M * sizeof(unsigned));
    if (hmatrix == NULL) {
        fprintf(stderr, "malloc failed\n");
        hipFree(matrix);
        return 1;
    }
    hipLaunchKernelGGL(( dkernel), dim3(1), dim3(block), 0, 0, matrix);
    // hipMemcpy synchronizes with the kernel and surfaces launch errors too
    if (hipMemcpy(hmatrix, matrix, N * M * sizeof(unsigned), hipMemcpyDeviceToHost) != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed\n");
        free(hmatrix);
        hipFree(matrix);
        return 1;
    }
    for (unsigned ii = 0; ii < N; ++ii) {
        for (unsigned jj = 0; jj < M; ++jj) {
            printf("%2d ", hmatrix[ii * M + jj]);
        }
        printf("\n");
    }
    // release the buffers the original leaked
    free(hmatrix);
    hipFree(matrix);
    return 0;
}
| 2885e5665674e0b25f3cd0cc41e3f1bd191a3257.cu | #include <stdio.h>
#include <cuda.h>
// Each thread writes its flattened id into `matrix`. With a (N, M, 1) block
// the id is threadIdx.x * M + threadIdx.y (blockDim.y == M), matching the
// host's row-major ii * M + jj print indexing.
__global__ void dkernel(unsigned *matrix) {
    unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
    matrix[id] = id;
}
#define N 5
#define M 6
// Launches a single (N x M) block of dkernel, copies the id matrix back and
// prints it as an N x M table. Returns nonzero on any allocation/copy error
// (the original checked nothing and leaked both buffers).
int main() {
    dim3 block(N, M, 1);
    unsigned *matrix, *hmatrix;
    if (cudaMalloc(&matrix, N * M * sizeof(unsigned)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    hmatrix = (unsigned *)malloc(N * M * sizeof(unsigned));
    if (hmatrix == NULL) {
        fprintf(stderr, "malloc failed\n");
        cudaFree(matrix);
        return 1;
    }
    dkernel<<<1, block>>>(matrix);
    // catch bad launch configurations before waiting on the copy
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        free(hmatrix);
        cudaFree(matrix);
        return 1;
    }
    // cudaMemcpy synchronizes with the kernel and surfaces runtime faults
    if (cudaMemcpy(hmatrix, matrix, N * M * sizeof(unsigned), cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed\n");
        free(hmatrix);
        cudaFree(matrix);
        return 1;
    }
    for (unsigned ii = 0; ii < N; ++ii) {
        for (unsigned jj = 0; jj < M; ++jj) {
            printf("%2d ", hmatrix[ii * M + jj]);
        }
        printf("\n");
    }
    // release the buffers the original leaked
    free(hmatrix);
    cudaFree(matrix);
    return 0;
}
|
29f27b0761d04fe426ab99268abdd046cfd5f49f.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "functions/log.h"
#include "test_utils.h"
#include "cuda_utils.h"
namespace MLCommon {
namespace Functions {
// Test parameters: comparison tolerance and element count for one run.
template <typename T>
struct LogInputs {
    T tolerance;
    int len;
};
// gtest prints parameters via operator<<; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const LogInputs<T>& dims) {
    return os;
}
// Fixture for f_log: uploads a fixed 4-element input and its precomputed
// natural-log reference to the device so the test bodies can compare
// `result` against `result_ref`.
template <typename T>
class LogTest: public ::testing::TestWithParam<LogInputs<T> > {
protected:
    void SetUp() override {
        params = ::testing::TestWithParam<LogInputs<T>>::GetParam();
        hipStream_t stream;
        CUDA_CHECK(hipStreamCreate(&stream));
        int len = params.len;
        // The original declared `T data_h[params.len] = {...}` — a VLA with
        // an initializer, which is ill-formed C++. Use fixed-size arrays
        // with explicit casts (avoids double->float narrowing too).
        ASSERT_LE(len, 4);
        allocate(data, len);
        T data_h[4] = { T(2.1), T(4.5), T(0.34), T(10.0) };
        updateDevice(data, data_h, len, stream);
        allocate(result, len);
        allocate(result_ref, len);
        T result_ref_h[4] = { T(0.74193734), T(1.5040774), T(-1.07880966), T(2.30258509) };
        updateDevice(result_ref, result_ref_h, len, stream);
        f_log(result, data, T(1), len, stream);
        // ensure all queued work finished before the stream goes away
        CUDA_CHECK(hipStreamSynchronize(stream));
        CUDA_CHECK(hipStreamDestroy(stream));
    }
    void TearDown() override {
        CUDA_CHECK(hipFree(data));
        CUDA_CHECK(hipFree(result));
        CUDA_CHECK(hipFree(result_ref));
    }
protected:
    LogInputs<T> params;            // tolerance + element count for this run
    T *data, *result, *result_ref;  // device buffers
};
// Single parameter set per precision: 4 elements, absolute tolerance 1e-3.
const std::vector<LogInputs<float> > inputsf2 = {
    {0.001f, 4}
};
const std::vector<LogInputs<double> > inputsd2 = {
    {0.001, 4}
};
// Compare the device f_log output against the precomputed reference.
typedef LogTest<float> LogTestValF;
TEST_P(LogTestValF, Result) {
    ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
                CompareApproxAbs<float>(params.tolerance)));
}
typedef LogTest<double> LogTestValD;
TEST_P(LogTestValD, Result){
    ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
                CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValD,
                        ::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
| 29f27b0761d04fe426ab99268abdd046cfd5f49f.cu | #include <gtest/gtest.h>
#include "functions/log.h"
#include "test_utils.h"
#include "cuda_utils.h"
namespace MLCommon {
namespace Functions {
// Test parameters: comparison tolerance and element count for one run.
template <typename T>
struct LogInputs {
    T tolerance;
    int len;
};
// gtest prints parameters via operator<<; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const LogInputs<T>& dims) {
    return os;
}
// Fixture for f_log: uploads a fixed 4-element input and its precomputed
// natural-log reference to the device so the test bodies can compare
// `result` against `result_ref`.
template <typename T>
class LogTest: public ::testing::TestWithParam<LogInputs<T> > {
protected:
    void SetUp() override {
        params = ::testing::TestWithParam<LogInputs<T>>::GetParam();
        cudaStream_t stream;
        CUDA_CHECK(cudaStreamCreate(&stream));
        int len = params.len;
        // The original declared `T data_h[params.len] = {...}` — a VLA with
        // an initializer, which is ill-formed C++. Use fixed-size arrays
        // with explicit casts (avoids double->float narrowing too).
        ASSERT_LE(len, 4);
        allocate(data, len);
        T data_h[4] = { T(2.1), T(4.5), T(0.34), T(10.0) };
        updateDevice(data, data_h, len, stream);
        allocate(result, len);
        allocate(result_ref, len);
        T result_ref_h[4] = { T(0.74193734), T(1.5040774), T(-1.07880966), T(2.30258509) };
        updateDevice(result_ref, result_ref_h, len, stream);
        f_log(result, data, T(1), len, stream);
        // ensure all queued work finished before the stream goes away
        CUDA_CHECK(cudaStreamSynchronize(stream));
        CUDA_CHECK(cudaStreamDestroy(stream));
    }
    void TearDown() override {
        CUDA_CHECK(cudaFree(data));
        CUDA_CHECK(cudaFree(result));
        CUDA_CHECK(cudaFree(result_ref));
    }
protected:
    LogInputs<T> params;            // tolerance + element count for this run
    T *data, *result, *result_ref;  // device buffers
};
// Single parameter set per precision: 4 elements, absolute tolerance 1e-3.
const std::vector<LogInputs<float> > inputsf2 = {
    {0.001f, 4}
};
const std::vector<LogInputs<double> > inputsd2 = {
    {0.001, 4}
};
// Compare the device f_log output against the precomputed reference.
typedef LogTest<float> LogTestValF;
TEST_P(LogTestValF, Result) {
    ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
                CompareApproxAbs<float>(params.tolerance)));
}
typedef LogTest<double> LogTestValD;
TEST_P(LogTestValD, Result){
    ASSERT_TRUE(devArrMatch(result_ref, result, params.len,
                CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValD,
                        ::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
|
0d8e00db8a007dea590863b22f463e91b364bb99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/utils/cub_namespace.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
#if defined(USE_ROCM)
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
#else // USE_ROCM
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // USE_ROCM
#if defined(USE_ROCM)
#define CUBLAS_HALF_TYPE hipblasHalf
#define HIPBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT
// until we use hipblas v2
// hipify correctly maps things like HIP_R_16F to HIP_R_16F,
// however hipblas v1 is still using its custom type
#define HIP_R_16F HIPBLAS_R_16F
#define HIP_R_32F HIPBLAS_R_32F
#else // __HIP_PLATFORM_HCC
#define CUBLAS_HALF_TYPE __half
#endif // __HIP_PLATFORM_HCC
#include "caffe2/utils/math/utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
// Generates AddFunctor/SubFunctor/MulFunctor/DivFunctor. The generic
// template applies `expr` directly; the at::Half specialization round-trips
// through float because at::Half is not operated on natively here. (No
// comments inside the macro body — a `//` before a continuation backslash
// would truncate the definition.)
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
  template <typename T> \
  struct Func##Functor { \
    inline __host__ __device__ T \
    operator()(const T& lhs, const T& rhs) const { \
      return lhs expr rhs; \
    } \
  }; \
  template <> \
  struct Func##Functor<at::Half> { \
    inline __host__ __device__ at::Half operator()( \
        const at::Half& lhs, \
        const at::Half& rhs) const { \
      return convert::To<float, at::Half>(convert::To<at::Half, float>( \
          lhs) expr convert::To<at::Half, float>(rhs)); \
    } \
  };
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
// macro is only needed for the four instantiations above
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
// Elementwise C[i] = op(A[i], B[i]) over N elements. CUDA_1D_KERNEL_LOOP
// iterates i over [0, N) across the whole grid, so any launch configuration
// covers all elements.
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
    const int N,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    C[i] = op(A[i], B[i]);
  }
}
// Row-broadcast over a (rows x cols) matrix: one operand is a length-`cols`
// vector indexed by j = C_index mod cols (recovered via the precomputed
// fixed divisor), the other is full-size. broadcast_1st selects whether A
// (true) or B (false) is the vector operand. `size` == rows * cols.
// (Name keeps the upstream "Kenel" misspelling — renaming would break the
// call sites below.)
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
    const int size,
    const FIXED_DIVISOR cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    const int j = FIXED_DIVISOR_MOD(cols, C_index);
    const int A_index = broadcast_1st ? j : C_index;
    const int B_index = broadcast_1st ? C_index : j;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Column-broadcast over a (rows x cols) matrix: one operand is a length-
// `rows` vector indexed by i = C_index / cols (fixed-divisor division), the
// other is full-size. broadcast_1st selects whether A (true) or B (false)
// is the vector operand. `size` == rows * cols.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
    const int size,
    const FIXED_DIVISOR cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    const int i = FIXED_DIVISOR_DIV(cols, C_index);
    const int A_index = broadcast_1st ? i : C_index;
    const int B_index = broadcast_1st ? C_index : i;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// General D-dimensional broadcast: each output index is unraveled from the
// innermost dim outward with fixed-divisor div/mod, accumulating the per-dim
// strides of A and B. Broadcasting is presumably realized by the caller
// passing stride 0 for broadcast dims (stride setup not visible here).
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
    const int size,
    const SimpleArray<int, D> A_strides,
    const SimpleArray<int, D> B_strides,
    const SimpleArray<FIXED_DIVISOR, D> C_dims,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    int A_index = 0;
    int B_index = 0;
    int C_index_val = C_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      // extract coordinate d of dim i; C_index_val keeps the remaining dims
      FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
      A_index += d * A_strides.data[i];
      B_index += d * B_strides.data[i];
    }
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Launches the appropriate 2-D broadcast kernel for a binary op whose
// broadcasting pattern reduces to (rows x cols). If rowwise_broadcast, the
// broadcast operand is a single row of `cols` elements; otherwise it is a
// single column of `rows` elements. broadcast_1st selects whether A or B is
// the broadcast operand (passed as a compile-time kernel template argument).
// No-op when either dimension is 0.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
    const int rows,
    const int cols,
    const bool rowwise_broadcast,
    const bool broadcast_1st,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  // Empty output: nothing to launch.
  if (rows == 0 || cols == 0) {
    return;
  }
  const int size = rows * cols;
  // Precompute fast division/modulo by cols for the kernels.
  const FIXED_DIVISOR cols_div(cols);
  if (rowwise_broadcast) {
    if (broadcast_1st) {
      hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
          , dim3(CAFFE_GET_BLOCKS(size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context->cuda_stream(), size, cols_div, op, A, B, C);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    } else {
      hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
          , dim3(CAFFE_GET_BLOCKS(size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context->cuda_stream(), size, cols_div, op, A, B, C);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    }
  } else {
    if (broadcast_1st) {
      hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
          , dim3(CAFFE_GET_BLOCKS(size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context->cuda_stream(), size, cols_div, op, A, B, C);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    } else {
      hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
          , dim3(CAFFE_GET_BLOCKS(size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context->cuda_stream(), size, cols_div, op, A, B, C);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    }
  }
}
// Rank-D implementation of the general broadcast binary op: builds
// C-contiguous strides for A and B (with stride 0 on size-1 broadcast
// dimensions, so the same element is revisited) and launches
// BroadcastBinaryOpCUDAKernel. Returns early if any output dim is 0.
// Precondition: A_dims/B_dims/C_dims each hold D entries, already aligned to
// a common rank by the caller.
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
    const int* A_dims,
    const int* B_dims,
    const int* C_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  SimpleArray<int, D> A_strides_array;
  SimpleArray<int, D> B_strides_array;
  SimpleArray<FIXED_DIVISOR, D> C_dims_array;
  int A_stride = 1;
  int B_stride = 1;
  for (int i = D - 1; i >= 0; --i) {
    // An empty tensor produces no work.
    if (C_dims[i] == 0) {
      return;
    }
    // Size-1 dimensions broadcast: stride 0 keeps reading the same element.
    A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
    B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
    A_stride *= A_dims[i];
    B_stride *= B_dims[i];
    C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
  }
  const int size =
      std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
  hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
      , dim3(CAFFE_GET_BLOCKS(size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Computes C = A op B with full NumPy-style broadcasting.
// Fast paths, in order: identical (broadcast-aligned) shapes -> simple
// elementwise kernel; row-wise or column-wise broadcast -> dedicated 2-D
// kernels; otherwise dispatch on rank to the general N-D implementation.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
    const int A_ndim,
    const int* A_dims,
    const int B_ndim,
    const int* B_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  // Output rank is the larger of the two input ranks. Spelled as an explicit
  // comparison instead of the unqualified `::max`, which only resolves via a
  // non-standard global overload injected by the CUDA/HIP runtime headers.
  const int ndim = A_ndim > B_ndim ? A_ndim : B_ndim;
  std::vector<int> A_dims_array(ndim);
  std::vector<int> B_dims_array(ndim);
  std::vector<int> C_dims_array(ndim);
  // Left-pads the shorter shape with 1s and computes the broadcast output
  // shape into C_dims_array.
  utils::ComputeBroadcastBinaryOpDims(
      A_ndim,
      A_dims,
      B_ndim,
      B_dims,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data());
  if (A_dims_array == B_dims_array) {
    // No broadcasting needed: plain elementwise kernel.
    const int size = std::accumulate(
        C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
    hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
        , dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(), size, op, A, B, C);
    C10_HIP_KERNEL_LAUNCH_CHECK();
    return;
  }
  int rows;
  int cols;
  bool broadcast_1st;
  if (utils::IsRowwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, true, broadcast_1st, op, A, B, C, context);
    return;
  }
  if (utils::IsColwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, false, broadcast_1st, op, A, B, C, context);
    return;
  }
  // General case: dispatch BroadcastBinaryOpImpl on the runtime rank.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
      ndim,
      BroadcastBinaryOpImpl,
      TIn,
      TOut,
      BinaryOperator,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data(),
      op,
      A,
      B,
      C,
      context);
}
} // namespace
// Defines the four 2-D broadcast specializations (Rowwise/Colwise x
// broadcast-A/broadcast-B) of `Func` for input type TIn and output type TOut,
// each forwarding to the matching 2-D broadcast kernel on the context's
// stream. All four are no-ops when rows or cols is 0.
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)      \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>(             \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                      \
        CAFFE_CUDA_NUM_THREADS,                                              \
        0,                                                                   \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);         \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                           \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>(            \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                      \
        CAFFE_CUDA_NUM_THREADS,                                              \
        0,                                                                   \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);         \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                           \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>(             \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                      \
        CAFFE_CUDA_NUM_THREADS,                                              \
        0,                                                                   \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);         \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                           \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>(            \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
        , dim3(CAFFE_GET_BLOCKS(size)),                                      \
        CAFFE_CUDA_NUM_THREADS,                                              \
        0,                                                                   \
        context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C);         \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                           \
  }
// Comparison ops (bool output) for the common numeric types plus bool.
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops (same-type output) for ints, floats, and at::Half.
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)            \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int32_t, std::int32_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int64_t, std::int64_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)  \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Logical ops on bool (Xor implemented via bit_xor, identical on bool).
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
// Bitwise ops for bool and the integer types.
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Defines the fully general (arbitrary-rank) broadcast specialization of
// `Func`, forwarding to BroadcastBinaryOp with a default-constructed functor.
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)      \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                         \
      const int A_ndim,                                                   \
      const int* A_dims,                                                  \
      const int B_ndim,                                                   \
      const int* B_dims,                                                  \
      const TIn* A,                                                       \
      const TIn* B,                                                       \
      TOut* C,                                                            \
      CUDAContext* context) {                                             \
    BroadcastBinaryOp<TIn, TOut, Op<TIn>>(                                \
        A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context);     \
  }
// Comparison ops (bool output) for the common numeric types plus bool.
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
// Arithmetic ops (same-type output) for ints, floats, and at::Half.
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)            \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int64_t, std::int64_t, Func, Op)                        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)  \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
// Logical ops on bool (Xor implemented via bit_xor, identical on bool).
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
// Bitwise ops for bool and the integer types.
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                     \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
// Delegates a full reduction (ReduceMin/ReduceMax) to hipcub::DeviceReduce.
// The first call with a null workspace pointer only queries the required
// temporary-storage size; the scratch tensor is then resized (rounded up to
// whole elements of T) and the second call performs the reduction on the
// context's stream, writing the single result to dst.
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func)                  \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>(                     \
      const int N,                                                      \
      const T* src,                                                     \
      T* dst,                                                           \
      Tensor* scratch_ptr,                                              \
      CUDAContext* context) {                                           \
    size_t memRequired = 0;                                             \
    hipcub::DeviceReduce::func(                                         \
        nullptr, memRequired, src, dst, N, context->cuda_stream());     \
    auto buffer_size =                                                  \
        static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size});             \
    hipcub::DeviceReduce::func(                                         \
        static_cast<void*>(scratch_ptr->mutable_data<T>()),             \
        memRequired,                                                    \
        src,                                                            \
        dst,                                                            \
        N,                                                              \
        context->cuda_stream());                                        \
  }
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
//
// Computes C = alpha * op(A) * op(B) + beta * C for row-major single-precision
// matrices, where op(A) is M x K and op(B) is K x N. math_type is accepted for
// interface uniformity but unused in the float specialization.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention. Row-major C = A*B is computed as column-major
  // C^T = B^T * A^T, which is why B is passed first and N/M are swapped below.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // alpha/beta live on the host, so tell the BLAS library to read them there.
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      N));
}
// Half-precision GEMM: C = alpha * op(A) * op(B) + beta * C with fp16 data.
// math_type selects the accumulation precision:
//   FLOAT   -> mixed precision (fp16 in/out, fp32 compute) via GemmEx/SgemmEx;
//   FLOAT16 -> pure fp16 via Hgemm;
//   anything else throws.
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const at::Half* B,
    const float beta,
    at::Half* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: row-major C = A*B is issued as column-major
  // C^T = B^T * A^T (B first, N/M swapped).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
#if defined(USE_ROCM)
    // hipblas doesn't support the cublasSgemmEx-style API. It has the more
    // general hipblasGemmEx API, which is closer to cublasGemmEx.
    // hipblasGemmEx does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasSgemmEx does C = alpha*op( A )*op( B ) + beta*C
    HIPBLAS_ENFORCE(hipblasGemmEx(
        context->hipblas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIPBLAS_R_16F,
        ldb,
        A,
        HIPBLAS_R_16F,
        lda,
        &beta,
        C,
        HIPBLAS_R_16F,
        N,
        HIPBLAS_R_32F, // compute type
        HIPBLAS_GEMM_DEFAULT));
#else
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIP_R_16F,
        ldb,
        A,
        HIP_R_16F,
        lda,
        &beta,
        C,
        HIP_R_16F,
        N));
#endif // USE_ROCM
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // convert alpha, beta from float -> __half
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    // call hipblasHgemm
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemm(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
        ldb,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
        N));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
// Adds a per-channel bias to a CHW image: image[c * image_size + p] +=
// bias[c] for every pixel p. Implemented as a rank-1 GEMM update:
//   image (bias_channels x image_size) +=
//       bias (bias_channels x 1) * bias_multiplier (1 x image_size)
// with alpha = beta = 1, so existing image contents are accumulated into.
// bias_multiplier is expected to be a vector of ones of length image_size.
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
    const float* bias,
    const float* bias_multiplier,
    const int bias_channels,
    const int image_size,
    float* image,
    CUDAContext* context) {
  Gemm<float, CUDAContext>(
      CblasNoTrans,
      CblasNoTrans,
      bias_channels,
      image_size,
      1, // K: rank-1 update
      1.0f, // alpha (explicit float literal for the float parameter)
      bias,
      bias_multiplier,
      1.0f, // beta: accumulate into the existing image values
      image,
      context);
}
// Batched single-precision GEMM over `batch_size` independent matrix triples
// addressed through pointer arrays A/B/C. On ROCm this falls back to a loop
// of single Gemm calls; otherwise it copies the host pointer arrays to the
// device and issues one hipblasSgemmBatched call.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if defined(USE_ROCM)
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: each row-major C = A*B is issued as C^T = B^T * A^T.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // The batched API needs the per-matrix pointer arrays in device memory;
  // these constructors perform host-to-device copies.
  thrust::device_vector<const float*> A_device(A, A + batch_size);
  thrust::device_vector<const float*> B_device(B, B + batch_size);
  thrust::device_vector<float*> C_device(C, C + batch_size);
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemmBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B_device.data().get(),
      ldb,
      A_device.data().get(),
      lda,
      &beta,
      C_device.data().get(),
      ldc,
      batch_size));
#endif
}
// Strided-batched single-precision GEMM: matrix i of the batch lives at
// A + i * A_stride (likewise for B and C), so no pointer arrays are needed.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: each row-major C = A*B is issued as C^T = B^T * A^T.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      B_stride,
      A,
      lda,
      A_stride,
      &beta,
      C,
      ldc,
      C_stride,
      batch_size));
}
// Batched half-precision GEMM over pointer arrays. math_type FLOAT uses
// GemmBatchedEx with fp32 compute; FLOAT16 uses HgemmBatched with fp16
// compute; anything else throws. Pointer arrays are staged to device memory.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half** A,
    const at::Half** B,
    const float beta,
    at::Half** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: each row-major C = A*B is issued as C^T = B^T * A^T.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    // Copy the pointer arrays to device memory, as the batched API requires.
    thrust::device_vector<const void*> A_device(A, A + batch_size);
    thrust::device_vector<const void*> B_device(B, B + batch_size);
    thrust::device_vector<void*> C_device(C, C + batch_size);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasGemmBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B_device.data().get(),
        HIP_R_16F,
        ldb,
        A_device.data().get(),
        HIP_R_16F,
        lda,
        &beta,
        C_device.data().get(),
        HIP_R_16F,
        ldc,
        batch_size,
        HIP_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    // Reinterpret the at::Half pointer arrays as __half on the host, then
    // stage them to device memory for the batched call.
    thrust::host_vector<const __half*> A_array(batch_size);
    thrust::host_vector<const __half*> B_array(batch_size);
    thrust::host_vector<__half*> C_array(batch_size);
    for (int i = 0; i < batch_size; ++i) {
      A_array[i] = reinterpret_cast<const __half*>(A[i]);
      B_array[i] = reinterpret_cast<const __half*>(B[i]);
      C_array[i] = reinterpret_cast<__half*>(C[i]);
    }
    thrust::device_vector<const __half*> A_device(
        A_array.cbegin(), A_array.cend());
    thrust::device_vector<const __half*> B_device(
        B_array.cbegin(), B_array.cend());
    thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemmBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE* const*>(B_device.data().get()),
        ldb,
        reinterpret_cast<const CUBLAS_HALF_TYPE* const*>(A_device.data().get()),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE* const*>(C_device.data().get()),
        ldc,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
}
// Strided-batched half-precision GEMM: matrix i lives at A + i * A_stride
// (likewise B, C). math_type FLOAT -> GemmStridedBatchedEx with fp32 compute;
// FLOAT16 -> HgemmStridedBatched with fp16 compute; anything else throws.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const int A_stride,
    const at::Half* B,
    const int B_stride,
    const float beta,
    at::Half* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: each row-major C = A*B is issued as C^T = B^T * A^T.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIP_R_16F,
        ldb,
        B_stride,
        A,
        HIP_R_16F,
        lda,
        A_stride,
        &beta,
        C,
        HIP_R_16F,
        ldc,
        C_stride,
        batch_size,
        HIP_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
        ldb,
        B_stride,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        A_stride,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
        ldc,
        C_stride,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
}
// Computes y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// Because cuBLAS/hipBLAS are column-major, row-major A is the library's A^T,
// so the transpose flag is inverted (CblasNoTrans -> HIPBLAS_OP_T) and the
// dimensions are passed as (N, M). math_type is unused here.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemv(
      context->cublas_handle(),
      cu_trans_A,
      N,
      M,
      &alpha,
      A,
      N,
      x,
      1,
      &beta,
      y,
      1));
}
// Half-precision GEMV, implemented as a GEMM with a 1-column right-hand side
// (there is no fp16 gemv in the library). The transpose flag is inverted for
// the column-major library (see Gemv<float>). math_type FLOAT uses fp32
// compute; FLOAT16 uses Hgemm; anything else throws.
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const at::Half* A,
    const at::Half* x,
    const float beta,
    at::Half* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  // sort out what we need to call cublasSgemmEx / hipblasHgemm:
  // m = length of the output vector, k = length of x.
  const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M;
  const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N;
  const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k;
  const int ldc = m;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
#if defined(USE_ROCM)
    // hipblas doesn't support the cublasSgemmEx-style API. It has the more
    // general hipblasGemmEx API, which is closer to cublasGemmEx.
    // hipblasGemmEx does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasSgemmEx does C = alpha*op( A )*op( B ) + beta*C
    HIPBLAS_ENFORCE(hipblasGemmEx(
        context->hipblas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        HIPBLAS_R_16F,
        lda,
        x,
        HIPBLAS_R_16F,
        k,
        &beta,
        y,
        HIPBLAS_R_16F,
        ldc,
        HIPBLAS_R_32F, // compute type
        HIPBLAS_GEMM_DEFAULT));
#else
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        HIP_R_16F,
        lda,
        x,
        HIP_R_16F,
        k,
        &beta,
        y,
        HIP_R_16F,
        ldc));
#endif // USE_ROCM
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    CUBLAS_ENFORCE(hipblasSetPointerMode(
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(hipblasHgemm(
        context->cublas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(x),
        k,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(y),
        ldc));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
#if !defined(USE_ROCM)
// No change, but required. Defer to default CUDA engine
// TensorCore engine for float GEMM requires no special handling; defer to
// the default CUDA implementation.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemm<float, CUDAContext>(
      trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
// Half-precision GEMM with TensorCore math enabled for the duration of the
// call: the handle's math mode is switched to tensor-op math before GemmEx
// (fp16 in/out, fp32 compute) and restored to default math afterwards so
// later calls on the same handle are unaffected.
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const at::Half* B,
    const float beta,
    at::Half* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: row-major C = A*B is issued as C^T = B^T * A^T.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  // enable TensorCore for this call on this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
  }
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasGemmEx(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      HIP_R_16F,
      ldb,
      A,
      HIP_R_16F,
      lda,
      &beta,
      C,
      HIP_R_16F,
      N,
      HIP_R_32F,
      CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  // Now disable TensorCore math for subsequent calls to this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
  }
}
// TensorCore engine for float batched GEMM: no TensorCore-specific setup is
// needed, so forward directly to the default-engine implementation.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  GemmBatched<float, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context,
      math_type);
}
// TensorCore engine for half batched GEMM: forwards to the default-engine
// implementation, which already selects tensor-op algorithms where relevant.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half** A,
    const at::Half** B,
    const float beta,
    at::Half** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  GemmBatched<at::Half, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context,
      math_type);
}
// TensorCore engine for float strided-batched GEMM: no special handling;
// forward to the default-engine implementation.
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  GemmStridedBatched<float, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
// TensorCore engine for half strided-batched GEMM: forward to the
// default-engine implementation.
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const int A_stride,
    const at::Half* B,
    const int B_stride,
    const float beta,
    at::Half* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
// TensorCore engine for float GEMV: no special handling; forward to the
// default-engine implementation.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemv<float, CUDAContext, DefaultEngine>(
      trans_A, M, N, alpha, A, x, beta, y, context, math_type);
}
// TensorCore engine for half GEMV: forward to the default-engine
// implementation.
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const at::Half* A,
    const at::Half* x,
    const float beta,
    at::Half* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemv<at::Half, CUDAContext, DefaultEngine>(
      trans_A, M, N, alpha, A, x, beta, y, context, math_type);
}
#endif
// Single-precision GEMM with caller-supplied leading dimensions (lda, ldb,
// ldc), for operating on sub-matrices of larger row-major buffers.
// C = alpha * op(A) * op(B) + beta * C with op(A) M x K and op(B) K x N.
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int lda,
    const float* B,
    const int ldb,
    const float beta,
    float* C,
    const int ldc,
    CUDAContext* context) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: row-major C = A*B is issued as C^T = B^T * A^T,
  // hence B is passed first and N/M are swapped.
  const hipblasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  const hipblasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_ENFORCE(
      hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(hipblasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      ldc));
}
// Batched Add variants
namespace {
// Accumulates `batch` stripes of the input into Y:
//   Y[i] += sum over b of first[b * stripe + i], for i in [0, N).
// The additions are performed in float so half-precision T accumulates
// without repeated fp16 rounding of the loads themselves.
template <typename T>
__global__ void AddStripedBatchKernel(
    const int N,
    const T* first,
    T* Y,
    const int stripe,
    const int batch) {
  for (int b = 0; b < batch; b++) {
    const T* src = first + b * stripe;
    CUDA_1D_KERNEL_LOOP(idx, N) {
      float acc = convert::To<T, float>(Y[idx]);
      acc += convert::To<T, float>(src[idx]);
      Y[idx] = convert::To<float, T>(acc);
    }
  }
}
} // namespace
// AddStripedBatch: Y[i] += sum over `batch` slices of first[j * stripe + i].
// Launches AddStripedBatchKernel once over N elements.
// Fix: wrap the thread-count launch argument in dim3(...) — every other
// hipLaunchKernelGGL call in this file does so; the bare int relied on the
// implicit dim3 conversion and was inconsistent with the file's convention.
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T)              \
  template <>                                                     \
  CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>(        \
      const int N,                                                \
      const T* first,                                             \
      T* Y,                                                       \
      const int stripe,                                           \
      const int batch,                                            \
      CUDAContext* context) {                                     \
    hipLaunchKernelGGL(( AddStripedBatchKernel<T>)                \
        , dim3(CAFFE_GET_BLOCKS(N)),                              \
           dim3(CAFFE_CUDA_NUM_THREADS),                          \
           0,                                                     \
           context->cuda_stream(), N, first, Y, stripe, batch);   \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                \
  }
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
namespace {
// Rescales in place: x[i] = x[i] * (max - min) + min, computing in float
// and converting back to T.
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
  float scale = max - min;
  CUDA_1D_KERNEL_LOOP(i, N) {
    x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
  }
}
// Maps raw 32-bit random words in x to ints in [min, max], writing the
// result in place through an int alias of the same buffer.
// NOTE(review): the modulo mapping has slight bias unless the range divides
// 2^32 evenly.
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
  int* x_int = reinterpret_cast<int*>(x);
  int range = (max - min + 1);
  CUDA_1D_KERNEL_LOOP(i, N) {
    x_int[i] = min + static_cast<int>(x[i] % range);
  }
}
} // namespace
// Fills r with n uniform floats, then rescales them on the device via
// UniformShift (x * (max - min) + min).
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
    const size_t n,
    const float min,
    const float max,
    float* r,
    CUDAContext* context) {
  CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
  hipLaunchKernelGGL(( UniformShift<float>)
      , dim3(CAFFE_GET_BLOCKS(n)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(), n, min, max, r);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Double-precision variant of RandUniform: generate, then rescale on device.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
    const size_t n,
    const double min,
    const double max,
    double* r,
    CUDAContext* context) {
  CURAND_ENFORCE(
      hiprandGenerateUniformDouble(context->curand_generator(), r, n));
  hipLaunchKernelGGL(( UniformShift<double>)
      , dim3(CAFFE_GET_BLOCKS(n)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(), n, min, max, r);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Fills r with n random ints in [min, max]: generates raw 32-bit words into
// the same buffer (via an unsigned alias), then maps them into range with
// UniformIntFit.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
    const size_t n,
    const int min,
    const int max,
    int* r,
    CUDAContext* context) {
  CURAND_ENFORCE(hiprandGenerate(
      context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
  hipLaunchKernelGGL(( UniformIntFit), 
      dim3(CAFFE_GET_BLOCKS(n)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      n, min, max, reinterpret_cast<unsigned int*>(r));
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// hiprandGenerateNormal* require an even sample count. If n is odd, fill the
// last element with one host-generated Gaussian sample and return n - 1 so
// the caller generates the remaining (even) count on the device; otherwise
// return n unchanged.
template <typename T>
size_t HandleOddLengthRandGaussian(
    const size_t n,
    const T mean,
    const T std,
    T* r,
    CUDAContext* context) {
  if (n % 2 == 1) {
    // Fix: seed the engine from std::random_device. A default-constructed
    // default_random_engine uses a fixed seed, so the previous code produced
    // the exact same "random" value on every call.
    std::random_device rd;
    std::default_random_engine generator(rd());
    std::normal_distribution<T> distribution(mean, std);
    const T random_value = distribution(generator);
    // Write the single host-side sample into the last device element.
    Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
    return n - 1;
  }
  return n;
}
// Fills r with n Gaussian floats (mean, std).
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
    const size_t n,
    const float mean,
    const float std,
    float* r,
    CUDAContext* context) {
  // If n is odd, we add a random Gaussian value at the end manually
  // and generate n-1 random values using hiprandGenerateNormal.
  // hiprandGenerateNormal requires n to be even.
  const size_t even_n =
      HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
  CURAND_ENFORCE(
      hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
// Double-precision variant; see the float specialization above for why the
// count is rounded down to an even number first.
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
    const size_t n,
    const double mean,
    const double std,
    double* r,
    CUDAContext* context) {
  const size_t even_n =
      HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
  CURAND_ENFORCE(hiprandGenerateNormalDouble(
      context->curand_generator(), r, even_n, mean, std));
}
// *y = dot(a, b) over n elements. Device pointer mode: y is a device pointer
// and the result is written on the device without a host sync.
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
    const int n,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
// *y = dot(a, b) for half-precision inputs. Inputs and the result are fp16
// (HIP_R_16F) but accumulation runs in fp32 (last argument HIP_R_32F).
// Device pointer mode: y is a device pointer.
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
    const int n,
    const at::Half* a,
    const at::Half* b,
    at::Half* y,
    CUDAContext* context) {
  // execute with 32-bit math
  CUBLAS_ENFORCE(hipblasSetPointerMode(
      context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(hipblasDotEx_v2(
      context->cublas_handle(),
      n,
      a,
      HIP_R_16F,
      1,
      b,
      HIP_R_16F,
      1,
      y,
      HIP_R_16F,
      HIP_R_32F));
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
// Thread count for the single-block reduction kernel below.
#define SUM_KERNEL_NTHREADS 128
// Single-block reduction: *Y = sum_i X[i], or sum of X[i]^2 when `square`.
// Call sites launch this with exactly one block of SUM_KERNEL_NTHREADS
// threads. Accumulation is done in float regardless of T.
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
  reduction_buffer[idx] = 0;
  // A multilevel reduction.
  // N -> 128
  if (!square) {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      reduction_buffer[idx] += convert::To<T, float>(X[i]);
    }
  } else {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      float Xi = convert::To<T, float>(X[i]);
      reduction_buffer[idx] += Xi * Xi;
    }
  }
  __syncthreads();
  // 128 -> 32
  if (idx < 32) {
    reduction_buffer[idx] += reduction_buffer[idx + 32] +
        reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
  }
  __syncthreads();
  // 32 -> 1
  if (idx == 0) {
    float tmp = 0;
    for (int i = 0; i < 32; ++i) {
      tmp += reduction_buffer[i];
    }
    *Y = convert::To<float, T>(tmp);
  }
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
// Converts a float scalar on the device into T: *dest = (T)*sum.
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
  *dest = convert::To<float, T>(*sum);
}
// Reduces N elements of iterator `it` into *dest via hipcub::DeviceReduce,
// using scratch_ptr as the temporary-storage buffer. If dest is null on
// entry, one extra T is appended to the scratch tensor and dest is pointed
// at it (so the caller receives a device location for the result).
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
    const int N,
    IterT it,
    T*& dest,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // First call with a null buffer only queries the required scratch size.
  size_t memRequired = 0;
  hipcub::DeviceReduce::Sum(
      nullptr, memRequired, it, dest, N, context->cuda_stream());
  // Scratch is allocated in units of T; round the byte count up.
  auto buffer_size =
      static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
  if (!dest) {
    // allocate one more T at the end of scratch for dest
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
    dest = scratch_ptr->template mutable_data<T>() + buffer_size;
  } else {
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
  }
  hipcub::DeviceReduce::Sum(
      static_cast<void*>(scratch_ptr->template mutable_data<T>()),
      memRequired,
      it,
      dest,
      N,
      context->cuda_stream());
}
} // namespace
// *y = sum(x[0..N)). Uses the hipcub device reduction for large N when a
// scratch tensor is provided, otherwise the one-block SumKernel.
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SumGenericIter<float>(N, x, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), 
        N, x, y, false);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  }
}
// int32 variant of Sum; same dispatch between hipcub reduction and the
// one-block SumKernel as the float specialization above.
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
    const int N,
    const int32_t* x,
    int32_t* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), 
        N, x, y, false);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  }
}
namespace {
// Functor converting T to float; used as a TransformInputIterator adapter so
// narrow types (e.g. at::Half) can be reduced with float accumulation.
template <typename T>
struct FloatTransform {
  inline __host__ __device__ float operator()(const T v) const {
    return convert::To<T, float>(v);
  }
};
} // namespace
// Sum for types needing float accumulation (e.g. at::Half): for large N a
// hipcub reduction runs over a float-converting iterator into a temporary
// float in scratch, which SumConvertKernel then casts back into *y; for
// small N the one-block SumKernel is used directly.
#define CAFFE2_MATH_SUM_FUNC(T)                                           \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>(                            \
      const int N,                                                        \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* context,                                               \
      Tensor* scratch_ptr) {                                              \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {                \
      FloatTransform<T> transform;                                        \
      hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
          x, transform);                                                  \
      float* sum = nullptr;                                               \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);            \
      hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                      \
    } else {                                                              \
      hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
          N, x, y, false);                                                \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                      \
    }                                                                     \
  }
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
// Functor mapping a value to its square; callable on both host and device.
// Used as a TransformInputIterator adapter for sum-of-squares reductions.
template <typename T>
struct SqrTransform {
  inline __host__ __device__ T operator()(const T v) const {
    const T squared = v * v;
    return squared;
  }
};
} // namespace
// *y = sum_i x[i]^2. Large N with scratch: hipcub reduction over a squaring
// iterator; otherwise the one-block SumKernel with square=true.
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
    SqrTransform<float> transform;
    hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
        x, transform);
    SumGenericIter<float>(N, it, y, context, scratch_ptr);
  } else {
    hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), 
        N, x, y, true);
    C10_HIP_KERNEL_LAUNCH_CHECK();
  }
}
// SumSqr for types needing float accumulation (e.g. at::Half): chains a
// T->float converting iterator with a squaring iterator, reduces into a
// temporary float in scratch, then casts the result back into *y with
// SumConvertKernel. Small N falls back to SumKernel with square=true.
#define CAFFE2_MATH_SUMSQR_FUNC(T)                                        \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>(                         \
      const int N,                                                        \
      const T* x,                                                         \
      T* y,                                                               \
      CUDAContext* context,                                               \
      Tensor* scratch_ptr) {                                              \
    if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {                \
      FloatTransform<T> float_transform;                                  \
      hipcub::TransformInputIterator<float, FloatTransform<T>, const T*>  \
          float_it(x, float_transform);                                   \
      SqrTransform<float> sqr_transform;                                  \
      hipcub::TransformInputIterator<                                     \
          float,                                                          \
          SqrTransform<float>,                                            \
          decltype(float_it)>                                             \
          it(float_it, sqr_transform);                                    \
      float* sum = nullptr;                                               \
      SumGenericIter<float>(N, it, sum, context, scratch_ptr);            \
      hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                      \
    } else {                                                              \
      hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
          N, x, y, true);                                                 \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                      \
    }                                                                     \
  }
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
// Row-wise gather from an N x D matrix: y[i] = x[i, idx[i]].
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = x[i * D + idx[i]];
  }
}
} // namespace
// Selects one element per row of the N x D matrix x: y[i] = x[i, idx[i]].
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
    const int N,
    const int D,
    const float* x,
    const int* idx,
    float* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( SelectKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(N)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(), N, D, x, idx, y);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// at::Half variant of Select: y[i] = x[i, idx[i]].
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
    const int N,
    const int D,
    const at::Half* x,
    const int* idx,
    at::Half* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( SelectKernel<at::Half>)
      , dim3(CAFFE_GET_BLOCKS(N)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(), N, D, x, idx, y);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
namespace {
// Im2Col for NCHW: each of the n = C * output_h * output_w threads handles
// one (channel, h_out, w_out) output position and copies its
// kernel_h x kernel_w patch into col_data, writing 0 for locations that fall
// in the padding region.
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (channel_in, h_out, w_out).
    const int w_out = index % output_w;
    const int h_index = index / output_w;
    const int h_out = h_index % output_h;
    const int channel_in = h_index / output_h;
    const int channel_out = channel_in * kernel_h * kernel_w;
    // Top-left corner of the receptive field in image coordinates.
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    const int output_size = output_h * output_w;
    T* col_data_ptr =
        col_data + (channel_out * output_h + h_out) * output_w + w_out;
    const T* img_data_ptr =
        img_data + (channel_in * input_h + h_in) * input_w + w_in;
    // dh/dw are dilated offsets within the patch.
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data_ptr + dh * input_w + dw)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data_ptr[dh * input_w + dw]
            : 0;
#endif
        col_data_ptr += output_size;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// Im2Col for NHWC: each of the n = output_h * output_w * C threads handles
// one (h_out, w_out, channel) output position; the patch is laid out
// channel-interleaved in col_data. Padding locations produce 0.
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_w,
    const int channels,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (h_out, w_out, channel_in).
    const int channel_in = index % channels;
    const int w_out = index / channels % output_w;
    const int h_out = index / channels / output_w;
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    T* col_data_ptr = col_data +
        (h_out * output_w + w_out) * channels * kernel_h * kernel_w +
        channel_in;
    // dh/dw are dilated offsets within the patch.
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data + (h * input_w + w) * channels + channel_in)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data[(h * input_w + w) * channels + channel_in]
            : 0;
#endif
        col_data_ptr += channels;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// Col2Im for NCHW: each thread owns one image pixel and accumulates all
// column entries that map onto it (the inverse scatter of Im2Col, done as a
// gather so no atomics are needed).
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Dilated patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Pixel coordinates shifted into padded space.
    const int w = index % input_w + pad_l;
    const int h = index / input_w % input_h + pad_t;
    const int c = index / (input_h * input_w);
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = (h - h_col * stride_h);
        int w_k = (w - w_col * stride_w);
        // Only kernel offsets aligned with the dilation contribute.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int col_data_index =
              (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
                  output_w +
              w_col;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
          val += __ldg(col_data + col_data_index);
#else
          val += col_data[col_data_index];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// Col2Im for NHWC: same gather strategy as the NCHW kernel above, but with
// channel-interleaved indexing of both image and column buffers.
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
    const int n,
    const int input_w,
    const int channels,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Dilated patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    const int c = index % channels;
    const int w = index / channels % input_w + pad_l;
    const int h = index / channels / input_w + pad_t;
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    const int channels_col = patch_h * patch_w * channels;
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = h - h_col * stride_h;
        int w_k = w - w_col * stride_w;
        // Only kernel offsets aligned with the dilation contribute.
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
          val += __ldg(
              col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
          val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// N-dimensional Im2Col / Col2Im for NCHW, selected at compile time by
// kCol2Im. Blocks iterate over the outer (kernel-offset) dimension and
// threads over the inner (spatial) dimension. Im2Col gathers img -> col
// (padding reads become 0); Col2Im scatters col -> img with atomic adds,
// so Y_data must be pre-zeroed by the caller in that mode.
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
    const int outer_size,
    const int inner_size,
    const int kernel_size,
    SimpleArray<int, N + 1> img_shape,
    SimpleArray<int, N + 1> col_shape,
    SimpleArray<int, N> kernel_shape,
    SimpleArray<int, N> stride,
    SimpleArray<int, N> dilation,
    SimpleArray<int, N> pad,
    const T* X_data,
    T* Y_data) {
  int d_offset[N];
  int d_iter[N];
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    // Decompose the outer index into per-dimension kernel offsets.
    int offset_i = i;
#pragma unroll
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset_i % kernel_shape.data[d_i];
      offset_i /= kernel_shape.data[d_i];
    }
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Decompose the inner index into per-dimension output coordinates.
      int offset_j = j;
#pragma unroll
      for (int d_i = N - 1; d_i >= 0; --d_i) {
        d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
        offset_j /= col_shape.data[d_i + 1];
      }
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size;
      bool is_padding = false;
#pragma unroll
      for (int d_i = 0; d_i < N; ++d_i) {
        const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
            d_offset[d_i] * dilation.data[d_i];
        is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
        img_index = img_index * img_shape.data[d_i + 1] + d_img;
      }
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
      } else if (!is_padding) {
        gpu_atomic_add(Y_data + img_index, __ldg(X_data + col_index));
      }
#else
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : X_data[img_index];
      } else if (!is_padding) {
        gpu_atomic_add(Y_data + img_index, X_data[col_index]);
      }
#endif
    }
  }
}
// Host launcher for the N-d Im2Col kernel: packs shape/stride/dilation/pad
// arrays into SimpleArrays passed by value and launches one block row per
// kernel offset.
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  // outer = product of kernel extents (col_shape[0]); inner = spatial size.
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size,
      inner_size,
      kernel_size,
      img_shape_array,
      col_shape_array,
      kernel_shape_array,
      stride_array,
      dilation_array,
      pad_array,
      img_data,
      col_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Host launcher for the N-d Col2Im kernel. Zeroes the image buffer first
// because the kernel accumulates into it with atomic adds.
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  // The kernel scatters with gpu_atomic_add, so start from zero.
  Set<T, CUDAContext>(img_size, 0, img_data, context);
  hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
      , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      outer_size,
      inner_size,
      kernel_size,
      img_shape_array,
      col_shape_array,
      kernel_shape_array,
      stride_array,
      dilation_array,
      pad_array,
      col_data,
      img_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
} // namespace
// 2D Im2Col, NCHW layout. Computes the output spatial extents from the
// dilated kernel size and launches one thread per (channel, h_out, w_out).
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * output_h * output_w;
  hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      img_data,
      col_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// 2D Im2Col, NHWC layout. Only groups == 1 is supported on GPU for NHWC.
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = output_h * output_w * channels;
  hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_w,
      channels,
      img_data,
      col_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// 2D Col2Im, NCHW layout. One thread per image pixel gathers all column
// entries mapping onto it (see Col2ImNCHWCUDAKernel).
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int /* groups */) {
  // In NCHW, the number of groups doesn't affect Col2Im.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      height,
      width,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      col_data,
      img_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// 2D Col2Im, NHWC layout. Only groups == 1 is supported on GPU for NHWC.
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = height * width * channels;
  hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
      , dim3(CAFFE_GET_BLOCKS(num_kernels)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
      num_kernels,
      width,
      channels,
      kernel_h,
      kernel_w,
      dilation_h,
      dilation_w,
      pad_t,
      pad_l,
      stride_h,
      stride_w,
      output_h,
      output_w,
      col_data,
      img_data);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
// N-dimensional Im2Col, NCHW layout: dispatches on the runtime rank N to the
// compile-time-templated Im2ColNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  // In NCHW, the number of groups doesn't affect Im2Col.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Im2ColNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      img_data,
      col_data,
      context);
}
// N-dimensional Im2Col for NHWC is not implemented on GPU; calling it aborts
// via CAFFE_NOT_IMPLEMENTED.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_NOT_IMPLEMENTED;
}
// N-dimensional Col2Im, NCHW layout: dispatches on the runtime rank N to the
// compile-time-templated Col2ImNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    int /* groups */) {
  // In NCHW, the number of groups doesn't affect Col2Im.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Col2ImNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      col_data,
      img_data,
      context);
}
// N-dimensional Col2Im for NHWC is not implemented on GPU; calling it aborts
// via CAFFE_NOT_IMPLEMENTED.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    int groups) {
  CAFFE_NOT_IMPLEMENTED;
}
// Untyped strided matrix copy: copies M rows of N elements (itemsize bytes
// each) from A (row stride lda elements) to B (row stride ldb elements),
// device-to-device, asynchronously on the context's stream. Non-trivial
// element copy constructors are not supported on CUDA.
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
    const size_t itemsize,
    const int M,
    const int N,
    const void* A,
    const int lda,
    void* B,
    const int ldb,
    CUDAContext* context,
    TypeMeta::Copy copy) {
  CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
  // Fix: check the runtime status. The original discarded the
  // hipMemcpy2DAsync return value, silently hiding copy failures; the other
  // copies in this file (e.g. CopyVector) are C10_HIP_CHECK-guarded.
  C10_HIP_CHECK(hipMemcpy2DAsync(
      B,
      ldb * itemsize,
      A,
      lda * itemsize,
      N * itemsize,
      M,
      hipMemcpyDeviceToDevice,
      context->cuda_stream()));
}
// Typed CopyMatrix: strided device-to-device copy of an M x N matrix of T
// with row strides lda/ldb (in elements). No-op for empty matrices.
// Fix: check the hipMemcpy2DAsync status with C10_HIP_CHECK instead of
// silently discarding it, matching the error handling used elsewhere in
// this file.
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
  template <>                                  \
  void CopyMatrix<T, CUDAContext>(             \
      const int M,                             \
      const int N,                             \
      const T* A,                              \
      const int lda,                           \
      T* B,                                    \
      const int ldb,                           \
      CUDAContext* context) {                  \
    if (M == 0 || N == 0) {                    \
      return;                                  \
    }                                          \
    C10_HIP_CHECK(hipMemcpy2DAsync(            \
        B,                                     \
        sizeof(T) * ldb,                       \
        A,                                     \
        sizeof(T) * lda,                       \
        sizeof(T) * N,                         \
        M,                                     \
        hipMemcpyDeviceToDevice,               \
        context->cuda_stream()));              \
  }
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
// Async device-to-device copy of N floats; no-op when src == dst or N == 0.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
    const int N,
    const float* src,
    float* dst,
    CUDAContext* context) {
  if (src != dst && N > 0) {
    C10_HIP_CHECK(hipMemcpyAsync(
        dst,
        src,
        sizeof(float) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream()));
  }
}
// Async device-to-device copy of N ints; no-op when src == dst or N == 0.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<int, CUDAContext>(
    const int N,
    const int* src,
    int* dst,
    CUDAContext* context) {
  if (src != dst && N > 0) {
    C10_HIP_CHECK(hipMemcpyAsync(
        dst,
        src,
        sizeof(int) * N,
        hipMemcpyDeviceToDevice,
        context->cuda_stream()));
  }
}
namespace {
// Block-wide reduction helper with CAFFE_CUDA_NUM_THREADS threads per block.
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
// Reduces each row of an rows x cols matrix: Y[i] = alpha * reduce(X[i, :]).
// One block per row (grid-strided over rows); `init` seeds the reduction.
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      val = reducer(X[i * cols + j], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // temp_storage is reused on the next row iteration.
    __syncthreads();
  }
}
// Reduces each column of an rows x cols matrix: Y[i] = alpha * reduce(X[:, i]).
// One block per column (grid-strided over columns).
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
    const int rows,
    const int cols,
    const Reducer reducer,
    const T init,
    const T alpha,
    const T* X,
    T* Y) {
  __shared__ typename BlockReduce<T>::TempStorage temp_storage;
  for (int i = blockIdx.x; i < cols; i += gridDim.x) {
    T val = init;
    for (int j = threadIdx.x; j < rows; j += blockDim.x) {
      val = reducer(X[j * cols + i], val);
    }
    val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
    if (threadIdx.x == 0) {
      Y[i] = val * alpha;
    }
    // temp_storage is reused on the next column iteration.
    __syncthreads();
  }
}
} // namespace
// RowwiseMax: y[i] = max over the i-th row of the N x D matrix x, via
// RowwiseReduceKernel with a hipcub::Max reducer seeded at T's lowest value.
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    hipLaunchKernelGGL(( RowwiseReduceKernel),                            \
        ::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),                              \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream(),                                           \
        N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
  }
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
// ColwiseMax: y[i] = max over the i-th column of the N x D matrix x, via
// ColwiseReduceKernel with a hipcub::Max reducer seeded at T's lowest value.
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T)                            \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>(                     \
      const int N, const int D, const T* x, T* y, CUDAContext* context) { \
    hipLaunchKernelGGL(( ColwiseReduceKernel),                            \
        ::min(D, CAFFE_MAXIMUM_NUM_BLOCKS),                              \
        CAFFE_CUDA_NUM_THREADS,                                           \
        0,                                                                \
        context->cuda_stream(),                                           \
        N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
  }
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
// Elementwise clamp-from-below: y[i] = max(x[i], alpha).
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    y[i] = fmaxf(x[i], alpha);
  }
}
} // namespace
// y[i] = max(x[i], alpha) for i in [0, N); x and y may refer to the same
// buffer since the kernel reads and writes element i independently.
template <>
CAFFE2_CUDA_EXPORT void Maximum(
    const int N,
    const float alpha,
    const float* x,
    float* y,
    CUDAContext* context) {
  hipLaunchKernelGGL(( maximum_kernel), 
      dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, alpha, x, y);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
namespace {
// Broadcasting copy-with-scale: Y[y] = alpha * X[x], where the source index
// is recovered from the destination index by peeling off each Y dimension
// (fast FIXED_DIVISOR div/mod) and applying the per-dimension X stride
// (stride 0 for broadcast dimensions).
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
    const int Y_size,
    const SimpleArray<int, D> X_strides,
    const SimpleArray<FIXED_DIVISOR, D> Y_dims,
    const T alpha,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
    int X_index = 0;
    int Y_index_val = Y_index;
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
      X_index += d * X_strides.data[i];
    }
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
    Y[Y_index] = __ldg(X + X_index) * alpha;
#else
    Y[Y_index] = X[X_index] * alpha;
#endif
  }
}
// Host-side driver for BroadcastCUDAKernel at a fixed rank D.
// Right-aligns X's dims against Y's D dims (missing leading dims of X get
// stride 0), builds the stride/fast-divisor arrays, and launches the kernel.
// Precondition (enforced): each X dim is either 1 or equal to the matching
// Y dim. Returns early without launching when any Y dim is 0.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
    const int X_ndim,
    const int* X_dims,
    const int* Y_dims,
    const T alpha,
    const T* X,
    T* Y,
    CUDAContext* context) {
  SimpleArray<int, D> X_strides_array;
  SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
  // d = number of leading Y dims that X lacks; those broadcast (stride 0).
  const int d = D - X_ndim;
  std::fill(X_strides_array.data, X_strides_array.data + d, 0);
  int cur_stride = 1;
  for (int i = D - 1; i >= d; --i) {
    CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
    // Size-1 X dims broadcast: stride 0 keeps X_index fixed along them.
    X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
    cur_stride *= X_dims[i - d];
  }
  for (int i = 0; i < D; ++i) {
    if (Y_dims[i] == 0) {
      return;
    }
    Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
  }
  const int Y_size =
      std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
  hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
      , dim3(CAFFE_GET_BLOCKS(Y_size)),
         dim3(CAFFE_CUDA_NUM_THREADS),
         0,
         context->cuda_stream(),
          Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
} // namespace
// Public Broadcast<T, CUDAContext>: Y = alpha * broadcast(X) where Y has
// Y_ndim dims and X's dims right-align against Y's. Dispatches on the
// runtime rank Y_ndim to the rank-templated BroadcastCUDAImpl<T, D>.
// The trailing unnamed bool parameter is part of the caller-facing
// signature and is intentionally ignored here.
// Comments stay outside the macro body (backslash continuations).
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T)  \
  template <>                                 \
  CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
      const int X_ndim,                       \
      const int* X_dims,                      \
      const int Y_ndim,                       \
      const int* Y_dims,                      \
      const T alpha,                          \
      const T* X,                             \
      T* Y,                                   \
      CUDAContext* context,                   \
      bool) {                                 \
    CAFFE_ENFORCE_LE(X_ndim, Y_ndim);         \
    DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(   \
        Y_ndim,                               \
        BroadcastCUDAImpl,                    \
        T,                                    \
        X_ndim,                               \
        X_dims,                               \
        Y_dims,                               \
        alpha,                                \
        X,                                    \
        Y,                                    \
        context);                             \
  }
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
// inv_std[i] = Func(var[i] + epsilon), i.e. the inverse standard deviation
// of a variance vector, with epsilon added for numerical stability.
// The primary template is declared but not defined; only explicitly
// specialized types (generated by the macro below) may be instantiated.
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
// Generates the specialization for (T, Func); e.g. (float, rsqrtf) gives
// inv_std[i] = 1 / sqrt(var[i] + epsilon). Comments kept outside the
// backslash-continued macro body.
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func)         \
  template <>                                             \
  __global__ void InvStdCUDAKernel<T>(                    \
      const int N, const T epsilon, const T* var, T* inv_std) { \
    CUDA_1D_KERNEL_LOOP(i, N) {                           \
      inv_std[i] = Func(var[i] + epsilon);                \
    }                                                     \
  }
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
// Host-side InvStd<T, CUDAContext>: launches InvStdCUDAKernel<T> on the
// context's stream to compute inv_std[i] = rsqrt(var[i] + epsilon).
// Comments kept outside the backslash-continued macro body.
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T)                   \
  template <>                                                \
  CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>(            \
      const int N,                                           \
      const T epsilon,                                       \
      const T* var,                                          \
      T* inv_std,                                            \
      CUDAContext* context) {                                \
   hipLaunchKernelGGL(( InvStdCUDAKernel<T>)                 \
        , dim3(CAFFE_GET_BLOCKS(N)),                         \
           CAFFE_CUDA_NUM_THREADS,                           \
           0,                                                \
           context->cuda_stream(), N, epsilon, var, inv_std); \
    C10_HIP_KERNEL_LAUNCH_CHECK();                           \
  }
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
} // namespace math
} // namespace caffe2
// ===== 0d8e00db8a007dea590863b22f463e91b364bb99.cu =====
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/cub.cuh>
#include <cub/block/block_reduce.cuh>
#include "caffe2/utils/cub_namespace.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
// FIXED_DIVISOR abstracts integer division/modulo by a runtime-constant
// divisor. On ROCm it is a plain int32_t with ordinary `/` and `%`; on CUDA
// it is FixedDivisor<int32_t>, which precomputes magic numbers so repeated
// in-kernel divisions avoid the hardware integer-divide path.
#if defined(USE_ROCM)
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
// Copy `n` first so the macro is safe when *q or *r aliases n.
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
  do {                                    \
    const auto n_copy = n;                \
    *q = n_copy / d;                      \
    *r = n_copy % d;                      \
  } while (0)
#else // USE_ROCM
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // USE_ROCM
#if defined(USE_ROCM)
#define CUBLAS_HALF_TYPE hipblasHalf
#define HIPBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT
// until we use hipblas v2
// hipify correctly maps things like CUDA_R_16F to HIP_R_16F,
// however hipblas v1 is still using its custom type
#define HIP_R_16F HIPBLAS_R_16F
#define HIP_R_32F HIPBLAS_R_32F
#else // __HIP_PLATFORM_HCC
#define CUBLAS_HALF_TYPE __half
#endif // __HIP_PLATFORM_HCC
#include "caffe2/utils/math/utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
// Generates a host+device binary functor `FuncFunctor<T>` computing
// `lhs expr rhs`, plus an at::Half specialization that round-trips through
// float (half has no native arithmetic on all architectures).
// Comments stay outside the backslash-continued macro body.
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr)             \
  template <typename T>                                                    \
  struct Func##Functor {                                                   \
    inline __host__ __device__ T                                           \
    operator()(const T& lhs, const T& rhs) const {                         \
      return lhs expr rhs;                                                 \
    }                                                                      \
  };                                                                       \
  template <>                                                              \
  struct Func##Functor<at::Half> {                                         \
    inline __host__ __device__ at::Half operator()(                        \
        const at::Half& lhs,                                               \
        const at::Half& rhs) const {                                       \
      return convert::To<float, at::Half>(convert::To<at::Half, float>(    \
          lhs) expr convert::To<at::Half, float>(rhs));                    \
    }                                                                      \
  };
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
// Elementwise C[i] = op(A[i], B[i]) for equal-shaped A, B, C of N elements.
// Grid-stride loop (CUDA_1D_KERNEL_LOOP), so any launch config covers N.
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
    const int N,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    C[i] = op(A[i], B[i]);
  }
}
// Binary op where one operand is a single row broadcast across all rows of
// a rows x cols matrix. j = column of the output element; broadcast_1st
// selects whether A (true) or B (false) is the broadcast row vector.
// NOTE(review): "Kenel" is a long-standing upstream typo; the name is
// referenced throughout this file, so it is kept as-is.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
    const int size,
    const FIXED_DIVISOR cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    const int j = FIXED_DIVISOR_MOD(cols, C_index);
    const int A_index = broadcast_1st ? j : C_index;
    const int B_index = broadcast_1st ? C_index : j;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Binary op where one operand is a single column broadcast across all
// columns of a rows x cols matrix. i = row of the output element;
// broadcast_1st selects whether A (true) or B (false) is the column vector.
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
    const int size,
    const FIXED_DIVISOR cols,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    const int i = FIXED_DIVISOR_DIV(cols, C_index);
    const int A_index = broadcast_1st ? i : C_index;
    const int B_index = broadcast_1st ? C_index : i;
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// General N-d broadcast binary op: C[C_index] = op(A[A_index], B[B_index]),
// where A_index/B_index are recovered from C_index via per-dimension
// strides (stride 0 encodes a broadcast dimension). D is the common rank.
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
    const int size,
    const SimpleArray<int, D> A_strides,
    const SimpleArray<int, D> B_strides,
    const SimpleArray<FIXED_DIVISOR, D> C_dims,
    const BinaryOperator op,
    const TIn* A,
    const TIn* B,
    TOut* C) {
  CUDA_1D_KERNEL_LOOP(C_index, size) {
    int A_index = 0;
    int B_index = 0;
    int C_index_val = C_index;
// D is a compile-time constant; the decomposition loop fully unrolls.
#pragma unroll
    for (int i = D - 1; i >= 0; --i) {
      int d;
      FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
      A_index += d * A_strides.data[i];
      B_index += d * B_strides.data[i];
    }
    C[C_index] = op(A[A_index], B[B_index]);
  }
}
// Fast path for broadcasts reducible to a 2D rows x cols pattern.
// rowwise_broadcast: the broadcast operand is one row; otherwise one column.
// broadcast_1st: whether A (true) or B (false) is the broadcast operand.
// The two runtime flags are lowered to the compile-time template bool of
// the row/colwise kernels, hence the 4-way branch below. No-op when
// rows or cols is 0.
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
    const int rows,
    const int cols,
    const bool rowwise_broadcast,
    const bool broadcast_1st,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  if (rows == 0 || cols == 0) {
    return;
  }
  const int size = rows * cols;
  const FIXED_DIVISOR cols_div(cols);
  if (rowwise_broadcast) {
    if (broadcast_1st) {
      RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  } else {
    if (broadcast_1st) {
      ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
          <<<CAFFE_GET_BLOCKS(size),
             CAFFE_CUDA_NUM_THREADS,
             0,
             context->cuda_stream()>>>(size, cols_div, op, A, B, C);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  }
}
// General-rank broadcast driver at fixed compile-time rank D. A_dims,
// B_dims and C_dims are already aligned to rank D (callers pad with 1s via
// ComputeBroadcastBinaryOpDims). Builds stride arrays with stride 0 for
// size-1 (broadcast) dims and launches BroadcastBinaryOpCUDAKernel.
// No-op when any output dim is 0.
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
    const int* A_dims,
    const int* B_dims,
    const int* C_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  SimpleArray<int, D> A_strides_array;
  SimpleArray<int, D> B_strides_array;
  SimpleArray<FIXED_DIVISOR, D> C_dims_array;
  int A_stride = 1;
  int B_stride = 1;
  // Walk innermost -> outermost, accumulating row-major strides.
  for (int i = D - 1; i >= 0; --i) {
    if (C_dims[i] == 0) {
      return;
    }
    A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
    B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
    A_stride *= A_dims[i];
    B_stride *= B_dims[i];
    C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
  }
  const int size =
      std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
  BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Top-level broadcast binary op dispatcher. Normalizes A/B dims to a common
// rank, then picks the cheapest applicable kernel:
//   1. identical shapes        -> SimpleBinaryOpCUDAKernel (pure elementwise)
//   2. rowwise 2D broadcast    -> RowwiseBinaryOpCUDAKenel
//   3. colwise 2D broadcast    -> ColwiseBinaryOpCUDAKenel
//   4. anything else           -> rank-dispatched BroadcastBinaryOpImpl
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
    const int A_ndim,
    const int* A_dims,
    const int B_ndim,
    const int* B_dims,
    const BinaryOperator& op,
    const TIn* A,
    const TIn* B,
    TOut* C,
    CUDAContext* context) {
  const int ndim = std::max(A_ndim, B_ndim);
  std::vector<int> A_dims_array(ndim);
  std::vector<int> B_dims_array(ndim);
  std::vector<int> C_dims_array(ndim);
  // Right-aligns dims and fills leading 1s; also computes the output shape.
  utils::ComputeBroadcastBinaryOpDims(
      A_ndim,
      A_dims,
      B_ndim,
      B_dims,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data());
  if (A_dims_array == B_dims_array) {
    const int size = std::accumulate(
        C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
    SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
        <<<CAFFE_GET_BLOCKS(size),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context->cuda_stream()>>>(size, op, A, B, C);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
    return;
  }
  int rows;
  int cols;
  bool broadcast_1st;
  if (utils::IsRowwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, true, broadcast_1st, op, A, B, C, context);
    return;
  }
  if (utils::IsColwiseBroadcastBinaryOp(
          ndim,
          A_dims_array.data(),
          B_dims_array.data(),
          &rows,
          &cols,
          &broadcast_1st)) {
    BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
        rows, cols, false, broadcast_1st, op, A, B, C, context);
    return;
  }
  // Fully general case: dispatch on runtime rank to the D-templated impl.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
      ndim,
      BroadcastBinaryOpImpl,
      TIn,
      TOut,
      BinaryOperator,
      A_dims_array.data(),
      B_dims_array.data(),
      C_dims_array.data(),
      op,
      A,
      B,
      C,
      context);
}
} // namespace
// Generates the four public 2D-broadcast entry points for (TIn -> TOut, Op):
//   Rowwise##Func<TIn, CUDAContext, true/false>  - broadcast a row vector
//   Colwise##Func<TIn, CUDAContext, true/false>  - broadcast a column vector
// The trailing bool template parameter selects whether the FIRST operand
// (true) or the SECOND (false) is the broadcast one. All are no-ops when
// rows or cols is 0. Comments live outside the backslash-continued body.
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)      \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>(             \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>                       \
        <<<CAFFE_GET_BLOCKS(size),                                           \
           CAFFE_CUDA_NUM_THREADS,                                           \
           0,                                                                \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);    \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                          \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>(            \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>                      \
        <<<CAFFE_GET_BLOCKS(size),                                           \
           CAFFE_CUDA_NUM_THREADS,                                           \
           0,                                                                \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);    \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                          \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>(             \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>                       \
        <<<CAFFE_GET_BLOCKS(size),                                           \
           CAFFE_CUDA_NUM_THREADS,                                           \
           0,                                                                \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);    \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                          \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>(            \
      const int rows,                                                        \
      const int cols,                                                        \
      const TIn* A,                                                          \
      const TIn* B,                                                          \
      TOut* C,                                                               \
      CUDAContext* context) {                                                \
    if (rows == 0 || cols == 0) {                                            \
      return;                                                                \
    }                                                                        \
    const int size = rows * cols;                                            \
    const FIXED_DIVISOR cols_div(cols);                                      \
    ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>                      \
        <<<CAFFE_GET_BLOCKS(size),                                           \
           CAFFE_CUDA_NUM_THREADS,                                           \
           0,                                                                \
           context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C);    \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                          \
  }
// Instantiation tables for the 2D-broadcast entry points:
// - comparisons (EQ/NE/LT/LE/GT/GE) map numeric/bool inputs to bool,
// - arithmetic (Add/Sub/Mul/Div) is type-preserving (incl. at::Half),
// - logical And/Or/Xor and the bitwise family operate on integral/bool.
// Note: boolean Xor is implemented with thrust::bit_xor, which is
// equivalent to logical XOR on bool operands.
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)            \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int32_t, std::int32_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int64_t, std::int64_t, Func, Op)                           \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)  \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
// Generates the general N-d broadcast entry point Func<TIn, CUDAContext>,
// which forwards to the BroadcastBinaryOp dispatcher above with a
// default-constructed Op<TIn> functor. The instantiation tables mirror the
// 2D versions: comparisons -> bool, arithmetic type-preserving, logical and
// bitwise on bool/integral types. Comments stay outside the macro body.
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op)  \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>(                     \
      const int A_ndim,                                               \
      const int* A_dims,                                              \
      const int B_ndim,                                               \
      const int* B_dims,                                              \
      const TIn* A,                                                   \
      const TIn* B,                                                   \
      TOut* C,                                                        \
      CUDAContext* context) {                                         \
    BroadcastBinaryOp<TIn, TOut, Op<TIn>>(                            \
        A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
  }
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op)                \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op)        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op)       \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op)            \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int32_t, std::int32_t, Func, Op)                        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                         \
      std::int64_t, std::int64_t, Func, Op)                        \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op)  \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(                      \
      std::int32_t, std::int32_t, Func, Op)                     \
  DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
// Full-array reduction via CUB's standard two-phase protocol:
// pass 1 (d_temp_storage == nullptr) only queries the required temp-storage
// size; pass 2 runs the reduction using scratch_ptr as that storage.
// The scratch tensor is resized in units of T, rounded up to cover
// memRequired bytes. The result `dst` is a single device-side T.
// Comments stay outside the backslash-continued macro body.
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func)                  \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>(                     \
      const int N,                                                      \
      const T* src,                                                     \
      T* dst,                                                           \
      Tensor* scratch_ptr,                                              \
      CUDAContext* context) {                                           \
    size_t memRequired = 0;                                             \
    cub::DeviceReduce::func(                                            \
        nullptr, memRequired, src, dst, N, context->cuda_stream());     \
    auto buffer_size =                                                  \
        static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size});             \
    cub::DeviceReduce::func(                                            \
        static_cast<void*>(scratch_ptr->mutable_data<T>()),             \
        memRequired,                                                    \
        src,                                                            \
        dst,                                                            \
        N,                                                              \
        context->cuda_stream());                                        \
  }
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
//
// C = alpha * op(A) * op(B) + beta * C for row-major float matrices,
// where op() is identity or transpose per trans_A/trans_B, A is M x K,
// B is K x N and C is M x N (all pre-op, row-major). math_type is unused
// for the float specialization.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention. Computing B^T * A^T in column-major is the same
  // as computing A * B in row-major, which is why operands are swapped and
  // N/M are exchanged below.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // alpha/beta are host pointers, so the handle must be in host pointer mode.
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      N));
}
// Half-precision GEMM: C = alpha * op(A) * op(B) + beta * C on row-major
// at::Half data. math_type selects the accumulation path:
//   FLOAT   -> fp16 storage with fp32 compute (cublasSgemmEx / hipblasGemmEx)
//   FLOAT16 -> pure fp16 compute (cublasHgemm)
// Any other math_type throws.
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const at::Half* B,
    const float beta,
    at::Half* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention: row-major A*B is computed as column-major
  // op(B)*op(A) with M and N swapped.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
#if defined(USE_ROCM)
    // hipblas doesn't support hipblasSgemmEx type API.
    // It has more general hipblasGemmEx API which is more close to cublasGemmEx.
    // hipblasGemmEx does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasSgemmEx does C = alpha*op( A )*op( B ) + beta*C
    HIPBLAS_ENFORCE(hipblasGemmEx(
        context->hipblas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        HIPBLAS_R_16F,
        ldb,
        A,
        HIPBLAS_R_16F,
        lda,
        &beta,
        C,
        HIPBLAS_R_16F,
        N,
        HIPBLAS_R_32F, // compute type
        HIPBLAS_GEMM_DEFAULT));
#else
    // fp16 in/out with fp32 accumulation.
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        CUDA_R_16F,
        ldb,
        A,
        CUDA_R_16F,
        lda,
        &beta,
        C,
        CUDA_R_16F,
        N));
#endif // USE_ROCM
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // convert alpha, beta from float -> __half (cublasHgemm takes half
    // scalars)
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    // call cublasHgemm
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemm(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
        ldb,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
        N));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
// Adds a per-channel bias to a CHW image in place. Expressed as a rank-1
// update: image (C x HW) += bias (C x 1) * bias_multiplier (1 x HW), where
// bias_multiplier is expected to be a vector of ones of length image_size.
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
    const float* bias,
    const float* bias_multiplier,
    const int bias_channels,
    const int image_size,
    float* image,
    CUDAContext* context) {
  const float kAlpha = 1.0f; // scale of the bias outer product
  const float kBeta = 1.0f; // accumulate into the existing image
  Gemm<float, CUDAContext>(
      CblasNoTrans,
      CblasNoTrans,
      bias_channels,
      image_size,
      1,
      kAlpha,
      bias,
      bias_multiplier,
      kBeta,
      image,
      context);
}
// Batched float GEMM over arrays of matrix pointers:
// C[i] = alpha * op(A[i]) * op(B[i]) + beta * C[i] for i in [0, batch_size).
// ROCm falls back to a per-matrix loop; CUDA stages the host pointer arrays
// into device memory and calls cublasSgemmBatched.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
#if defined(USE_ROCM)
  // loop over matrices in the batch
  for (int i = 0; i < batch_size; ++i) {
    Gemm<float, CUDAContext>(
        trans_A,
        trans_B,
        M,
        N,
        K,
        alpha,
        A[i],
        B[i],
        beta,
        C[i],
        context,
        math_type);
  }
#else
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, M/N exchanged).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // cublasSgemmBatched requires the pointer arrays themselves to live in
  // device memory; copy them over (synchronous H2D via thrust).
  thrust::device_vector<const float*> A_device(A, A + batch_size);
  thrust::device_vector<const float*> B_device(B, B + batch_size);
  thrust::device_vector<float*> C_device(C, C + batch_size);
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemmBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B_device.data().get(),
      ldb,
      A_device.data().get(),
      lda,
      &beta,
      C_device.data().get(),
      ldc,
      batch_size));
#endif
}
// Strided-batched float GEMM: batch i uses A + i * A_stride,
// B + i * B_stride, C + i * C_stride (strides in elements). Single cuBLAS
// call; no pointer arrays needed.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, M/N exchanged).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemmStridedBatched(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      B_stride,
      A,
      lda,
      A_stride,
      &beta,
      C,
      ldc,
      C_stride,
      batch_size));
}
// Batched half-precision GEMM over pointer arrays. math_type selects:
//   FLOAT   -> fp16 storage / fp32 compute via cublasGemmBatchedEx,
//   FLOAT16 -> pure fp16 via cublasHgemmBatched.
// In both cases the per-batch pointer arrays are staged into device memory.
// Any other math_type throws.
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half** A,
    const at::Half** B,
    const float beta,
    at::Half** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, M/N exchanged).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    // Pointer arrays must reside in device memory for the batched API.
    thrust::device_vector<const void*> A_device(A, A + batch_size);
    thrust::device_vector<const void*> B_device(B, B + batch_size);
    thrust::device_vector<void*> C_device(C, C + batch_size);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasGemmBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B_device.data().get(),
        CUDA_R_16F,
        ldb,
        A_device.data().get(),
        CUDA_R_16F,
        lda,
        &beta,
        C_device.data().get(),
        CUDA_R_16F,
        ldc,
        batch_size,
        CUDA_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    // Re-type the at::Half pointers as __half on the host, then stage the
    // arrays into device memory.
    thrust::host_vector<const __half*> A_array(batch_size);
    thrust::host_vector<const __half*> B_array(batch_size);
    thrust::host_vector<__half*> C_array(batch_size);
    for (int i = 0; i < batch_size; ++i) {
      A_array[i] = reinterpret_cast<const __half*>(A[i]);
      B_array[i] = reinterpret_cast<const __half*>(B[i]);
      C_array[i] = reinterpret_cast<__half*>(C[i]);
    }
    thrust::device_vector<const __half*> A_device(
        A_array.cbegin(), A_array.cend());
    thrust::device_vector<const __half*> B_device(
        B_array.cbegin(), B_array.cend());
    thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemmBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE* const*>(B_device.data().get()),
        ldb,
        reinterpret_cast<const CUBLAS_HALF_TYPE* const*>(A_device.data().get()),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE* const*>(C_device.data().get()),
        ldc,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
}
// Strided-batched half-precision GEMM: batch i reads/writes at fixed element
// strides from the base pointers. math_type selects fp32 compute
// (cublasGemmStridedBatchedEx) vs pure fp16 (cublasHgemmStridedBatched).
// Any other math_type throws.
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const int A_stride,
    const at::Half* B,
    const int B_stride,
    const float beta,
    at::Half* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention (operands swapped, M/N exchanged).
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const int ldc = N;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        &alpha,
        B,
        CUDA_R_16F,
        ldb,
        B_stride,
        A,
        CUDA_R_16F,
        lda,
        A_stride,
        &beta,
        C,
        CUDA_R_16F,
        ldc,
        C_stride,
        batch_size,
        CUDA_R_32F,
        CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // Convert alpha, beta from float -> __half (Hgemm takes half scalars)
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemmStridedBatched(
        context->cublas_handle(),
        cu_trans_B,
        cu_trans_A,
        N,
        M,
        K,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
        ldb,
        B_stride,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        A_stride,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
        ldc,
        C_stride,
        batch_size));
  } else {
    CAFFE_THROW("Unsupported math type");
  }
}
// y = alpha * op(A) * x + beta * y for a row-major M x N float matrix A.
// cuBLAS is column-major, so the row-major matrix is presented as its
// transpose: the transpose flag is INVERTED (NoTrans -> CUBLAS_OP_T) and
// the dimensions are passed as (N, M). math_type is unused here.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemv(
      context->cublas_handle(),
      cu_trans_A,
      N,
      M,
      &alpha,
      A,
      N,
      x,
      1,
      &beta,
      y,
      1));
}
// Half-precision GEMV: y = alpha * op(A) * x + beta * y for a row-major
// M x N at::Half matrix. There is no cublasHgemv, so the product is
// expressed as a GEMM with a 1-column right-hand side; math_type selects
// fp32 compute (cublasSgemmEx / hipblasGemmEx) vs pure fp16 (cublasHgemm).
// Any other math_type throws.
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const at::Half* A,
    const at::Half* x,
    const float beta,
    at::Half* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Row-major trick: invert the transpose flag for column-major cuBLAS.
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  // sort out what we need to call cublasSgemmEx / cublasHgemm:
  // m = output length, k = input length of the matrix-vector product.
  const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M;
  const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N;
  const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k;
  const int ldc = m;
  if (math_type == TensorProto_DataType_FLOAT) {
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
#if defined(USE_ROCM)
    // hipblas doesn't support hipblasSgemmEx type API.
    // It has more general hipblasGemmEx API which is more close to cublasGemmEx.
    // hipblasGemmEx does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasSgemmEx does C = alpha*op( A )*op( B ) + beta*C
    HIPBLAS_ENFORCE(hipblasGemmEx(
        context->hipblas_handle(),
        cu_trans_A,
        HIPBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        HIPBLAS_R_16F,
        lda,
        x,
        HIPBLAS_R_16F,
        k,
        &beta,
        y,
        HIPBLAS_R_16F,
        ldc,
        HIPBLAS_R_32F, // compute type
        HIPBLAS_GEMM_DEFAULT));
#else
    CUBLAS_ENFORCE(cublasSgemmEx(
        context->cublas_handle(),
        cu_trans_A,
        CUBLAS_OP_N,
        m,
        1,
        k,
        &alpha,
        A,
        CUDA_R_16F,
        lda,
        x,
        CUDA_R_16F,
        k,
        &beta,
        y,
        CUDA_R_16F,
        ldc));
#endif // USE_ROCM
  } else if (math_type == TensorProto_DataType_FLOAT16) {
    // cublasHgemm takes half-precision scalars.
    const __half alpha_fp16 = at::Half(alpha);
    const __half beta_fp16 = at::Half(beta);
    CUBLAS_ENFORCE(cublasSetPointerMode(
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
    CUBLAS_ENFORCE(cublasHgemm(
        context->cublas_handle(),
        cu_trans_A,
        CUBLAS_OP_N,
        m,
        1,
        k,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
        lda,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(x),
        k,
        reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
        reinterpret_cast<CUBLAS_HALF_TYPE*>(y),
        ldc));
  } else {
    // fail
    CAFFE_THROW("Unsupported math type");
  }
}
#if !defined(USE_ROCM)
// No change, but required. Defer to default CUDA engine
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const float* B,
    const float beta,
    float* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Plain fp32 GEMM has no TensorCore-specific path here; delegate
  // unchanged to the default CUDA engine implementation.
  Gemm<float, CUDAContext>(
      trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
// TensorCore fp16 GEMM: C = alpha * op(A) * op(B) + beta * C.
//
// Temporarily enables CUBLAS_TENSOR_OP_MATH on the shared handle (when
// TensorCores are available), runs cublasGemmEx with fp16 storage and fp32
// compute, then restores CUBLAS_DEFAULT_MATH so other users of the handle
// are unaffected. Since the inputs are row-major and cuBLAS is
// column-major, B and A are swapped (computing C^T = op(B)^T * op(A)^T).
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const at::Half* B,
    const float beta,
    at::Half* C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const int lda = (trans_A == CblasNoTrans) ? K : M;
  const int ldb = (trans_B == CblasNoTrans) ? N : K;
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  // enable TensorCore for this call on this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
  }
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasGemmEx(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      CUDA_R_16F,
      ldb,
      A,
      CUDA_R_16F,
      lda,
      &beta,
      C,
      CUDA_R_16F,
      N,
      CUDA_R_32F,
      CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  // Now disable TensorCore math for subsequent calls to this handle
  if (TensorCoreAvailable()) {
    CUBLAS_ENFORCE(
        cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
  }
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float** A,
    const float** B,
    const float beta,
    float** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // No TensorCore-specific batched fp32 path; forward to the default engine.
  GemmBatched<float, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context,
      math_type);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half** A,
    const at::Half** B,
    const float beta,
    at::Half** C,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // No TensorCore-specific batched fp16 path; forward to the default engine.
  GemmBatched<at::Half, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context,
      math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int A_stride,
    const float* B,
    const int B_stride,
    const float beta,
    float* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // No TensorCore-specific strided-batched fp32 path; defer to the default
  // engine implementation.
  GemmStridedBatched<float, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int batch_size,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const at::Half* A,
    const int A_stride,
    const at::Half* B,
    const int B_stride,
    const float beta,
    at::Half* C,
    const int C_stride,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  // No TensorCore-specific strided-batched fp16 path; defer to the default
  // engine implementation.
  GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
      trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride,
      beta, C, C_stride, context, math_type);
}
// GEMV has no TensorCore path; defer to the default CUDA engine.
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const float* A,
    const float* x,
    const float beta,
    float* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemv<float, CUDAContext, DefaultEngine>(
      trans_A, M, N, alpha, A, x, beta, y, context, math_type);
}
// GEMV has no TensorCore path; defer to the default CUDA engine.
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext, TensorCoreEngine>(
    const CBLAS_TRANSPOSE trans_A,
    const int M,
    const int N,
    const float alpha,
    const at::Half* A,
    const at::Half* x,
    const float beta,
    at::Half* y,
    CUDAContext* context,
    TensorProto::DataType math_type) {
  Gemv<at::Half, CUDAContext, DefaultEngine>(
      trans_A, M, N, alpha, A, x, beta, y, context, math_type);
}
#endif
// GemmEx: C = alpha * op(A) * op(B) + beta * C with caller-supplied leading
// dimensions (lda/ldb/ldc), float only.
//
// Inputs are row-major; cuBLAS is column-major, so A and B are swapped in
// the call (computing C^T = op(B)^T * op(A)^T) rather than materializing
// any transpose.
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
    const CBLAS_TRANSPOSE trans_A,
    const CBLAS_TRANSPOSE trans_B,
    const int M,
    const int N,
    const int K,
    const float alpha,
    const float* A,
    const int lda,
    const float* B,
    const int ldb,
    const float beta,
    float* C,
    const int ldc,
    CUDAContext* context) {
  // Note that cublas follows fortran order, so the order is different from
  // the cblas convention.
  const cublasOperation_t cu_trans_A =
      (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  const cublasOperation_t cu_trans_B =
      (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_ENFORCE(
      cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
  CUBLAS_ENFORCE(cublasSgemm(
      context->cublas_handle(),
      cu_trans_B,
      cu_trans_A,
      N,
      M,
      K,
      &alpha,
      B,
      ldb,
      A,
      lda,
      &beta,
      C,
      ldc));
}
// Batched Add variants
namespace {
// Accumulates `batch` stripes of `first` (each `stripe` elements apart)
// into Y element-wise, converting through float so that half inputs are
// summed with fp32 precision.
template <typename T>
__global__ void AddStripedBatchKernel(
    const int N,
    const T* first,
    T* Y,
    const int stripe,
    const int batch) {
  for (int b = 0; b < batch; b++) {
    const T* src = first + b * stripe;
    CUDA_1D_KERNEL_LOOP(i, N) {
      Y[i] = convert::To<float, T>(
          convert::To<T, float>(Y[i]) + convert::To<T, float>(src[i]));
    }
  }
}
} // namespace
// Generates AddStripedBatch<T, CUDAContext> specializations: adds `batch`
// stripes of `first` (each `stripe` elements apart) into Y by launching
// AddStripedBatchKernel on the context's stream.
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
namespace {
// Affinely maps uniform samples written by curand (in (0.0, 1.0]) into
// (min, max]: x[i] = x[i] * (max - min) + min, computed in float for any T.
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
  float scale = max - min;
  CUDA_1D_KERNEL_LOOP(i, N) {
    x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
  }
}
// Maps raw 32-bit random words to ints in [min, max] (inclusive) by modular
// reduction, writing in place through an int reinterpretation.
// NOTE(review): the modulo introduces slight bias when (max - min + 1) does
// not divide 2^32.
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
  int* x_int = reinterpret_cast<int*>(x);
  int range = (max - min + 1);
  CUDA_1D_KERNEL_LOOP(i, N) {
    x_int[i] = min + static_cast<int>(x[i] % range);
  }
}
} // namespace
// Fills r with n floats uniformly distributed over (min, max]:
// curandGenerateUniform produces samples in (0, 1], which UniformShift then
// scales and offsets in place.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
    const size_t n,
    const float min,
    const float max,
    float* r,
    CUDAContext* context) {
  CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
  UniformShift<float>
      <<<CAFFE_GET_BLOCKS(n),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(n, min, max, r);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Double-precision variant of RandUniform: generates (0, 1] samples with
// curandGenerateUniformDouble and shifts them to (min, max).
// Note the shift kernel takes min/max as float, so the endpoints are
// rounded to float precision.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
    const size_t n,
    const double min,
    const double max,
    double* r,
    CUDAContext* context) {
  CURAND_ENFORCE(
      curandGenerateUniformDouble(context->curand_generator(), r, n));
  UniformShift<double>
      <<<CAFFE_GET_BLOCKS(n),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(n, min, max, r);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Fills r with n ints uniformly distributed over [min, max] (inclusive):
// curandGenerate produces raw 32-bit words, which UniformIntFit reduces
// into the requested range in place.
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
    const size_t n,
    const int min,
    const int max,
    int* r,
    CUDAContext* context) {
  CURAND_ENFORCE(curandGenerate(
      context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
  UniformIntFit<<<
      CAFFE_GET_BLOCKS(n),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context->cuda_stream()>>>(
      n, min, max, reinterpret_cast<unsigned int*>(r));
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// If n is odd, fills the trailing element of r with one host-generated
// Gaussian sample (the curand normal generators used by the callers require
// an even count) and returns the remaining even length; otherwise returns n
// unchanged.
//
// Fix: the engine was previously default-constructed on every call, so the
// "random" trailing value was the identical deterministic value each time.
// Seed it from std::random_device instead.
template <typename T>
size_t HandleOddLengthRandGaussian(
    const size_t n,
    const T mean,
    const T std,
    T* r,
    CUDAContext* context) {
  if (n % 2 == 1) {
    std::random_device rd;
    std::default_random_engine generator(rd());
    std::normal_distribution<T> distribution(mean, std);
    const T random_value = distribution(generator);
    // Write the sample into the last element on the device.
    Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
    return n - 1;
  }
  return n;
}
// Fills r with n Gaussian(mean, std) floats using curandGenerateNormal.
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
    const size_t n,
    const float mean,
    const float std,
    float* r,
    CUDAContext* context) {
  // If n is odd, we add a random Gaussian value at the end manually
  // and generate n-1 random values using curandGenerateNormal.
  // curandGenerateNormal requires n to be even.
  const size_t even_n =
      HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
  CURAND_ENFORCE(
      curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
// Double-precision variant of RandGaussian; same even-count workaround as
// the float specialization above.
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
    const size_t n,
    const double mean,
    const double std,
    double* r,
    CUDAContext* context) {
  const size_t even_n =
      HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
  CURAND_ENFORCE(curandGenerateNormalDouble(
      context->curand_generator(), r, even_n, mean, std));
}
// Dot product of two length-n float vectors: *y = a . b.
// Pointer mode is DEVICE, so y must be a device pointer; the result stays
// on the GPU and no host synchronization is implied.
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
    const int n,
    const float* a,
    const float* b,
    float* y,
    CUDAContext* context) {
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
// Dot product of two length-n fp16 vectors via cublasDotEx: inputs and the
// device-resident result are fp16, accumulation runs in fp32.
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
    const int n,
    const at::Half* a,
    const at::Half* b,
    at::Half* y,
    CUDAContext* context) {
  // execute with 32-bit math
  CUBLAS_ENFORCE(cublasSetPointerMode(
      context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
  CUBLAS_ENFORCE(cublasDotEx(
      context->cublas_handle(),
      n,
      a,
      CUDA_R_16F,
      1,
      b,
      CUDA_R_16F,
      1,
      y,
      CUDA_R_16F,
      CUDA_R_32F));
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
// Single-block sum reduction: reduces N values of X (optionally squared)
// into *Y. Must be launched with exactly one block of SUM_KERNEL_NTHREADS
// threads; accumulation is done in float regardless of T.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
  reduction_buffer[idx] = 0;
  // A multilevel reduction.
  // N -> 128
  if (!square) {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      reduction_buffer[idx] += convert::To<T, float>(X[i]);
    }
  } else {
    for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
      float Xi = convert::To<T, float>(X[i]);
      reduction_buffer[idx] += Xi * Xi;
    }
  }
  __syncthreads();
  // 128 -> 32
  if (idx < 32) {
    reduction_buffer[idx] += reduction_buffer[idx + 32] +
        reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
  }
  __syncthreads();
  // 32 -> 1
  if (idx == 0) {
    float tmp = 0;
    for (int i = 0; i < 32; ++i) {
      tmp += reduction_buffer[i];
    }
    *Y = convert::To<float, T>(tmp);
  }
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
// Copies the float accumulator into dest, converting to T; launched with a
// single thread after a device reduction.
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
  *dest = convert::To<float, T>(*sum);
}
// Sums N elements of iterator `it` into *dest using cub::DeviceReduce::Sum.
// The first cub call (null temp storage) only queries the required scratch
// size; scratch_ptr is then resized in units of T (rounded up). If dest is
// null, one extra T is appended to the scratch tensor and dest is pointed
// at it, letting callers reduce into a temporary device accumulator.
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
    const int N,
    IterT it,
    T*& dest,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  size_t memRequired = 0;
  cub::DeviceReduce::Sum(
      nullptr, memRequired, it, dest, N, context->cuda_stream());
  auto buffer_size =
      static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
  if (!dest) {
    // allocate one more T at the end of scratch for dest
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
    dest = scratch_ptr->template mutable_data<T>() + buffer_size;
  } else {
    scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
  }
  cub::DeviceReduce::Sum(
      static_cast<void*>(scratch_ptr->template mutable_data<T>()),
      memRequired,
      it,
      dest,
      N,
      context->cuda_stream());
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Use cub device reduction for large inputs when scratch space is
  // available; otherwise fall back to the single-block kernel.
  const bool use_device_reduce =
      scratch_ptr != nullptr && N > DEVICE_REDUCE_SIZE_THRESHOLD;
  if (use_device_reduce) {
    SumGenericIter<float>(N, x, y, context, scratch_ptr);
  } else {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, false);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  }
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
    const int N,
    const int32_t* x,
    int32_t* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Small inputs (or no scratch tensor) go through the single-block
  // fallback kernel; larger ones use cub device reduction.
  const bool use_device_reduce =
      scratch_ptr != nullptr && N > DEVICE_REDUCE_SIZE_THRESHOLD;
  if (!use_device_reduce) {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, false);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  } else {
    SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
  }
}
namespace {
// Unary functor converting a T to float; used with cub transform iterators
// so reductions over half data accumulate in fp32.
template <typename T>
struct FloatTransform {
  inline __host__ __device__ float operator()(const T v) const {
    return convert::To<T, float>(v);
  }
};
} // namespace
// Generates Sum<T, CUDAContext> for non-float types (instantiated below for
// at::Half): large inputs are converted to float through a transform
// iterator, reduced with cub into a float scratch slot, then converted back
// to T by SumConvertKernel; small inputs use the single-block SumKernel.
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
// Unary functor returning v * v; used with cub transform iterators for
// sum-of-squares reductions.
template <typename T>
struct SqrTransform {
  inline __host__ __device__ T operator()(const T v) const {
    return v * v;
  }
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
    const int N,
    const float* x,
    float* y,
    CUDAContext* context,
    Tensor* scratch_ptr) {
  // Sum of squares. Small inputs (or no scratch tensor) run the
  // single-block kernel in "square" mode; larger ones square through a
  // transform iterator and reduce with cub.
  if (scratch_ptr == nullptr || N <= DEVICE_REDUCE_SIZE_THRESHOLD) {
    SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
        N, x, y, true);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  } else {
    SqrTransform<float> transform;
    cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
        x, transform);
    SumGenericIter<float>(N, it, y, context, scratch_ptr);
  }
}
// Generates SumSqr<T, CUDAContext> for non-float types (instantiated below
// for at::Half): chains a T->float conversion iterator with a squaring
// iterator, reduces with cub into a float scratch slot, and converts the
// result back to T; small inputs use SumKernel in "square" mode.
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
// For each row i of the N x D matrix x, copies the element in column idx[i]
// into y[i].
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const int offset = i * D + idx[i];
    y[i] = x[offset];
  }
}
} // namespace
// Select: y[i] = x[i * D + idx[i]] — picks one element (column idx[i]) out
// of each row of the N x D matrix x.
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
    const int N,
    const int D,
    const float* x,
    const int* idx,
    float* y,
    CUDAContext* context) {
  SelectKernel<float>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, D, x, idx, y);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// fp16 variant of Select; see the float specialization above.
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
    const int N,
    const int D,
    const at::Half* x,
    const int* idx,
    at::Half* y,
    CUDAContext* context) {
  SelectKernel<at::Half>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, D, x, idx, y);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
namespace {
// im2col for NCHW layout. n = channels * output_h * output_w; each thread
// owns one (channel, h_out, w_out) output position and writes its entire
// kernel_h * kernel_w patch, one value every output_h * output_w elements
// of col_data. Padding taps (out-of-bounds h/w) produce 0.
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (channel_in, h_out, w_out).
    const int w_out = index % output_w;
    const int h_index = index / output_w;
    const int h_out = h_index % output_h;
    const int channel_in = h_index / output_h;
    const int channel_out = channel_in * kernel_h * kernel_w;
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    const int output_size = output_h * output_w;
    T* col_data_ptr =
        col_data + (channel_out * output_h + h_out) * output_w + w_out;
    const T* img_data_ptr =
        img_data + (channel_in * input_h + h_in) * input_w + w_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data_ptr + dh * input_w + dw)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data_ptr[dh * input_w + dw]
            : 0;
#endif
        col_data_ptr += output_size;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// im2col for NHWC layout. n = output_h * output_w * channels; each thread
// owns one (h_out, w_out, channel) output position and writes its
// kernel_h * kernel_w patch, stepping by `channels` between taps.
// Padding taps produce 0.
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_w,
    const int channels,
    const T* img_data,
    T* col_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (h_out, w_out, channel_in).
    const int channel_in = index % channels;
    const int w_out = index / channels % output_w;
    const int h_out = index / channels / output_w;
    const int h_in = h_out * stride_h - pad_t;
    const int w_in = w_out * stride_w - pad_l;
    T* col_data_ptr = col_data +
        (h_out * output_w + w_out) * channels * kernel_h * kernel_w +
        channel_in;
    int dh = 0;
    for (int i = 0; i < kernel_h; ++i) {
      int dw = 0;
      for (int j = 0; j < kernel_w; ++j) {
        const int h = h_in + dh;
        const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? __ldg(img_data + (h * input_w + w) * channels + channel_in)
            : 0;
#else
        *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
                utils::IsAGeZeroAndALtB(w, input_w)
            ? img_data[(h * input_w + w) * channels + channel_in]
            : 0;
#endif
        col_data_ptr += channels;
        dw += dilation_w;
      }
      dh += dilation_h;
    }
  }
}
// col2im for NCHW. One thread per image element; each thread gathers (sums)
// every col_data entry whose sliding patch covers its pixel, so no atomics
// are needed. The h_k % dilation_h / w_k % dilation_w checks keep only taps
// that fall on the dilation grid.
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
    const int n,
    const int input_h,
    const int input_w,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    const int w = index % input_w + pad_l;
    const int h = index / input_w % input_h + pad_t;
    const int c = index / (input_h * input_w);
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = (h - h_col * stride_h);
        int w_k = (w - w_col * stride_w);
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int col_data_index =
              (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
                  output_w +
              w_col;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
          val += __ldg(col_data + col_data_index);
#else
          val += col_data[col_data_index];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// col2im for NHWC. Same gather strategy as the NCHW variant: one thread per
// image element sums all covering column entries, with dilation-grid
// alignment checks; no atomics needed.
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
    const int n,
    const int input_w,
    const int channels,
    const int patch_h,
    const int patch_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int stride_h,
    const int stride_w,
    const int output_h,
    const int output_w,
    const T* col_data,
    T* img_data) {
  // Effective (dilated) patch extents.
  const int dpatch_h = dilation_h * (patch_h - 1) + 1;
  const int dpatch_w = dilation_w * (patch_w - 1) + 1;
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    const int c = index % channels;
    const int w = index / channels % input_w + pad_l;
    const int h = index / channels / input_w + pad_t;
    // compute the start and end of the output
    const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
    const int w_col_end = min(w / stride_w + 1, output_w);
    const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
    const int h_col_end = min(h / stride_h + 1, output_h);
    const int channels_col = patch_h * patch_w * channels;
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int h_k = h - h_col * stride_h;
        int w_k = w - w_col * stride_w;
        if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
          h_k /= dilation_h;
          w_k /= dilation_w;
          const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
          val += __ldg(
              col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
          val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
        }
      }
    }
    img_data[index] = val;
  }
}
// N-dimensional im2col / col2im (NCHW). Blocks iterate over outer_size
// (channel, kernel-offset) rows of the column buffer; threads iterate over
// inner_size output spatial positions. When kCol2Im is false, column
// entries are written (0 for padding taps); when true, column entries are
// atomically accumulated into the image, so the caller must zero Y_data
// first (see Col2ImNdNCHWCUDAImpl below).
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
    const int outer_size,
    const int inner_size,
    const int kernel_size,
    SimpleArray<int, N + 1> img_shape,
    SimpleArray<int, N + 1> col_shape,
    SimpleArray<int, N> kernel_shape,
    SimpleArray<int, N> stride,
    SimpleArray<int, N> dilation,
    SimpleArray<int, N> pad,
    const T* X_data,
    T* Y_data) {
  int d_offset[N];
  int d_iter[N];
  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    // Decompose row i into the per-dimension kernel offsets.
    int offset_i = i;
#pragma unroll
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset_i % kernel_shape.data[d_i];
      offset_i /= kernel_shape.data[d_i];
    }
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      // Decompose column j into the per-dimension output coordinates.
      int offset_j = j;
#pragma unroll
      for (int d_i = N - 1; d_i >= 0; --d_i) {
        d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
        offset_j /= col_shape.data[d_i + 1];
      }
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size;
      bool is_padding = false;
#pragma unroll
      for (int d_i = 0; d_i < N; ++d_i) {
        const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
            d_offset[d_i] * dilation.data[d_i];
        is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
        img_index = img_index * img_shape.data[d_i + 1] + d_img;
      }
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
      } else if (!is_padding) {
        gpu_atomic_add(Y_data + img_index, __ldg(X_data + col_index));
      }
#else
      if (!kCol2Im) {
        Y_data[col_index] = is_padding ? 0 : X_data[img_index];
      } else if (!is_padding) {
        gpu_atomic_add(Y_data + img_index, X_data[col_index]);
      }
#endif
    }
  }
}
// Host launcher for the N-d im2col kernel: packs shape/stride/dilation/pad
// into by-value SimpleArrays and launches one block per column row, capped
// at CAFFE_MAXIMUM_NUM_BLOCKS.
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  Im2ColNdNCHWCUDAKernel<T, N, false>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size,
          inner_size,
          kernel_size,
          img_shape_array,
          col_shape_array,
          kernel_shape_array,
          stride_array,
          dilation_array,
          pad_array,
          img_data,
          col_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Host launcher for the N-d col2im kernel. Zeroes the image buffer first
// because the kernel accumulates into it with atomic adds.
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* col_data,
    float* img_data,
    CUDAContext* context) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  SimpleArray<int, N + 1> img_shape_array;
  SimpleArray<int, N + 1> col_shape_array;
  SimpleArray<int, N> kernel_shape_array;
  SimpleArray<int, N> stride_array;
  SimpleArray<int, N> dilation_array;
  SimpleArray<int, N> pad_array;
  std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
  std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
  std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
  std::memcpy(stride_array.data, stride, N * sizeof(int));
  std::memcpy(dilation_array.data, dilation, N * sizeof(int));
  std::memcpy(pad_array.data, pad, N * sizeof(int));
  // The kernel accumulates atomically, so start from a zeroed image.
  Set<T, CUDAContext>(img_size, 0, img_data, context);
  Im2ColNdNCHWCUDAKernel<T, N, true>
      <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          outer_size,
          inner_size,
          kernel_size,
          img_shape_array,
          col_shape_array,
          kernel_shape_array,
          stride_array,
          dilation_array,
          pad_array,
          col_data,
          img_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} // namespace
// Im2Col (NCHW): expands img_data into col_data so convolution can run as a
// GEMM. Output spatial dims follow the standard dilated-convolution
// formula. The groups argument is unused: in NCHW the column layout is the
// same regardless of grouping.
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  // One thread per (channel, output position).
  const int num_kernels = channels * output_h * output_w;
  Im2ColNCHWCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          img_data,
          col_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Im2Col (NHWC). Only groups == 1 is supported for the GPU NHWC path.
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  // One thread per (output position, channel).
  const int num_kernels = output_h * output_w * channels;
  Im2ColNHWCCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_w,
          channels,
          img_data,
          col_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Col2Im (NCHW): inverse of Im2Col — accumulates the column buffer back
// into the image. Launches one thread per image element (gather style, no
// atomics).
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int /* groups */) {
  // In NCHW, the number of groups doesn't affect Col2Im.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  Col2ImNCHWCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          height,
          width,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          col_data,
          img_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Col2Im (NHWC): inverse of the NHWC Im2Col. Only groups == 1 supported.
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const float* col_data,
    float* img_data,
    CUDAContext* context,
    const int groups) {
  CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  const int num_kernels = height * width * channels;
  Col2ImNHWCCUDAKernel<float>
      <<<CAFFE_GET_BLOCKS(num_kernels),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(
          num_kernels,
          width,
          channels,
          kernel_h,
          kernel_w,
          dilation_h,
          dilation_w,
          pad_t,
          pad_l,
          stride_h,
          stride_w,
          output_h,
          output_w,
          col_data,
          img_data);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// N-dimensional Im2Col (NCHW): dispatches on the compile-time spatial
// dimension count N to Im2ColNdNCHWCUDAImpl.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
    const int N,
    const int img_size,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const float* img_data,
    float* col_data,
    CUDAContext* context,
    const int /* groups */) {
  // In NCHW, the number of groups doesn't affect Im2Col.
  DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
      N,
      Im2ColNdNCHWCUDAImpl,
      float,
      img_size,
      col_size,
      img_shape,
      col_shape,
      kernel_shape,
      stride,
      dilation,
      pad,
      img_data,
      col_data,
      context);
}
// N-dimensional Im2Col (NHWC): no GPU implementation exists; this
// specialization aborts via CAFFE_NOT_IMPLEMENTED if ever called.
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_NOT_IMPLEMENTED;
}
// N-dimensional Col2Im (NCHW): converts the runtime spatial rank N into a
// template parameter and forwards all arguments to Col2ImNdNCHWCUDAImpl<N>.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
// N-dimensional Col2Im (NHWC): no GPU implementation exists; this
// specialization aborts via CAFFE_NOT_IMPLEMENTED if ever called.
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int groups) {
CAFFE_NOT_IMPLEMENTED;
}
// Untyped strided device-to-device matrix copy: copies an M x N matrix of
// `itemsize`-byte elements from A (leading dimension lda, in elements) to
// B (leading dimension ldb, in elements), asynchronously on the context's
// stream. A non-null TypeMeta::Copy is rejected: per-element copy
// constructors cannot run inside a plain cudaMemcpy2DAsync.
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
    const size_t itemsize,
    const int M,
    const int N,
    const void* A,
    const int lda,
    void* B,
    const int ldb,
    CUDAContext* context,
    TypeMeta::Copy copy) {
  CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
  // Nothing to do for an empty matrix; mirrors the guard in the typed
  // CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX specializations below.
  if (M == 0 || N == 0) {
    return;
  }
  // Surface async-copy launch failures, matching the CopyVector
  // specializations below.
  C10_CUDA_CHECK(cudaMemcpy2DAsync(
      B,
      ldb * itemsize,
      A,
      lda * itemsize,
      N * itemsize,
      M,
      cudaMemcpyDeviceToDevice,
      context->cuda_stream()));
}
// Typed strided M x N device-to-device matrix copy (no-op for an empty
// matrix), asynchronous on the context's stream. Instantiated below for
// float, double, int and int64_t.
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
cudaMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
// Asynchronously copies N floats device-to-device on the context's stream.
// A self-copy (src == dst) or an empty vector is a no-op.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
C10_CUDA_CHECK(cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream()));
}
}
// Asynchronously copies N ints device-to-device on the context's stream.
// A self-copy (src == dst) or an empty vector is a no-op.
template <>
CAFFE2_CUDA_EXPORT void CopyVector<int, CUDAContext>(
    const int N,
    const int* src,
    int* dst,
    CUDAContext* context) {
  if (src == dst || N <= 0) {
    return;  // aliasing buffers or nothing to move
  }
  const size_t nbytes = sizeof(int) * N;
  C10_CUDA_CHECK(cudaMemcpyAsync(
      dst, src, nbytes, cudaMemcpyDeviceToDevice, context->cuda_stream()));
}
namespace {
// Block-wide CUB reducer sized to the fixed Caffe2 thread count.
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
// Reduces every row of the rows x cols matrix X with `reducer`, scales the
// result by alpha, and writes it to Y[row]. Blocks grid-stride over rows;
// threads within a block stride over the row's columns.
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
// CUB returns the block-wide result only in thread 0.
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
// Barrier before temp_storage is reused for the next row.
__syncthreads();
}
}
// Column-wise counterpart: reduces every column of the rows x cols matrix
// X; note the strided (column-major style) access X[j * cols + i].
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
// Barrier before temp_storage is reused for the next column.
__syncthreads();
}
}
} // namespace
// RowwiseMax: y[i] = max over row i of the N x D matrix x, computed with
// RowwiseReduceKernel (cub::Max, identity = lowest T, alpha = 1).
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
// ColwiseMax: y[j] = max over column j of the N x D matrix x, computed
// with ColwiseReduceKernel (cub::Max, identity = lowest T, alpha = 1).
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
// Element-wise clamp-from-below: y[i] = max(x[i], alpha).
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
// Element-wise maximum against a scalar: y[i] = max(x[i], alpha) for
// i in [0, N). x and y are device pointers; runs on the context's stream.
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
namespace {
// Broadcast kernel: each Y index is decomposed into per-dimension
// coordinates with FIXED_DIVISOR div/mod, mapped to an X offset through
// X_strides (stride 0 on broadcast dimensions), then Y = alpha * X.
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
// Read-only-cache load where the architecture supports it.
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
// Host-side setup for a D-dimensional broadcast: right-aligns X's dims
// against Y's, assigns stride 0 to size-1 (broadcast) dims, validates the
// shapes, and launches BroadcastCUDAKernel. Returns early (no launch) if
// any Y dimension is zero.
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
// Leading dims missing from X broadcast with stride 0.
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} // namespace
// Public Broadcast entry point: checks X_ndim <= Y_ndim and dispatches on
// the runtime Y_ndim to the templated BroadcastCUDAImpl. The trailing
// unnamed bool parameter is unused in the CUDA implementation.
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context, \
bool) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
// Forward declaration; each type's definition comes from the delegate
// macro below so the inverse-sqrt function can be chosen per type.
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
// Defines InvStdCUDAKernel<T> as inv_std[i] = Func(var[i] + epsilon).
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
// InvStd: inv_std[i] = 1 / sqrt(var[i] + epsilon) for i in [0, N),
// computed on the context's CUDA stream via InvStdCUDAKernel.
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
InvStdCUDAKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, epsilon, var, inv_std); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
} // namespace math
} // namespace caffe2
|
d786accc468ee025302494f70f6692e271dda006.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/bds.h>
#include <Status.h>
namespace nd4j {
namespace ops {
namespace helpers {
// Broadcast-dynamic-shape kernel: output[e] = max(x[e'], y[e'']), where an
// input's index clamps to its last element once `e` runs past that input's
// length. The fast path requires unit element-wise strides everywhere.
template <typename T>
static __global__ void bdsLoopKernel(void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape) {
__shared__ T const* x;
__shared__ T const* y;
__shared__ T* z;
__shared__ bool speedWay;
__shared__ Nd4jLong xLen, yLen, outputLen;
// Thread 0 caches pointers, lengths and the fast-path flag once per
// block; the barrier below publishes them to the other threads.
if (threadIdx.x == 0) {
x = reinterpret_cast<T const*>(inputX);
y = reinterpret_cast<T const*>(inputY);
z = reinterpret_cast<T*>(output);
xLen = shape::length(inputXshape);
yLen = shape::length(inputYshape);
outputLen = shape::length(outputShape);
speedWay = true;
speedWay = speedWay && (shape::elementWiseStride(inputXshape) == 1);
speedWay = speedWay && (shape::elementWiseStride(inputYshape) == 1);
speedWay = speedWay && (shape::elementWiseStride(outputShape) == 1);
}
__syncthreads();
// Grid-stride loop over the output elements.
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int e = tid; e < outputLen; e += step) {
T val;
if (speedWay) {
if (e < nd4j::math::nd4j_min(yLen, xLen)) {
val = nd4j::math::nd4j_max(x[e], y[e]);
} else if (e < xLen) {
val = nd4j::math::nd4j_max(x[e], y[yLen - 1]);
} else {
val = nd4j::math::nd4j_max(x[xLen - 1], y[e]);
}
z[e] = val;
}
else {
// Clamp to the LAST element of the shorter input. The original used
// getIndexOffset(xLen, ...) here — one past the end — unlike the y
// branch and the fast path, which both clamp to len - 1.
auto xIndex = e < xLen?shape::getIndexOffset(e, inputXshape, xLen):shape::getIndexOffset(xLen - 1, inputXshape, xLen);
auto yIndex = e < yLen?shape::getIndexOffset(e, inputYshape, yLen):shape::getIndexOffset(yLen - 1, inputYshape, yLen);
auto zIndex = shape::getIndexOffset(e, outputShape, outputLen);
z[zIndex] = nd4j::math::nd4j_max(x[xIndex], y[yIndex]);
}
}
}
// Launches bdsLoopKernel<T> on a single block of 256 threads on `stream`.
// NOTE(review): the 512 bytes of dynamic shared memory look unused — the
// kernel declares only static __shared__ variables; confirm before removing.
template <typename T>
static void bdsLoopH(hipStream_t* stream, void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape) {
hipLaunchKernelGGL(( bdsLoopKernel<T>), dim3(1), dim3(256), 512, *stream, inputX, inputXshape, inputY, inputYshape, output, outputShape);
}
// Computes the broadcast of two shape vectors (element-wise maximum under
// broadcasting rules) into `output`. Scalar-vs-vector combinations are
// resolved on the host; the general case runs bdsLoopKernel on the device.
Nd4jStatus bdsFunctor(nd4j::LaunchContext * context, NDArray* x_shape, NDArray* y_shape, NDArray* output) {
NDArray::prepareSpecialUse({output}, {x_shape, y_shape});
if (x_shape->lengthOf() == 1 || y_shape->lengthOf() == 1) {// special case: at least one scalar shape
x_shape->syncToHost(); y_shape->syncToHost();
if (x_shape->lengthOf() == y_shape->lengthOf()) {
// Both are scalars: keep the greater value.
auto greater = (x_shape->e<Nd4jLong>(0) < y_shape->e<Nd4jLong>(0) ? y_shape : x_shape);
output->assign(greater);
}
else {
// One scalar, one vector: copy the vector, then raise its last
// element to the scalar if the scalar is larger.
auto lesser = (x_shape->lengthOf() == 1 ? x_shape : y_shape);
auto greater = (x_shape->lengthOf() == 1 ? y_shape : x_shape);
output->assign(greater);
auto lastG = greater->lengthOf() - 1;
auto lastL = lesser->lengthOf() - 1;
if (greater->e<Nd4jLong>(lastG) < lesser->e<Nd4jLong>(lastL))
output->p(lastG, lesser->e(lastL));
output->syncToDevice();
}
}
else {
// General case: element-wise max on the device, typed by output dtype.
BUILD_SINGLE_SELECTOR(output->dataType(), bdsLoopH, (context->getCudaStream(), x_shape->getSpecialBuffer(), x_shape->getSpecialShapeInfo(), y_shape->getSpecialBuffer(), y_shape->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()), NUMERIC_TYPES);
}
NDArray::registerSpecialUse({output}, {x_shape, y_shape});
// The original contained a second, unreachable `return Status::OK();`.
return Status::OK();
}
BUILD_SINGLE_TEMPLATE(template void bdsLoopH, (hipStream_t* stream, void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape), NUMERIC_TYPES);
}
}
} | d786accc468ee025302494f70f6692e271dda006.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/bds.h>
#include <Status.h>
namespace nd4j {
namespace ops {
namespace helpers {
// Broadcast-dynamic-shape kernel: output[e] = max(x[e'], y[e'']), where an
// input's index clamps to its last element once `e` runs past that input's
// length. The fast path requires unit element-wise strides everywhere.
template <typename T>
static __global__ void bdsLoopKernel(void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape) {
__shared__ T const* x;
__shared__ T const* y;
__shared__ T* z;
__shared__ bool speedWay;
__shared__ Nd4jLong xLen, yLen, outputLen;
// Thread 0 caches pointers, lengths and the fast-path flag once per
// block; the barrier below publishes them to the other threads.
if (threadIdx.x == 0) {
x = reinterpret_cast<T const*>(inputX);
y = reinterpret_cast<T const*>(inputY);
z = reinterpret_cast<T*>(output);
xLen = shape::length(inputXshape);
yLen = shape::length(inputYshape);
outputLen = shape::length(outputShape);
speedWay = true;
speedWay = speedWay && (shape::elementWiseStride(inputXshape) == 1);
speedWay = speedWay && (shape::elementWiseStride(inputYshape) == 1);
speedWay = speedWay && (shape::elementWiseStride(outputShape) == 1);
}
__syncthreads();
// Grid-stride loop over the output elements.
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int e = tid; e < outputLen; e += step) {
T val;
if (speedWay) {
if (e < nd4j::math::nd4j_min(yLen, xLen)) {
val = nd4j::math::nd4j_max(x[e], y[e]);
} else if (e < xLen) {
val = nd4j::math::nd4j_max(x[e], y[yLen - 1]);
} else {
val = nd4j::math::nd4j_max(x[xLen - 1], y[e]);
}
z[e] = val;
}
else {
// Clamp to the LAST element of the shorter input. The original used
// getIndexOffset(xLen, ...) here — one past the end — unlike the y
// branch and the fast path, which both clamp to len - 1.
auto xIndex = e < xLen?shape::getIndexOffset(e, inputXshape, xLen):shape::getIndexOffset(xLen - 1, inputXshape, xLen);
auto yIndex = e < yLen?shape::getIndexOffset(e, inputYshape, yLen):shape::getIndexOffset(yLen - 1, inputYshape, yLen);
auto zIndex = shape::getIndexOffset(e, outputShape, outputLen);
z[zIndex] = nd4j::math::nd4j_max(x[xIndex], y[yIndex]);
}
}
}
// Launches bdsLoopKernel<T> on a single block of 256 threads on `stream`.
// NOTE(review): the 512 bytes of dynamic shared memory look unused — the
// kernel declares only static __shared__ variables; confirm before removing.
template <typename T>
static void bdsLoopH(cudaStream_t* stream, void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape) {
bdsLoopKernel<T><<<1, 256, 512, *stream>>>(inputX, inputXshape, inputY, inputYshape, output, outputShape);
}
// Computes the broadcast of two shape vectors (element-wise maximum under
// broadcasting rules) into `output`. Scalar-vs-vector combinations are
// resolved on the host; the general case runs bdsLoopKernel on the device.
Nd4jStatus bdsFunctor(nd4j::LaunchContext * context, NDArray* x_shape, NDArray* y_shape, NDArray* output) {
NDArray::prepareSpecialUse({output}, {x_shape, y_shape});
if (x_shape->lengthOf() == 1 || y_shape->lengthOf() == 1) {// special case: at least one scalar shape
x_shape->syncToHost(); y_shape->syncToHost();
if (x_shape->lengthOf() == y_shape->lengthOf()) {
// Both are scalars: keep the greater value.
auto greater = (x_shape->e<Nd4jLong>(0) < y_shape->e<Nd4jLong>(0) ? y_shape : x_shape);
output->assign(greater);
}
else {
// One scalar, one vector: copy the vector, then raise its last
// element to the scalar if the scalar is larger.
auto lesser = (x_shape->lengthOf() == 1 ? x_shape : y_shape);
auto greater = (x_shape->lengthOf() == 1 ? y_shape : x_shape);
output->assign(greater);
auto lastG = greater->lengthOf() - 1;
auto lastL = lesser->lengthOf() - 1;
if (greater->e<Nd4jLong>(lastG) < lesser->e<Nd4jLong>(lastL))
output->p(lastG, lesser->e(lastL));
output->syncToDevice();
}
}
else {
// General case: element-wise max on the device, typed by output dtype.
BUILD_SINGLE_SELECTOR(output->dataType(), bdsLoopH, (context->getCudaStream(), x_shape->getSpecialBuffer(), x_shape->getSpecialShapeInfo(), y_shape->getSpecialBuffer(), y_shape->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()), NUMERIC_TYPES);
}
NDArray::registerSpecialUse({output}, {x_shape, y_shape});
// The original contained a second, unreachable `return Status::OK();`.
return Status::OK();
}
BUILD_SINGLE_TEMPLATE(template void bdsLoopH, (cudaStream_t* stream, void const* inputX, Nd4jLong const* inputXshape, void const* inputY, Nd4jLong const* inputYshape, void* output, Nd4jLong* outputShape), NUMERIC_TYPES);
}
}
} |
dce91c901f2e000eeadde35c302a1ae5a87e8c7d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// Aborts with a diagnostic if `result` is not hipSuccess. Checking is
// compiled in only for DEBUG/_DEBUG builds; release builds silently
// ignore the status.
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
// NOTE: __FILE__/__LINE__ point at this helper, not the failing call site.
printf("Error: %s : %d", __FILE__, __LINE__);
printf("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
// Device-global scalar, set from the host with hipMemcpyToSymbol.
__device__ float devData;
// Prints the current value of devData, then increments it by 2.
__global__ void checkGlobalVar()
{
// display original value
printf("Device global var: %f\n", devData);
// alter the value
devData += 2.0f;
}
int main(void)
{
// initialize global variable
float value = 3.14f;
checkCuda(hipMemcpyToSymbol(devData, &value, sizeof(float)));
printf_s("Host: copied %f to the global variable\n", value);
// invoke the kernel
hipLaunchKernelGGL(( checkGlobalVar) , dim3(1), dim3(1) , 0, 0, );
// copy the global variable back to the host
checkCuda(hipMemcpyFromSymbol(&value, devData, sizeof(float)));
printf_s("Host: the value changed by the kernel to %f\n", value);
checkCuda(hipDeviceReset());
return 0;
} | dce91c901f2e000eeadde35c302a1ae5a87e8c7d.cu | #include <stdio.h>
#include <cuda_runtime.h>
// Aborts with a diagnostic if `result` is not cudaSuccess. Checking is
// compiled in only for DEBUG/_DEBUG builds; release builds silently
// ignore the status.
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
// NOTE: __FILE__/__LINE__ point at this helper, not the failing call site.
printf("Error: %s : %d", __FILE__, __LINE__);
printf("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
// Device-global scalar, set from the host with cudaMemcpyToSymbol.
__device__ float devData;
// Prints the current value of devData, then increments it by 2.
__global__ void checkGlobalVar()
{
// display original value
printf("Device global var: %f\n", devData);
// alter the value
devData += 2.0f;
}
int main(void)
{
// initialize global variable
float value = 3.14f;
checkCuda(cudaMemcpyToSymbol(devData, &value, sizeof(float)));
printf_s("Host: copied %f to the global variable\n", value);
// invoke the kernel
checkGlobalVar <<<1, 1 >>>();
// copy the global variable back to the host
checkCuda(cudaMemcpyFromSymbol(&value, devData, sizeof(float)));
printf_s("Host: the value changed by the kernel to %f\n", value);
checkCuda(cudaDeviceReset());
return 0;
} |
b40e8f31db3a0697174d9580ca1c8064c2a7b1bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//!!nvcc -c test.cu --compiler-options -fPIC
//!g++ -o program -L/usr/local/cuda/lib64 main.cpp test.o -lcuda -lcudart
__global__ void add(float *a,float *b,float *c)
{
*c = *a +*b;
} | b40e8f31db3a0697174d9580ca1c8064c2a7b1bd.cu | #include "includes.h"
//!!nvcc -c test.cu --compiler-options -fPIC
//!g++ -o program -L/usr/local/cuda/lib64 main.cpp test.o -lcuda -lcudart
__global__ void add(float *a,float *b,float *c)
{
*c = *a +*b;
} |
ef9950d77dea1773e8d27751dfd5cc84ae33fdd9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "modified_insertion_sort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dist = NULL;
hipMalloc(&dist, XSIZE*YSIZE);
int dist_pitch = 2;
int *index = NULL;
hipMalloc(&index, XSIZE*YSIZE);
int index_pitch = 2;
int width = XSIZE;
int height = YSIZE;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
modified_insertion_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, dist,dist_pitch,index,index_pitch,width,height,k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
modified_insertion_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, dist,dist_pitch,index,index_pitch,width,height,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
modified_insertion_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, dist,dist_pitch,index,index_pitch,width,height,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ef9950d77dea1773e8d27751dfd5cc84ae33fdd9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "modified_insertion_sort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks modified_insertion_sort over every (matrix size, launch
// config) pair, printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per combo.
// argv[1] = number of matrix sizes from matrices_ to sweep.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  if (argc < 2) {  // argv[1] was dereferenced unconditionally before
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  char *p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // Allocate in BYTES — the original passed raw element counts,
      // under-allocating both buffers by sizeof(T).
      float *dist = NULL;
      cudaMalloc(&dist, (size_t)XSIZE * YSIZE * sizeof(float));
      int dist_pitch = 2;
      int *index = NULL;
      cudaMalloc(&index, (size_t)XSIZE * YSIZE * sizeof(int));
      int index_pitch = 2;
      int width = XSIZE;
      int height = YSIZE;
      int k = 1;
      // Round the grid up so it covers the whole matrix.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force context creation outside the timed region
      modified_insertion_sort<<<gridBlock, threadBlock>>>(dist, dist_pitch, index, index_pitch, width, height, k);
      cudaDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        modified_insertion_sort<<<gridBlock, threadBlock>>>(dist, dist_pitch, index, index_pitch, width, height, k);
      }
      cudaDeviceSynchronize();  // drain warm-up work before timing starts
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        modified_insertion_sort<<<gridBlock, threadBlock>>>(dist, dist_pitch, index, index_pitch, width, height, k);
      }
      // Kernel launches are asynchronous: without this sync the timer
      // measured only launch overhead, not execution time.
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
      // Release per-iteration buffers (the original leaked every pair).
      cudaFree(dist);
      cudaFree(index);
    }
  }
  return 0;
}
da39da62c65d62956f51624a4072637cb40c97a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h> // for ceil()
#include<stdexcept>
#include"tools.h"
#include"common.h"
// CUDA debugging
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define gpuKernelExecErrChk() { gpuCheckKernelExecutionError( __FILE__, __LINE__); }
// Prints "GPUassert: <error string> <file> <line>" to stderr when `code`
// is an error; exits the process with that code unless abort is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if( abort )
exit(code);
}
}
// Checks a kernel launch (invalid-argument errors) then synchronizes to
// surface execution errors; aborts via gpuAssert on either.
inline void gpuCheckKernelExecutionError( const char *file, int line)
{
/**
Check for invalid launch argument, then force the host to wait
until the kernel stops and checks for an execution error.
The synchronisation can be eliminated if there is a subsequent blocking
API call like cudaMemcopy. In this case the hipMemcpy call can return
either errors which occurred during the kernel execution or those from
the memory copy itself. This can be confusing for the beginner, so it is
recommended to use explicit synchronisation after a kernel launch during
debugging to make it easier to understand where problems might be arising.
*/
gpuAssert( hipPeekAtLastError(), file, line);
gpuAssert( hipDeviceSynchronize(), file, line);
}
//---------------------------------------------------------
// STUDENTS BEGIN
// Computes y = M*v for a CSR matrix: one thread per row; each thread
// walks its row's non-zeros sequentially and accumulates the dot product.
__global__ void kernelSpmvCSR(uint rowsNbr, const float *values, const uint *col_ind, const uint *row_ptr, const float *v, float *y)
{
    const uint row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= rowsNbr)
        return;  // threads past the last row have nothing to do
    const uint first = row_ptr[row];
    const uint last = row_ptr[row + 1];
    float acc = 0.0f;
    for (uint nz = first; nz < last; ++nz)
    {
        acc += values[nz] * v[col_ind[nz]];
    }
    y[row] = acc;
}
// STUDENTS END
//---------------------------------------------------------
/**
Compute y = M*v on the GPU with the CSR format (one thread per row).
Allocates device buffers, uploads the CSR arrays and the vector, launches
kernelSpmvCSR, downloads the result, and prints the measured run time.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvCSR(const MatrixCSR *m, const Matrix *v, const Matrix *reference = NULL)
{
const char *name = "CSR method on GPU";
double gpuComputeTime = 0.0; // time measurement
double gpuRunTime = 0.0; // time measurement
if(m->w != v->h)
throw std::runtime_error("Failed to multiply matrices, size mismatch.");
if(v->w != 1)
throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
// output matrix size
uint width = v->w;
uint height = m->h;
Matrix *mv = createMatrix(width, height);
// data size
uint valuesSizeInBytes = m->nzNbr * sizeof(float);
uint col_indSizeInBytes = m->nzNbr * sizeof(uint);
uint row_ptrSizeInBytes = (m->h + 1) * sizeof(uint);
uint vSizeInBytes = (v->h) * sizeof(float);
uint mvSizeInBytes = (m->h) * sizeof(float);
// make a dummy memory allocation to wake up NVIDIA driver
// before starting time measurement
// NOTE(review): gpuWakeUp is never freed — 1-byte leak per call; confirm.
int *gpuWakeUp;
gpuErrchk( hipMalloc( (void**) &gpuWakeUp, 1) );
// STUDENTS BEGIN
// We call kernelSpmvCSR; first allocate the device-side parameters.
float* valuesIn;
gpuErrchk(hipMalloc((void**) &valuesIn, valuesSizeInBytes));
uint* colIndIn;
gpuErrchk(hipMalloc((void**) &colIndIn, col_indSizeInBytes));
uint* rowPtrIn;
gpuErrchk(hipMalloc((void**) &rowPtrIn, row_ptrSizeInBytes));
float* vIn;
gpuErrchk(hipMalloc((void**) &vIn, vSizeInBytes));
float* mvOut;
gpuErrchk(hipMalloc((void**) &mvOut, mvSizeInBytes));
// transfer data from CPU memory to GPU memory
top(0); // start time measurement
gpuErrchk(hipMemcpy(valuesIn, m->data, valuesSizeInBytes, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(colIndIn, m->col_ind, col_indSizeInBytes, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(rowPtrIn, m->row_ptr, row_ptrSizeInBytes, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(vIn, v->data, vSizeInBytes, hipMemcpyHostToDevice));
// NOTE(review): uploading the output buffer looks unnecessary — the
// kernel overwrites every y[r] it touches; confirm before removing.
gpuErrchk(hipMemcpy(mvOut, mv->data, mvSizeInBytes, hipMemcpyHostToDevice));
// 32 threads per block, enough blocks to cover all m->h rows
dim3 dimBlock(32);
dim3 dimGrid((int) ceilf(m->h*1.0/dimBlock.x));
top(1);
// Kernel launch
hipLaunchKernelGGL(( kernelSpmvCSR), dim3(dimGrid), dim3(dimBlock), 0, 0, m->h, valuesIn, colIndIn, rowPtrIn, vIn, mvOut);
gpuKernelExecErrChk();
gpuComputeTime = top(1); // pure computation duration
gpuErrchk(hipMemcpy(mv->data, mvOut, mvSizeInBytes, hipMemcpyDeviceToHost));
gpuRunTime = top(0); // computation and memory transfer time
// release GPU memory
hipFree(valuesIn);
hipFree(colIndIn);
hipFree(rowPtrIn);
hipFree(vIn);
hipFree(mvOut);
// STUDENTS END
// check result, display run time if result is correct
bool displayRunTime = true;
if( reference )
{
if(! checkResult(name, reference, mv))
displayRunTime = false;
}
if(displayRunTime)
printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
return mv;
}
//---------------------------------------------------------
// STUDENTS BEGIN
/*__global__ void kernelSpmvELL(uint rowsNbr, const float *values, const uint *col_ind, uint nzRowSz, const float *v, float *y)
{
}*/
// STUDENTS END
//---------------------------------------------------------
/**
Compute y = M*v on the GPU with the ELL format.
NOTE(review): the GPU path is still a TODO — `mv` is returned exactly as
produced by createMatrix, with no kernel launched.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvELL(const MatrixELL *m, const Matrix *v, const Matrix *reference = NULL)
{
const char *name = "ELL method on GPU";
double gpuComputeTime = 0.0; // time measurement
double gpuRunTime = 0.0; // time measurement
if(m->w != v->h)
throw std::runtime_error("Failed to multiply matrices, size mismatch.");
if(v->w != 1)
throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
// output matrix size
uint width = v->w;
uint height = m->h;
Matrix *mv = createMatrix(width, height);
// data size (ELL stores nzRowSz entries per row, padded)
uint valuesSizeInBytes = m->nzRowSz * m->h * sizeof(float);
uint col_indSizeInBytes = m->nzRowSz * m->h * sizeof(uint);
uint vSizeInBytes = (v->h) * sizeof(float);
uint mvSizeInBytes = (m->h) * sizeof(float);
// make a dummy memory allocation to wake up NVIDIA driver
// before starting time measurement
int *gpuWakeUp;
gpuErrchk( hipMalloc( (void**) &gpuWakeUp, 1) );
// STUDENTS BEGIN
//TODO
// STUDENTS END
// check result, display run time if result is correct
bool displayRunTime = true;
if( reference )
{
if(! checkResult(name, reference, mv))
displayRunTime = false;
}
if(displayRunTime)
printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
return mv;
}
//---------------------------------------------------------
// STUDENTS BEGIN
// Computes y = M*v for a CSR matrix using one warp (32 lanes) per row:
// each lane accumulates a strided partial dot product into shared memory
// (one float per thread; dynamic size supplied at launch), then the warp
// reduces its 32 partials with a tree reduction.
__global__ void kernelSpmvCSRVect(uint rowsNbr, const float *values, const uint *col_ind, const uint *row_ptr, const float *v, float *y)
{
// dynamically allocated shared memory, size given at kernel call
extern __shared__ float smem[];
// Access the reduction buffer through a volatile pointer: the barrier-free
// tree reduction below relies on warp-lockstep execution, and without
// volatile the compiler may keep partials in registers, breaking the
// cross-lane reads. NOTE(review): on NVIDIA Volta+ (independent thread
// scheduling) __syncwarp() between steps would also be needed — confirm
// the target architectures.
volatile float *dots = smem;
uint threadId = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
uint warpId = threadId / 32; // global warp index
uint lane = threadId % 32; // thread index within the warp
uint r = warpId; // one row per warp
if( r < rowsNbr )
{
int row_beg = row_ptr[r];
int row_end = row_ptr[r+1];
dots[threadIdx.x] = 0.0f;
// each lane covers every 32nd non-zero of row r (coalesced reads)
for(uint i = row_beg + lane; i < row_end; i+=32)
dots[threadIdx.x] += values[i] * v[col_ind[i]];
// warp-synchronous tree reduction in shared memory
if( lane < 16 ) dots[threadIdx.x] += dots[threadIdx.x + 16];
if( lane < 8 ) dots[threadIdx.x] += dots[threadIdx.x + 8];
if( lane < 4 ) dots[threadIdx.x] += dots[threadIdx.x + 4];
if( lane < 2 ) dots[threadIdx.x] += dots[threadIdx.x + 2];
if( lane < 1 ) dots[threadIdx.x] += dots[threadIdx.x + 1];
// first lane writes the row result to global memory
if( lane == 0 )
y[r] = dots[threadIdx.x];
}
}
// STUDENTS END
//---------------------------------------------------------
/**
Compute MxV on GPU. CSR-Vect method.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvCSRVect(const MatrixCSR *m, const Matrix *v, const Matrix *reference = NULL)
{
 const char *name = "CSR-Vect method on GPU";
 double gpuComputeTime = 0.0; // pure kernel time, in ms
 double gpuRunTime = 0.0; // kernel + transfer time, in ms
 // dimension sanity checks: (h x w) matrix times (h x 1) column vector
 if(m->w != v->h)
 throw std::runtime_error("Failed to multiply matrices, size mismatch.");
 if(v->w != 1)
 throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
 // output matrix size (a column vector with m->h entries)
 uint width = v->w;
 uint height = m->h;
 Matrix *mv = createMatrix(width, height);
 // data size of the three CSR arrays, the input vector and the result
 uint valuesSizeInBytes = m->nzNbr * sizeof(float);
 uint col_indSizeInBytes = m->nzNbr * sizeof(uint);
 uint row_ptrSizeInBytes = (m->h + 1) * sizeof(uint);
 uint vSizeInBytes = (v->h) * sizeof(float);
 uint mvSizeInBytes = (m->h) * sizeof(float);
 // make a dummy memory allocation to wake up NVIDIA driver
 // before starting time measurement
 int *gpuWakeUp;
 gpuErrchk( hipMalloc( (void**) &gpuWakeUp, 1) );
 // STUDENTS BEGIN
 //TODO
 // STUDENTS END
 // release the wake-up dummy allocation (it was previously leaked)
 gpuErrchk( hipFree(gpuWakeUp) );
 // check result, display run time if result is correct
 bool displayRunTime = true;
 if( reference )
 {
 if(! checkResult(name, reference, mv))
 displayRunTime = false;
 }
 if(displayRunTime)
 printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
 return mv;
}
| da39da62c65d62956f51624a4072637cb40c97a5.cu | #include<stdio.h>
#include<math.h> // for ceil()
#include<stdexcept>
#include"tools.h"
#include"common.h"
// CUDA debugging
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define gpuKernelExecErrChk() { gpuCheckKernelExecutionError( __FILE__, __LINE__); }
// Report a failed CUDA API call with its source location; optionally
// terminate the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
 if (code == cudaSuccess)
  return; // nothing to report
 fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
 if (abort)
  exit(code);
}
inline void gpuCheckKernelExecutionError( const char *file, int line)
{
 /**
 Check for invalid launch argument, then force the host to wait
 until the kernel stops and checks for an execution error.
 The synchronisation can be eliminated if there is a subsequent blocking
 API call like cudaMemcopy. In this case the cudaMemcpy call can return
 either errors which occurred during the kernel execution or those from
 the memory copy itself. This can be confusing for the beginner, so it is
 recommended to use explicit synchronisation after a kernel launch during
 debugging to make it easier to understand where problems might be arising.
 */
 // Order matters: peek first (reads the launch error without clearing it),
 // then synchronize to surface asynchronous execution faults.
 gpuAssert( cudaPeekAtLastError(), file, line);
 gpuAssert( cudaDeviceSynchronize(), file, line);
}
//---------------------------------------------------------
// STUDENTS BEGIN
// CSR-scalar SpMV kernel: one thread per matrix row. Each thread walks its
// row segment [row_ptr[r], row_ptr[r+1]) and accumulates the dot product
// with the dense vector v into y[r].
__global__ void kernelSpmvCSR(uint rowsNbr, const float *values, const uint *col_ind, const uint *row_ptr, const float *v, float *y)
{
 uint row = blockIdx.x * blockDim.x + threadIdx.x;
 if( row >= rowsNbr )
  return; // guard the grid tail
 const int first = row_ptr[row];
 const int last = row_ptr[row+1];
 float acc = 0.0f;
 for(uint k = first; k < last; k++)
  acc += values[k] * v[col_ind[k]];
 y[row] = acc;
}
// STUDENTS END
//---------------------------------------------------------
/**
Compute MxV on GPU. CSR method.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvCSR(const MatrixCSR *m, const Matrix *v, const Matrix *reference = NULL)
{
 const char *name = "CSR method on GPU";
 double gpuComputeTime = 0.0; // pure kernel time, in ms
 double gpuRunTime = 0.0; // kernel + transfer time, in ms
 // dimension sanity checks: (h x w) matrix times (h x 1) column vector
 if(m->w != v->h)
 throw std::runtime_error("Failed to multiply matrices, size mismatch.");
 if(v->w != 1)
 throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
 // output matrix size (a column vector with m->h entries)
 uint width = v->w;
 uint height = m->h;
 Matrix *mv = createMatrix(width, height);
 // data size of the three CSR arrays, the input vector and the result
 uint valuesSizeInBytes = m->nzNbr * sizeof(float);
 uint col_indSizeInBytes = m->nzNbr * sizeof(uint);
 uint row_ptrSizeInBytes = (m->h + 1) * sizeof(uint);
 uint vSizeInBytes = (v->h) * sizeof(float);
 uint mvSizeInBytes = (m->h) * sizeof(float);
 // make a dummy memory allocation to wake up NVIDIA driver
 // before starting time measurement
 int *gpuWakeUp;
 gpuErrchk( cudaMalloc( (void**) &gpuWakeUp, 1) );
 // STUDENTS BEGIN
 // allocate the device buffers passed to kernelSpmvCSR
 float* valuesIn;
 gpuErrchk(cudaMalloc((void**) &valuesIn, valuesSizeInBytes));
 uint* colIndIn;
 gpuErrchk(cudaMalloc((void**) &colIndIn, col_indSizeInBytes));
 uint* rowPtrIn;
 gpuErrchk(cudaMalloc((void**) &rowPtrIn, row_ptrSizeInBytes));
 float* vIn;
 gpuErrchk(cudaMalloc((void**) &vIn, vSizeInBytes));
 float* mvOut;
 gpuErrchk(cudaMalloc((void**) &mvOut, mvSizeInBytes));
 // transfer data from CPU memory to GPU memory
 top(0); // start time measurement
 gpuErrchk(cudaMemcpy(valuesIn, m->data, valuesSizeInBytes, cudaMemcpyHostToDevice));
 gpuErrchk(cudaMemcpy(colIndIn, m->col_ind, col_indSizeInBytes, cudaMemcpyHostToDevice));
 gpuErrchk(cudaMemcpy(rowPtrIn, m->row_ptr, row_ptrSizeInBytes, cudaMemcpyHostToDevice));
 gpuErrchk(cudaMemcpy(vIn, v->data, vSizeInBytes, cudaMemcpyHostToDevice));
 gpuErrchk(cudaMemcpy(mvOut, mv->data, mvSizeInBytes, cudaMemcpyHostToDevice));
 // one thread per row, 32-thread blocks, enough blocks to cover m->h rows
 dim3 dimBlock(32);
 dim3 dimGrid((int) ceilf(m->h*1.0/dimBlock.x));
 top(1);
 // Kernel launch
 kernelSpmvCSR<<<dimGrid, dimBlock>>>(m->h, valuesIn, colIndIn, rowPtrIn, vIn, mvOut);
 gpuKernelExecErrChk();
 gpuComputeTime = top(1); // pure computation duration
 gpuErrchk(cudaMemcpy(mv->data, mvOut, mvSizeInBytes, cudaMemcpyDeviceToHost));
 gpuRunTime = top(0); // computation and memory transfert time
 // release GPU memory (these are device buffers, not CPU memory);
 // check the frees for consistency with the other API calls
 gpuErrchk(cudaFree(valuesIn));
 gpuErrchk(cudaFree(colIndIn));
 gpuErrchk(cudaFree(rowPtrIn));
 gpuErrchk(cudaFree(vIn));
 gpuErrchk(cudaFree(mvOut));
 // STUDENTS END
 // release the wake-up dummy allocation (it was previously leaked)
 gpuErrchk(cudaFree(gpuWakeUp));
 // check result, display run time if result is correct
 bool displayRunTime = true;
 if( reference )
 {
 if(! checkResult(name, reference, mv))
 displayRunTime = false;
 }
 if(displayRunTime)
 printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
 return mv;
}
//---------------------------------------------------------
// STUDENTS BEGIN
/*__global__ void kernelSpmvELL(uint rowsNbr, const float *values, const uint *col_ind, uint nzRowSz, const float *v, float *y)
{
}*/
// STUDENTS END
//---------------------------------------------------------
/**
Compute MxV on GPU. ELL method.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvELL(const MatrixELL *m, const Matrix *v, const Matrix *reference = NULL)
{
 const char *name = "ELL method on GPU";
 double gpuComputeTime = 0.0; // pure kernel time, in ms
 double gpuRunTime = 0.0; // kernel + transfer time, in ms
 // dimension sanity checks: (h x w) matrix times (h x 1) column vector
 if(m->w != v->h)
 throw std::runtime_error("Failed to multiply matrices, size mismatch.");
 if(v->w != 1)
 throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
 // output matrix size (a column vector with m->h entries)
 uint width = v->w;
 uint height = m->h;
 Matrix *mv = createMatrix(width, height);
 // data size: ELL stores nzRowSz padded entries per row
 uint valuesSizeInBytes = m->nzRowSz * m->h * sizeof(float);
 uint col_indSizeInBytes = m->nzRowSz * m->h * sizeof(uint);
 uint vSizeInBytes = (v->h) * sizeof(float);
 uint mvSizeInBytes = (m->h) * sizeof(float);
 // make a dummy memory allocation to wake up NVIDIA driver
 // before starting time measurement
 int *gpuWakeUp;
 gpuErrchk( cudaMalloc( (void**) &gpuWakeUp, 1) );
 // STUDENTS BEGIN
 //TODO
 // STUDENTS END
 // release the wake-up dummy allocation (it was previously leaked)
 gpuErrchk( cudaFree(gpuWakeUp) );
 // check result, display run time if result is correct
 bool displayRunTime = true;
 if( reference )
 {
 if(! checkResult(name, reference, mv))
 displayRunTime = false;
 }
 if(displayRunTime)
 printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
 return mv;
}
//---------------------------------------------------------
// STUDENTS BEGIN
// CSR-Vector SpMV kernel: one 32-lane warp per matrix row; each lane handles
// every 32nd nonzero of the row, then the warp reduces its partials in
// shared memory. The shared array size must be blockDim.x floats, passed as
// the dynamic shared-memory argument at launch.
__global__ void kernelSpmvCSRVect(uint rowsNbr, const float *values, const uint *col_ind, const uint *row_ptr, const float *v, float *y)
{
 // dynamically allocated shared memory, size given at kernel call.
 // Declared volatile so the warp-synchronous reduction below is not
 // cached in registers (same pattern as the volatile partialSums used
 // by comparable CSR-vector kernels); without it the compiler may keep
 // stale values across the unsynchronized reduction steps.
 extern __shared__ volatile float dots[];
 uint threadId = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
 uint warpId = threadId / 32; // global warp index
 uint lane = threadId % 32; // thread index within the warp
 uint r = warpId; // one row per warp
 if( r < rowsNbr )
 {
 int row_beg = row_ptr[r];
 int row_end = row_ptr[r+1];
 // accumulate the partial dot product in a register, publish it once
 float mySum = 0.0f;
 for(uint i = row_beg + lane; i < row_end; i+=32)
 mySum += values[i] * v[col_ind[i]];
 dots[threadIdx.x] = mySum;
 // warp-synchronous parallel reduction in shared memory
 if( lane < 16 ) dots[threadIdx.x] += dots[threadIdx.x + 16];
 if( lane < 8 ) dots[threadIdx.x] += dots[threadIdx.x + 8];
 if( lane < 4 ) dots[threadIdx.x] += dots[threadIdx.x + 4];
 if( lane < 2 ) dots[threadIdx.x] += dots[threadIdx.x + 2];
 if( lane < 1 ) dots[threadIdx.x] += dots[threadIdx.x + 1];
 // first lane of the warp writes the row result to global memory
 if( lane == 0 )
 y[r] = dots[threadIdx.x];
 }
}
// STUDENTS END
//---------------------------------------------------------
/**
Compute MxV on GPU. CSR-Vect method.
A reference result can be passed to check that the computation is ok.
*/
Matrix* gpuSpmvCSRVect(const MatrixCSR *m, const Matrix *v, const Matrix *reference = NULL)
{
 const char *name = "CSR-Vect method on GPU";
 double gpuComputeTime = 0.0; // pure kernel time, in ms
 double gpuRunTime = 0.0; // kernel + transfer time, in ms
 // dimension sanity checks: (h x w) matrix times (h x 1) column vector
 if(m->w != v->h)
 throw std::runtime_error("Failed to multiply matrices, size mismatch.");
 if(v->w != 1)
 throw std::runtime_error("Failed to multiply matrices, vector size mismatch.");
 // output matrix size (a column vector with m->h entries)
 uint width = v->w;
 uint height = m->h;
 Matrix *mv = createMatrix(width, height);
 // data size of the three CSR arrays, the input vector and the result
 uint valuesSizeInBytes = m->nzNbr * sizeof(float);
 uint col_indSizeInBytes = m->nzNbr * sizeof(uint);
 uint row_ptrSizeInBytes = (m->h + 1) * sizeof(uint);
 uint vSizeInBytes = (v->h) * sizeof(float);
 uint mvSizeInBytes = (m->h) * sizeof(float);
 // make a dummy memory allocation to wake up NVIDIA driver
 // before starting time measurement
 int *gpuWakeUp;
 gpuErrchk( cudaMalloc( (void**) &gpuWakeUp, 1) );
 // STUDENTS BEGIN
 //TODO
 // STUDENTS END
 // release the wake-up dummy allocation (it was previously leaked)
 gpuErrchk( cudaFree(gpuWakeUp) );
 // check result, display run time if result is correct
 bool displayRunTime = true;
 if( reference )
 {
 if(! checkResult(name, reference, mv))
 displayRunTime = false;
 }
 if(displayRunTime)
 printf("%s: M(%dx%d)xV computed in %f ms (%f ms of pure computation).\n", name, m->w, m->h, gpuRunTime, gpuComputeTime);
 return mv;
}
|
fc8762ea866b21f97734ca8829fb6967d54d513b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cmath>
#include <hip/hip_runtime.h>
using namespace std;
using FLOAT = float;
#define CHECK(test) if (test != hipSuccess) throw "error";
const int NTHR_PER_BLK = 256; // Number of CUDA threads per block
const int NBLOCK = 56*4; // Number of CUDA blocks (SMs on P100)
const int Npoint = NBLOCK*NTHR_PER_BLK; // No. of independent samples
const int Neq = 100000; // No. of generations to equilibrate
const int Ngen_per_block = 5000; // No. of generations per block
const int Nsample = 100; // No. of blocks to sample
const float DELTA = 2.0; // Random step size
// Explicitly typed constants so can easily work with both floats and doubles
static const FLOAT FOUR = 4.0;
static const FLOAT TWO = 2.0;
static const FLOAT ONE = 1.0;
static const FLOAT HALF = 0.5;
static const FLOAT ZERO = 0.0;
// Precision-matched math wrappers: overload resolution picks the float or
// double variant so device code stays correct when FLOAT is switched.
__device__ __forceinline__ float EXP(float x) {return expf(x);}
__device__ __forceinline__ double EXP(double x) {return exp(x);}
__device__ __forceinline__ float SQRT(float x) {return sqrtf(x);}
__device__ __forceinline__ double SQRT(double x) {return sqrt(x);}
// Linear congruential generator: advances *seed in place and returns a
// uniform pseudo-random sample in [0, 1). m = 2^31; the multiply wraps
// modulo 2^32 (unsigned arithmetic) before the % m is applied.
__device__
float LCG_random(unsigned int * seed) {
 const unsigned int m = 2147483648; // modulus, 2^31
 const unsigned int a = 26757677; // multiplier
 const unsigned int c = 1; // increment
 *seed = (a * (*seed) + c) % m;
 return (float) (*seed) / (float) m;
}
// Advance the LCG state once without producing a sample; used to scramble
// the per-thread seeds after they are derived from the base seed.
__device__
void LCG_random_init(unsigned int * seed) {
 const unsigned int m = 2147483648; // modulus, 2^31 (same LCG as LCG_random)
 const unsigned int a = 26757677; // multiplier
 const unsigned int c = 1; // increment
 *seed = (a * (*seed) + c) % m;
}
// Two-phase reduction: every thread first accumulates a grid-strided
// partial sum of data[0..n-1], then each block reduces its partials in
// shared memory; blocksums[blockIdx.x] receives the block total.
// NOTE(review): the tree starts at stride 128, so this assumes
// blockDim.x <= 256 — confirm with the launch sites (256 and NBLOCK).
__global__ void SumWithinBlocks(const int n, const FLOAT* data, FLOAT* blocksums) {
 int nthread = blockDim.x*gridDim.x;
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 __shared__ FLOAT sdata[512]; // max threads
 // Every thread in every block computes partial sum over rest of vector
 FLOAT st=ZERO;
 while (i < n) {
 st += data[i];
 i+=nthread;
 }
 sdata[threadIdx.x] = st;
 __syncthreads();
 // Now do binary tree sum within a block
 int tid = threadIdx.x;
 for (unsigned int s=128; s>0; s>>=1) {
 if (tid<s && (tid+s)<blockDim.x) {
 sdata[tid] += sdata[tid + s];
 }
 __syncthreads(); // barrier each level; reached by all threads (guard is inside)
 }
 if (tid==0) blocksums[blockIdx.x] = sdata[0];
}
// Reduce each of the 4 per-point statistics arrays (r1, r2, r12, accept
// count, laid out contiguously in stats) to a single value in statsum,
// using blocksums as intermediate workspace (two-pass block reduction).
void sum_stats(const int Npoint, const FLOAT* stats, FLOAT* statsum, FLOAT* blocksums) {
 for (int what=0; what<4; what++) {
 hipLaunchKernelGGL(( SumWithinBlocks), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats+what*Npoint, blocksums);
 hipLaunchKernelGGL(( SumWithinBlocks), dim3(1),dim3(NBLOCK), 0, 0, NBLOCK, blocksums, statsum+what);
 }
}
// Distances of the two electrons from the origin (r1, r2) and from each
// other (r12), given their Cartesian coordinates.
__device__ __forceinline__ void compute_distances(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2,
 FLOAT& r1, FLOAT& r2, FLOAT& r12) {
 r1 = SQRT(x1*x1 + y1*y1 + z1*z1);
 r2 = SQRT(x2*x2 + y2*y2 + z2*z2);
 const FLOAT dx = x1-x2;
 const FLOAT dy = y1-y2;
 const FLOAT dz = z1-z2;
 r12 = SQRT(dx*dx + dy*dy + dz*dz);
}
// Trial wave function for the two-electron system:
// psi = (1 + r12/2) * exp(-2*(r1 + r2)), with distances from compute_distances.
__device__ __forceinline__ FLOAT wave_function(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2) {
 FLOAT r1, r2, r12;
 compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
 return (ONE + HALF*r12)*EXP(-TWO*(r1 + r2));
}
// Initialize random number generator
// One thread per sample point: derive a per-thread state by XOR-ing the
// base seed with the thread index, then scramble it with one LCG step.
__global__ void initran(unsigned int seed, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 states[i] = seed ^ i;
 LCG_random_init(&states[i]);
}
// ZERO stats counters on the GPU
// Reset the four per-point accumulators (stored as 4 contiguous arrays of
// Npoint entries each) to zero; one thread per sample point.
__global__ void zero_stats(int Npoint, FLOAT* stats) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 stats[0*Npoint+i] = ZERO; // r1
 stats[1*Npoint+i] = ZERO; // r2
 stats[2*Npoint+i] = ZERO; // r12
 stats[3*Npoint+i] = ZERO; // accept count
}
// initializes samples
// Place each walker's two electrons uniformly in [-2, 2]^3 and evaluate the
// initial wave function value; one thread per sample point.
__global__ void initialize(FLOAT* x1, FLOAT* y1, FLOAT* z1, FLOAT* x2, FLOAT* y2, FLOAT* z2, FLOAT* psi, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 x1[i] = (LCG_random(states+i) - HALF)*FOUR;
 y1[i] = (LCG_random(states+i) - HALF)*FOUR;
 z1[i] = (LCG_random(states+i) - HALF)*FOUR;
 x2[i] = (LCG_random(states+i) - HALF)*FOUR;
 y2[i] = (LCG_random(states+i) - HALF)*FOUR;
 z2[i] = (LCG_random(states+i) - HALF)*FOUR;
 psi[i] = wave_function(x1[i], y1[i], z1[i], x2[i], y2[i], z2[i]);
}
// Metropolis propagation: each thread advances one independent walker for
// nstep generations. A trial move displaces both electrons by a uniform
// step in [-DELTA/2, DELTA/2]; it is accepted with probability
// min(1, psi_new^2 / psi_old^2). After every step (accepted or not) the
// current r1, r2, r12 are accumulated into stats, and the acceptance
// counter is bumped on accepted moves. Walker state is kept in registers
// during the loop and written back at the end.
__global__ void propagate(const int Npoint, const int nstep, FLOAT* X1, FLOAT* Y1, FLOAT* Z1,
 FLOAT* X2, FLOAT* Y2, FLOAT* Z2, FLOAT* P, FLOAT* stats, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 FLOAT x1 = X1[i];
 FLOAT y1 = Y1[i];
 FLOAT z1 = Z1[i];
 FLOAT x2 = X2[i];
 FLOAT y2 = Y2[i];
 FLOAT z2 = Z2[i];
 FLOAT p = P[i];
 for (int step=0; step<nstep; step++) {
 FLOAT x1new = x1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT y1new = y1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT z1new = z1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT x2new = x2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT y2new = y2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT z2new = z2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT pnew = wave_function(x1new, y1new, z1new, x2new, y2new, z2new);
 // Metropolis acceptance test on |psi|^2
 if (pnew*pnew > p*p*LCG_random(states+i)) {
 stats[3*Npoint+i]++; //naccept ++;
 p = pnew;
 x1 = x1new;
 y1 = y1new;
 z1 = z1new;
 x2 = x2new;
 y2 = y2new;
 z2 = z2new;
 }
 // accumulate observables for the (possibly unchanged) configuration
 FLOAT r1, r2, r12;
 compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
 stats[0*Npoint+i] += r1;
 stats[1*Npoint+i] += r2;
 stats[2*Npoint+i] += r12;
 }
 // write walker state back to global memory for the next call
 X1[i] = x1;
 Y1[i] = y1;
 Z1[i] = z1;
 X2[i] = x2;
 Y2[i] = y2;
 Z2[i] = z2;
 P[i] = p;
}
// Driver: variational Monte Carlo for the helium-like two-electron system.
// Allocates walker state on the GPU, equilibrates for Neq generations, then
// runs Nsample statistics blocks of Ngen_per_block generations each and
// reports <r1>, <r2>, <r12> with statistical errors plus the acceptance ratio.
int main() {
 FLOAT *x1, *y1, *z1, *x2, *y2, *z2, *psi, *stats, *statsum, *blocksums;
 unsigned int *ranstates;
 CHECK(hipMalloc((void **)&x1, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&y1, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&z1, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&x2, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&y2, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&z2, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&psi, Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&stats, 4 * Npoint * sizeof(FLOAT)));
 CHECK(hipMalloc((void **)&blocksums, NBLOCK * sizeof(FLOAT))); // workspace for summation
 CHECK(hipMalloc((void **)&statsum, 4 * sizeof(FLOAT))); // workspace for summation
 CHECK(hipMalloc((void **)&ranstates, Npoint*sizeof(unsigned int)));
 hipLaunchKernelGGL(( initran), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, 5551212, ranstates);
 hipLaunchKernelGGL(( initialize), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, x1, y1, z1, x2, y2, z2, psi, ranstates);
 hipLaunchKernelGGL(( zero_stats), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats);
 // Equilibrate (statistics from this phase are discarded below)
 hipLaunchKernelGGL(( propagate), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, Neq, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
 // Accumulators for averages over blocks --- use doubles
 double r1_tot = ZERO, r1_sq_tot = ZERO;
 double r2_tot = ZERO, r2_sq_tot = ZERO;
 double r12_tot = ZERO, r12_sq_tot = ZERO;
 double naccept = ZERO; // Keeps track of propagation efficiency
 for (int sample=0; sample<Nsample; sample++) {
 hipLaunchKernelGGL(( zero_stats), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, stats);
 hipLaunchKernelGGL(( propagate), dim3(NBLOCK),dim3(NTHR_PER_BLK), 0, 0, Npoint, Ngen_per_block, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
 // layout matches statsum's 4 reduced values (r1, r2, r12, accept)
 struct {FLOAT r1, r2, r12, accept;} s;
 sum_stats(Npoint, stats, statsum, blocksums);
 // blocking hipMemcpy also synchronizes with the kernels above
 CHECK(hipMemcpy(&s, statsum, sizeof(s), hipMemcpyDeviceToHost));
 naccept += s.accept;
 s.r1 /= Ngen_per_block*Npoint;
 s.r2 /= Ngen_per_block*Npoint;
 s.r12 /= Ngen_per_block*Npoint;
 printf(" block %6d %.6f %.6f %.6f\n", sample, s.r1, s.r2, s.r12);
 r1_tot += s.r1; r1_sq_tot += s.r1*s.r1;
 r2_tot += s.r2; r2_sq_tot += s.r2*s.r2;
 r12_tot += s.r12; r12_sq_tot += s.r12*s.r12;
 }
 // block averages and standard errors of the mean
 r1_tot /= Nsample; r1_sq_tot /= Nsample;
 r2_tot /= Nsample; r2_sq_tot /= Nsample;
 r12_tot /= Nsample; r12_sq_tot /= Nsample;
 double r1s = sqrt((r1_sq_tot - r1_tot*r1_tot) / Nsample);
 double r2s = sqrt((r2_sq_tot - r2_tot*r2_tot) / Nsample);
 double r12s = sqrt((r12_sq_tot - r12_tot*r12_tot) / Nsample);
 printf(" <r1> = %.6f +- %.6f\n", r1_tot, r1s);
 printf(" <r2> = %.6f +- %.6f\n", r2_tot, r2s);
 printf(" <r12> = %.6f +- %.6f\n", r12_tot, r12s);
 printf(" acceptance ratio=%.1f%%\n",100.0*naccept/double(Npoint)/double(Ngen_per_block)/double(Nsample)); // avoid int overflow
 CHECK(hipFree(x1));
 CHECK(hipFree(y1));
 CHECK(hipFree(z1));
 CHECK(hipFree(x2));
 CHECK(hipFree(y2));
 CHECK(hipFree(z2));
 CHECK(hipFree(psi));
 CHECK(hipFree(stats));
 CHECK(hipFree(blocksums));
 CHECK(hipFree(statsum));
 CHECK(hipFree(ranstates));
 return 0;
}
| fc8762ea866b21f97734ca8829fb6967d54d513b.cu | #include <cstdio>
#include <cmath>
#include <cuda.h>
using namespace std;
using FLOAT = float;
#define CHECK(test) if (test != cudaSuccess) throw "error";
const int NTHR_PER_BLK = 256; // Number of CUDA threads per block
const int NBLOCK = 56*4; // Number of CUDA blocks (SMs on P100)
const int Npoint = NBLOCK*NTHR_PER_BLK; // No. of independent samples
const int Neq = 100000; // No. of generations to equilibrate
const int Ngen_per_block = 5000; // No. of generations per block
const int Nsample = 100; // No. of blocks to sample
const float DELTA = 2.0; // Random step size
// Explicitly typed constants so can easily work with both floats and doubles
static const FLOAT FOUR = 4.0;
static const FLOAT TWO = 2.0;
static const FLOAT ONE = 1.0;
static const FLOAT HALF = 0.5;
static const FLOAT ZERO = 0.0;
// Precision-matched math wrappers: overload resolution picks the float or
// double variant so device code stays correct when FLOAT is switched.
__device__ __forceinline__ float EXP(float x) {return expf(x);}
__device__ __forceinline__ double EXP(double x) {return exp(x);}
__device__ __forceinline__ float SQRT(float x) {return sqrtf(x);}
__device__ __forceinline__ double SQRT(double x) {return sqrt(x);}
// Linear congruential generator: advances *seed in place and returns a
// uniform pseudo-random sample in [0, 1). m = 2^31; the multiply wraps
// modulo 2^32 (unsigned arithmetic) before the % m is applied.
__device__
float LCG_random(unsigned int * seed) {
 const unsigned int m = 2147483648; // modulus, 2^31
 const unsigned int a = 26757677; // multiplier
 const unsigned int c = 1; // increment
 *seed = (a * (*seed) + c) % m;
 return (float) (*seed) / (float) m;
}
// Advance the LCG state once without producing a sample; used to scramble
// the per-thread seeds after they are derived from the base seed.
__device__
void LCG_random_init(unsigned int * seed) {
 const unsigned int m = 2147483648; // modulus, 2^31 (same LCG as LCG_random)
 const unsigned int a = 26757677; // multiplier
 const unsigned int c = 1; // increment
 *seed = (a * (*seed) + c) % m;
}
// Two-phase reduction: every thread first accumulates a grid-strided
// partial sum of data[0..n-1], then each block reduces its partials in
// shared memory; blocksums[blockIdx.x] receives the block total.
// NOTE(review): the tree starts at stride 128, so this assumes
// blockDim.x <= 256 — confirm with the launch sites (256 and NBLOCK).
__global__ void SumWithinBlocks(const int n, const FLOAT* data, FLOAT* blocksums) {
 int nthread = blockDim.x*gridDim.x;
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 __shared__ FLOAT sdata[512]; // max threads
 // Every thread in every block computes partial sum over rest of vector
 FLOAT st=ZERO;
 while (i < n) {
 st += data[i];
 i+=nthread;
 }
 sdata[threadIdx.x] = st;
 __syncthreads();
 // Now do binary tree sum within a block
 int tid = threadIdx.x;
 for (unsigned int s=128; s>0; s>>=1) {
 if (tid<s && (tid+s)<blockDim.x) {
 sdata[tid] += sdata[tid + s];
 }
 __syncthreads(); // barrier each level; reached by all threads (guard is inside)
 }
 if (tid==0) blocksums[blockIdx.x] = sdata[0];
}
// Reduce each of the 4 per-point statistics arrays (r1, r2, r12, accept
// count, laid out contiguously in stats) to a single value in statsum,
// using blocksums as intermediate workspace (two-pass block reduction).
void sum_stats(const int Npoint, const FLOAT* stats, FLOAT* statsum, FLOAT* blocksums) {
 for (int what=0; what<4; what++) {
 SumWithinBlocks<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats+what*Npoint, blocksums);
 SumWithinBlocks<<<1,NBLOCK>>>(NBLOCK, blocksums, statsum+what);
 }
}
// Distances of the two electrons from the origin (r1, r2) and from each
// other (r12), given their Cartesian coordinates.
__device__ __forceinline__ void compute_distances(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2,
 FLOAT& r1, FLOAT& r2, FLOAT& r12) {
 r1 = SQRT(x1*x1 + y1*y1 + z1*z1);
 r2 = SQRT(x2*x2 + y2*y2 + z2*z2);
 const FLOAT dx = x1-x2;
 const FLOAT dy = y1-y2;
 const FLOAT dz = z1-z2;
 r12 = SQRT(dx*dx + dy*dy + dz*dz);
}
// Trial wave function for the two-electron system:
// psi = (1 + r12/2) * exp(-2*(r1 + r2)), with distances from compute_distances.
__device__ __forceinline__ FLOAT wave_function(FLOAT x1, FLOAT y1, FLOAT z1, FLOAT x2, FLOAT y2, FLOAT z2) {
 FLOAT r1, r2, r12;
 compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
 return (ONE + HALF*r12)*EXP(-TWO*(r1 + r2));
}
// Initialize random number generator
// One thread per sample point: derive a per-thread state by XOR-ing the
// base seed with the thread index, then scramble it with one LCG step.
__global__ void initran(unsigned int seed, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 states[i] = seed ^ i;
 LCG_random_init(&states[i]);
}
// ZERO stats counters on the GPU
// Reset the four per-point accumulators (stored as 4 contiguous arrays of
// Npoint entries each) to zero; one thread per sample point.
__global__ void zero_stats(int Npoint, FLOAT* stats) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 stats[0*Npoint+i] = ZERO; // r1
 stats[1*Npoint+i] = ZERO; // r2
 stats[2*Npoint+i] = ZERO; // r12
 stats[3*Npoint+i] = ZERO; // accept count
}
// initializes samples
// Place each walker's two electrons uniformly in [-2, 2]^3 and evaluate the
// initial wave function value; one thread per sample point.
__global__ void initialize(FLOAT* x1, FLOAT* y1, FLOAT* z1, FLOAT* x2, FLOAT* y2, FLOAT* z2, FLOAT* psi, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 x1[i] = (LCG_random(states+i) - HALF)*FOUR;
 y1[i] = (LCG_random(states+i) - HALF)*FOUR;
 z1[i] = (LCG_random(states+i) - HALF)*FOUR;
 x2[i] = (LCG_random(states+i) - HALF)*FOUR;
 y2[i] = (LCG_random(states+i) - HALF)*FOUR;
 z2[i] = (LCG_random(states+i) - HALF)*FOUR;
 psi[i] = wave_function(x1[i], y1[i], z1[i], x2[i], y2[i], z2[i]);
}
// Metropolis propagation: each thread advances one independent walker for
// nstep generations. A trial move displaces both electrons by a uniform
// step in [-DELTA/2, DELTA/2]; it is accepted with probability
// min(1, psi_new^2 / psi_old^2). After every step (accepted or not) the
// current r1, r2, r12 are accumulated into stats, and the acceptance
// counter is bumped on accepted moves. Walker state is kept in registers
// during the loop and written back at the end.
__global__ void propagate(const int Npoint, const int nstep, FLOAT* X1, FLOAT* Y1, FLOAT* Z1,
 FLOAT* X2, FLOAT* Y2, FLOAT* Z2, FLOAT* P, FLOAT* stats, unsigned int* states) {
 int i = blockDim.x * blockIdx.x + threadIdx.x;
 FLOAT x1 = X1[i];
 FLOAT y1 = Y1[i];
 FLOAT z1 = Z1[i];
 FLOAT x2 = X2[i];
 FLOAT y2 = Y2[i];
 FLOAT z2 = Z2[i];
 FLOAT p = P[i];
 for (int step=0; step<nstep; step++) {
 FLOAT x1new = x1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT y1new = y1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT z1new = z1 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT x2new = x2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT y2new = y2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT z2new = z2 + (LCG_random(states+i)-HALF)*DELTA;
 FLOAT pnew = wave_function(x1new, y1new, z1new, x2new, y2new, z2new);
 // Metropolis acceptance test on |psi|^2
 if (pnew*pnew > p*p*LCG_random(states+i)) {
 stats[3*Npoint+i]++; //naccept ++;
 p = pnew;
 x1 = x1new;
 y1 = y1new;
 z1 = z1new;
 x2 = x2new;
 y2 = y2new;
 z2 = z2new;
 }
 // accumulate observables for the (possibly unchanged) configuration
 FLOAT r1, r2, r12;
 compute_distances(x1, y1, z1, x2, y2, z2, r1, r2, r12);
 stats[0*Npoint+i] += r1;
 stats[1*Npoint+i] += r2;
 stats[2*Npoint+i] += r12;
 }
 // write walker state back to global memory for the next call
 X1[i] = x1;
 Y1[i] = y1;
 Z1[i] = z1;
 X2[i] = x2;
 Y2[i] = y2;
 Z2[i] = z2;
 P[i] = p;
}
// Driver: variational Monte Carlo for the helium-like two-electron system.
// Allocates walker state on the GPU, equilibrates for Neq generations, then
// runs Nsample statistics blocks of Ngen_per_block generations each and
// reports <r1>, <r2>, <r12> with statistical errors plus the acceptance ratio.
int main() {
 FLOAT *x1, *y1, *z1, *x2, *y2, *z2, *psi, *stats, *statsum, *blocksums;
 unsigned int *ranstates;
 CHECK(cudaMalloc((void **)&x1, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&y1, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&z1, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&x2, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&y2, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&z2, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&psi, Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&stats, 4 * Npoint * sizeof(FLOAT)));
 CHECK(cudaMalloc((void **)&blocksums, NBLOCK * sizeof(FLOAT))); // workspace for summation
 CHECK(cudaMalloc((void **)&statsum, 4 * sizeof(FLOAT))); // workspace for summation
 CHECK(cudaMalloc((void **)&ranstates, Npoint*sizeof(unsigned int)));
 initran<<<NBLOCK,NTHR_PER_BLK>>>(5551212, ranstates);
 initialize<<<NBLOCK,NTHR_PER_BLK>>>(x1, y1, z1, x2, y2, z2, psi, ranstates);
 zero_stats<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats);
 // Equilibrate (statistics from this phase are discarded below)
 propagate<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, Neq, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
 // Accumulators for averages over blocks --- use doubles
 double r1_tot = ZERO, r1_sq_tot = ZERO;
 double r2_tot = ZERO, r2_sq_tot = ZERO;
 double r12_tot = ZERO, r12_sq_tot = ZERO;
 double naccept = ZERO; // Keeps track of propagation efficiency
 for (int sample=0; sample<Nsample; sample++) {
 zero_stats<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, stats);
 propagate<<<NBLOCK,NTHR_PER_BLK>>>(Npoint, Ngen_per_block, x1, y1, z1, x2, y2, z2, psi, stats, ranstates);
 // layout matches statsum's 4 reduced values (r1, r2, r12, accept)
 struct {FLOAT r1, r2, r12, accept;} s;
 sum_stats(Npoint, stats, statsum, blocksums);
 // blocking cudaMemcpy also synchronizes with the kernels above
 CHECK(cudaMemcpy(&s, statsum, sizeof(s), cudaMemcpyDeviceToHost));
 naccept += s.accept;
 s.r1 /= Ngen_per_block*Npoint;
 s.r2 /= Ngen_per_block*Npoint;
 s.r12 /= Ngen_per_block*Npoint;
 printf(" block %6d %.6f %.6f %.6f\n", sample, s.r1, s.r2, s.r12);
 r1_tot += s.r1; r1_sq_tot += s.r1*s.r1;
 r2_tot += s.r2; r2_sq_tot += s.r2*s.r2;
 r12_tot += s.r12; r12_sq_tot += s.r12*s.r12;
 }
 // block averages and standard errors of the mean
 r1_tot /= Nsample; r1_sq_tot /= Nsample;
 r2_tot /= Nsample; r2_sq_tot /= Nsample;
 r12_tot /= Nsample; r12_sq_tot /= Nsample;
 double r1s = sqrt((r1_sq_tot - r1_tot*r1_tot) / Nsample);
 double r2s = sqrt((r2_sq_tot - r2_tot*r2_tot) / Nsample);
 double r12s = sqrt((r12_sq_tot - r12_tot*r12_tot) / Nsample);
 printf(" <r1> = %.6f +- %.6f\n", r1_tot, r1s);
 printf(" <r2> = %.6f +- %.6f\n", r2_tot, r2s);
 printf(" <r12> = %.6f +- %.6f\n", r12_tot, r12s);
 printf(" acceptance ratio=%.1f%%\n",100.0*naccept/double(Npoint)/double(Ngen_per_block)/double(Nsample)); // avoid int overflow
 CHECK(cudaFree(x1));
 CHECK(cudaFree(y1));
 CHECK(cudaFree(z1));
 CHECK(cudaFree(x2));
 CHECK(cudaFree(y2));
 CHECK(cudaFree(z2));
 CHECK(cudaFree(psi));
 CHECK(cudaFree(stats));
 CHECK(cudaFree(blocksums));
 CHECK(cudaFree(statsum));
 CHECK(cudaFree(ranstates));
 return 0;
}
|
ecbb19e7f64fdb1e9e654c5959dcb29f5b27ae23.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#define K 1
using namespace std;
#define spmv_NBLOCKS 256
#define spmv_BLOCK_SIZE 128
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
/**
 Fill A[0..n-1] with uniform pseudo-random floats in [0, maxi), using the
 C rand() generator (seed the generator before calling for reproducibility).
*/
void fill(float *A, const int n, const float maxi)
{
 for (int i = 0; i < n; i++)
 {
  A[i] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
 }
}
// Build a random CSR sparsity pattern: exactly n nonzeros scattered over a
// dim x dim matrix. For each (i,j) in row-major order, a nonzero is placed
// with probability n/dim^2; once the remaining slots equal the remaining
// nonzeros, every slot is filled so the total is exactly n. cols receives
// the column index of each nonzero (sorted within each row by construction);
// rowDelimiters[i] is the index of row i's first nonzero, and
// rowDelimiters[dim] == n by convention.
// NOTE: the drand48() consumption order is part of the (seeded, repeatable)
// behaviour — do not reorder the condition below.
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
 int nnzAssigned = 0;
 // Figure out the probability that a nonzero should be assigned to a given
 // spot in the matrix
 double prob = (double)n / ((double)dim * (double)dim);
 // Seed random number generator
 srand48(2013);
 // Randomly decide whether entry i,j gets a value, but ensure n values
 // are assigned
 bool fillRemaining = false;
 for (int i = 0; i < dim; i++)
 {
 rowDelimiters[i] = nnzAssigned;
 for (int j = 0; j < dim; j++)
 {
 int numEntriesLeft = (dim * dim) - ((i * dim) + j);
 int needToAssign = n - nnzAssigned;
 if (numEntriesLeft <= needToAssign) {
 fillRemaining = true;
 }
 if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
 {
 // Assign (i,j) a value
 cols[nnzAssigned] = j;
 nnzAssigned++;
 }
 }
 }
 // Observe the convention to put the number of non zeroes at the end of the
 // row delimiters array
 rowDelimiters[dim] = n;
 assert(nnzAssigned == n);
}
/**
 Convert a CSR matrix (A, cols, rowDelimiters) into a padded layout where
 each row's nonzero count is rounded up to a multiple of PAD_FACTOR.
 Allocates pinned host buffers for the padded values and columns, fills
 newIndices with the padded row offsets, and returns the padded length in
 *newSize. Padding values are zero so they contribute nothing to a dot
 product.
*/
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
 float **newA_ptr, int **newcols_ptr, int *newIndices,
 int *newSize)
{
 // determine total padded size and new row indices
 int paddedSize = 0;
 int rowSize;
 for (int i=0; i<dim; i++)
 {
 newIndices[i] = paddedSize;
 rowSize = rowDelimiters[i+1] - rowDelimiters[i];
 if (rowSize % PAD_FACTOR != 0)
 {
 rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
 }
 paddedSize += rowSize;
 }
 *newSize = paddedSize;
 newIndices[dim] = paddedSize;
 hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
 hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
 float *newA = *newA_ptr;
 int *newcols = *newcols_ptr;
 memset(newA, 0, paddedSize * sizeof(float));
 // also zero the padded column indices: previously the padding entries
 // were left uninitialized, so a kernel reading them could index v[]
 // out of bounds; column 0 is always valid and the 0.0f value adds nothing
 memset(newcols, 0, paddedSize * sizeof(int));
 // fill newA and newcols
 for (int i=0; i<dim; i++)
 {
 for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
 j++, k++)
 {
 newA[k] = A[j];
 newcols[k] = cols[j];
 }
 }
}
/**
 Reference CPU implementation of CSR sparse-matrix times dense-vector:
 out[i] = sum of val[j] * vec[cols[j]] over row i's nonzero range
 [rowDelimiters[i], rowDelimiters[i+1]).
*/
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
 const float *vec, int dim, float *out)
{
 for (int row = 0; row < dim; row++)
 {
 const int first = rowDelimiters[row];
 const int last = rowDelimiters[row + 1];
 float acc = 0;
 for (int k = first; k < last; k++)
 acc += val[k] * vec[cols[k]];
 out[row] = acc;
 }
}
/**
 Compare GPU results against the CPU reference element-wise and print the
 outcome. A mismatch is any element whose relative error exceeds
 MAX_RELATIVE_ERROR; the first mismatch is reported and the check stops.
 Fixes: the denominator is now fabs(cpuResults[i]) — previously a negative
 reference value made the relative error negative, silently masking
 mismatches; the unused `passed` flag was removed.
*/
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
 const int size)
{
 for (int i = 0; i < size; i++)
 {
 // NOTE(review): a zero reference value still yields inf/nan here,
 // as in the original — acceptable for this benchmark's positive data
 if (fabs(cpuResults[i] - gpuResults[i]) / fabs(cpuResults[i])
 > MAX_RELATIVE_ERROR)
 {
 cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
 " dev: " << gpuResults[i] << endl;
 return;
 }
 }
 cout << "spmv passed" << endl;
}
// CSR-Vector SpMV kernel: one 32-thread warp per matrix row. Each lane
// accumulates every 32nd nonzero of its row, the partials are combined by
// a warp-synchronous tree reduction in (volatile) shared memory, and lane
// 0 writes the row result. Requires blockDim.x to be a multiple of
// WARP_SIZE and at most spmv_BLOCK_SIZE.
// NOTE(review): relies on implicit lockstep within a warp (volatile, no
// __syncwarp); confirm behaviour on architectures with independent thread
// scheduling (Volta and later).
__global__ void
spmv_kernel(const float* val,
 const int * cols,
 const int * rowDelimiters,
 const float * vec,
 const int dim, float * out)
{
 // Thread ID in block
 int t = threadIdx.x;
 // Thread ID within warp
 int id = t & (WARP_SIZE-1);
 int warpsPerBlock = blockDim.x / WARP_SIZE;
 // One row per warp
 int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
 __shared__ volatile float partialSums[spmv_BLOCK_SIZE];
 if (myRow < dim)
 {
 int warpStart = rowDelimiters[myRow];
 int warpEnd = rowDelimiters[myRow+1];
 float mySum = 0;
 // each lane handles every WARP_SIZE-th nonzero of the row
 for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
 {
 int col = cols[j];
 mySum += val[j] * vec[col];
 }
 partialSums[t] = mySum;
 // Reduce partial sums (warp-synchronous tree, stride halved each step)
 if (id < 16) partialSums[t] += partialSums[t+16];
 if (id < 8) partialSums[t] += partialSums[t+ 8];
 if (id < 4) partialSums[t] += partialSums[t+ 4];
 if (id < 2) partialSums[t] += partialSums[t+ 2];
 if (id < 1) partialSums[t] += partialSums[t+ 1];
 // Write result (lane 0 holds the full row sum)
 if (id == 0)
 {
 out[myRow] = partialSums[t];
 }
 }
}
// Driver: build a random CSR matrix, compute a CPU reference SpMV, run the
// GPU kernel (one warp per row), report kernel time, and verify the result.
int main(int argc, char **argv) {
    hipSetDevice(2);
    srand(2013);
    float *h_spmv_val, *h_spmv_valPad;
    int *h_spmv_cols, *h_spmv_colsPad;
    int *h_rowDelimiters, *h_rowDelimitersPad;
    float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
    int spmv_nItems, nItemsPadded, spmv_numRows;
    spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 50; // ~2% of entries will be non-zero
    float maxval = 200.0f;
    hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
    hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
    hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
    fill(h_spmv_val, spmv_nItems, maxval);
    initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
    // Set up remaining host data. Round up to a multiple of PAD_FACTOR without
    // adding a full extra pad block when spmv_numRows is already aligned
    // (the original unconditionally added PAD_FACTOR in that case).
    int paddedSize = spmv_numRows
                     + (PAD_FACTOR - spmv_numRows % PAD_FACTOR) % PAD_FACTOR;
    hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float));
    spmv_refOut = new float[spmv_numRows];
    hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
    fill(h_spmv_vec, spmv_numRows, maxval);
    hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
    convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
                    &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
    // Compute reference solution
    spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
    float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
    int *d_spmv_cols, *d_rowDelimiters;
    // Allocate device memory
    hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
    hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
    hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
    hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
    hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
    // Transfer data to device
    hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
    hipEvent_t kernel_start, kernel_stop;
    hipEventCreate(&kernel_start);
    hipEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    hipEventRecord(kernel_start, 0);
    // Setup thread configuration: one warp per row, spmv_BLOCK_SIZE threads/block.
    int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
    hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0,
        d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
    // BUG FIX: the launch result was never checked; a bad configuration would
    // silently produce garbage output.
    hipError_t launchErr = hipGetLastError();
    if (launchErr != hipSuccess)
    {
        cout << "kernel launch failed: " << hipGetErrorString(launchErr) << endl;
        return 1;
    }
    hipDeviceSynchronize();
    hipEventRecord(kernel_stop, 0);
    hipEventSynchronize(kernel_stop);
    // get elapsed time
    kernel_time = 0.0f;
    hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3; // Convert to seconds
    cout << "kernel exe time: " << kernel_time << endl;
    hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
    spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
    // BUG FIX: release events and every device/host allocation (the original
    // leaked all of them, including the padded arrays convertToPadded created).
    hipEventDestroy(kernel_start);
    hipEventDestroy(kernel_stop);
    hipFree(d_spmv_val);
    hipFree(d_spmv_cols);
    hipFree(d_spmv_vec);
    hipFree(d_spmv_out);
    hipFree(d_rowDelimiters);
    hipHostFree(h_spmv_val);
    hipHostFree(h_spmv_cols);
    hipHostFree(h_rowDelimiters);
    hipHostFree(h_spmv_vec);
    hipHostFree(h_spmv_out);
    hipHostFree(h_rowDelimitersPad);
    hipHostFree(h_spmv_valPad);   // allocated inside convertToPadded
    hipHostFree(h_spmv_colsPad);  // allocated inside convertToPadded
    delete[] spmv_refOut;
    return 0;
}
| ecbb19e7f64fdb1e9e654c5959dcb29f5b27ae23.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#define K 1
using namespace std;
#define spmv_NBLOCKS 256
#define spmv_BLOCK_SIZE 128
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
// Populate A[0..n) with uniform pseudo-random floats in [0, maxi),
// consuming exactly one rand() call per element (caller seeds the PRNG).
void fill(float *A, const int n, const float maxi)
{
    for (int idx = 0; idx < n; ++idx)
    {
        // rand()/(RAND_MAX+1.0f) is uniform in [0, 1).
        const float r = rand() / (RAND_MAX + 1.0f);
        A[idx] = (float) maxi * r;
    }
}
// Build a random CSR sparsity pattern: exactly n nonzeros spread over a
// dim x dim matrix. Writes column indices into cols[0..n) (rows in order,
// columns ascending within a row) and row start offsets into
// rowDelimiters[0..dim], with rowDelimiters[dim] == n.
// Deterministic: reseeds drand48 with a fixed seed on every call. Requires
// n <= dim*dim; the trailing assert checks exactly n entries were placed.
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
    int nnzAssigned = 0;
    // Figure out the probability that a nonzero should be assigned to a given
    // spot in the matrix
    double prob = (double)n / ((double)dim * (double)dim);
    // Seed random number generator
    srand48(2013);
    // Randomly decide whether entry i,j gets a value, but ensure n values
    // are assigned
    bool fillRemaining = false;
    for (int i = 0; i < dim; i++)
    {
        rowDelimiters[i] = nnzAssigned;
        for (int j = 0; j < dim; j++)
        {
            // Once the number of cells left to visit equals the number of
            // nonzeros still owed, every remaining cell must be filled;
            // fillRemaining stays true for the rest of the scan.
            int numEntriesLeft = (dim * dim) - ((i * dim) + j);
            int needToAssign = n - nnzAssigned;
            if (numEntriesLeft <= needToAssign) {
                fillRemaining = true;
            }
            // NOTE: drand48() is consumed once per cell while nnzAssigned < n
            // (short-circuit order matters for reproducibility).
            if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
            {
                // Assign (i,j) a value
                cols[nnzAssigned] = j;
                nnzAssigned++;
            }
        }
    }
    // Observe the convention to put the number of non zeroes at the end of the
    // row delimiters array
    rowDelimiters[dim] = n;
    assert(nnzAssigned == n);
}
// Convert a CSR matrix (A/cols/rowDelimiters, dim rows) into a padded copy in
// which every row length is rounded up to a multiple of PAD_FACTOR.
// Outputs: *newA_ptr / *newcols_ptr (pinned host allocations owned by the
// caller; free with cudaFreeHost), newIndices[0..dim] (padded row
// delimiters), and *newSize (total padded element count). Pad slots hold
// value 0.0f and column 0 so they contribute nothing to an SpMV and never
// index out of bounds.
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
                     float **newA_ptr, int **newcols_ptr, int *newIndices,
                     int *newSize)
{
    // determine total padded size and new row indices
    int paddedSize = 0;
    for (int i = 0; i < dim; i++)
    {
        newIndices[i] = paddedSize;
        int rowSize = rowDelimiters[i+1] - rowDelimiters[i];
        if (rowSize % PAD_FACTOR != 0)
        {
            rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
        }
        paddedSize += rowSize;
    }
    *newSize = paddedSize;
    newIndices[dim] = paddedSize;
    cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
    cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
    float *newA = *newA_ptr;
    int *newcols = *newcols_ptr;
    memset(newA, 0, paddedSize * sizeof(float));
    // BUG FIX: the padded column slots were previously left uninitialized, so
    // any consumer of the padded layout could read garbage column indices and
    // access vec[] out of bounds. Zero them alongside the values.
    memset(newcols, 0, paddedSize * sizeof(int));
    // fill newA and newcols with the real nonzeros at their padded positions
    for (int i=0; i<dim; i++)
    {
        for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
             j++, k++)
        {
            newA[k] = A[j];
            newcols[k] = cols[j];
        }
    }
}
// CPU reference CSR sparse-matrix / dense-vector product: out = M * vec.
// Row i's nonzeros live in val/cols at [rowDelimiters[i], rowDelimiters[i+1]).
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
             const float *vec, int dim, float *out)
{
    for (int row = 0; row < dim; ++row)
    {
        const int begin = rowDelimiters[row];
        const int end   = rowDelimiters[row + 1];
        float acc = 0;
        // Dot product of the sparse row with the dense vector.
        for (int idx = begin; idx < end; ++idx)
        {
            acc += val[idx] * vec[cols[idx]];
        }
        out[row] = acc;
    }
}
// Compare GPU output against the CPU reference element by element.
// Prints the first mismatch and returns early; otherwise prints "spmv passed".
// BUG FIX: the original divided by the *signed* reference value, so a
// negative reference made the ratio negative and mismatches were never
// flagged; a zero reference divided by zero. We now use |ref| and fall back
// to the absolute difference when the reference is exactly zero. Also removed
// the unused `passed` local.
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
                        const int size)
{
    for (int i = 0; i < size; i++)
    {
        const double ref  = cpuResults[i];
        const double diff = fabs(ref - gpuResults[i]);
        // Relative error when the reference is nonzero, absolute otherwise.
        const double err = (ref != 0.0) ? diff / fabs(ref) : diff;
        if (err > MAX_RELATIVE_ERROR)
        {
            cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
                " dev: " << gpuResults[i] << endl;
            return;
        }
    }
    cout << "spmv passed" << endl;
}
// CSR SpMV kernel: one WARP_SIZE-thread warp computes one matrix row of
// out = M * vec.
// Launch expectations (from the indexing below): 1-D grid, blockDim.x ==
// spmv_BLOCK_SIZE and a multiple of WARP_SIZE, since partialSums is sized
// spmv_BLOCK_SIZE and indexed by threadIdx.x.
// NOTE(review): the reduction is the classic warp-synchronous pattern that
// relies on `volatile` shared memory and implicit lock-step execution within
// a warp. On Volta+ independent thread scheduling breaks implicit warp
// synchrony -- confirm the target architecture, or replace with a
// __shfl_down_sync / __syncwarp-based reduction.
__global__ void
spmv_kernel(const float* val,
            const int * cols,
            const int * rowDelimiters,
            const float * vec,
            const int dim, float * out)
{
    // Thread ID in block
    int t = threadIdx.x;
    // Thread ID within warp (lane index)
    int id = t & (WARP_SIZE-1);
    int warpsPerBlock = blockDim.x / WARP_SIZE;
    // One row per warp
    int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
    // Per-thread partial dot products; volatile forces the reduction below to
    // re-read values just written by sibling lanes.
    __shared__ volatile float partialSums[spmv_BLOCK_SIZE];
    if (myRow < dim)
    {
        int warpStart = rowDelimiters[myRow];
        int warpEnd = rowDelimiters[myRow+1];
        float mySum = 0;
        // Lanes stride across the row's nonzeros; consecutive lanes touch
        // consecutive val/cols entries, so global loads coalesce.
        for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
        {
            int col = cols[j];
            mySum += val[j] * vec[col];
        }
        partialSums[t] = mySum;
        // Reduce partial sums: binary tree within the warp (see NOTE above).
        if (id < 16) partialSums[t] += partialSums[t+16];
        if (id < 8) partialSums[t] += partialSums[t+ 8];
        if (id < 4) partialSums[t] += partialSums[t+ 4];
        if (id < 2) partialSums[t] += partialSums[t+ 2];
        if (id < 1) partialSums[t] += partialSums[t+ 1];
        // Write result: lane 0 holds the row total.
        if (id == 0)
        {
            out[myRow] = partialSums[t];
        }
    }
}
// Driver: build a random CSR matrix, compute a CPU reference SpMV, run the
// GPU kernel (one warp per row), report kernel time, and verify the result.
int main(int argc, char **argv) {
    cudaSetDevice(2);
    srand(2013);
    float *h_spmv_val, *h_spmv_valPad;
    int *h_spmv_cols, *h_spmv_colsPad;
    int *h_rowDelimiters, *h_rowDelimitersPad;
    float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
    int spmv_nItems, nItemsPadded, spmv_numRows;
    spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 50; // ~2% of entries will be non-zero
    float maxval = 200.0f;
    cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
    cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
    cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
    fill(h_spmv_val, spmv_nItems, maxval);
    initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
    // Set up remaining host data. Round up to a multiple of PAD_FACTOR without
    // adding a full extra pad block when spmv_numRows is already aligned
    // (the original unconditionally added PAD_FACTOR in that case).
    int paddedSize = spmv_numRows
                     + (PAD_FACTOR - spmv_numRows % PAD_FACTOR) % PAD_FACTOR;
    cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float));
    spmv_refOut = new float[spmv_numRows];
    cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
    fill(h_spmv_vec, spmv_numRows, maxval);
    cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
    convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
                    &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
    // Compute reference solution
    spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
    float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
    int *d_spmv_cols, *d_rowDelimiters;
    // Allocate device memory
    cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
    cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
    cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
    cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
    cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
    // Transfer data to device
    cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaEvent_t kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    cudaEventRecord(kernel_start, 0);
    // Setup thread configuration: one warp per row, spmv_BLOCK_SIZE threads/block.
    int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
    spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>>
        (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
    // BUG FIX: the launch result was never checked; a bad configuration would
    // silently produce garbage output.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
    {
        cout << "kernel launch failed: " << cudaGetErrorString(launchErr) << endl;
        return 1;
    }
    cudaDeviceSynchronize();
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    // get elapsed time
    kernel_time = 0.0f;
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3; // Convert to seconds
    cout << "kernel exe time: " << kernel_time << endl;
    cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
    spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
    // BUG FIX: release events and every device/host allocation (the original
    // leaked all of them, including the padded arrays convertToPadded created).
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    cudaFree(d_spmv_val);
    cudaFree(d_spmv_cols);
    cudaFree(d_spmv_vec);
    cudaFree(d_spmv_out);
    cudaFree(d_rowDelimiters);
    cudaFreeHost(h_spmv_val);
    cudaFreeHost(h_spmv_cols);
    cudaFreeHost(h_rowDelimiters);
    cudaFreeHost(h_spmv_vec);
    cudaFreeHost(h_spmv_out);
    cudaFreeHost(h_rowDelimitersPad);
    cudaFreeHost(h_spmv_valPad);   // allocated inside convertToPadded
    cudaFreeHost(h_spmv_colsPad);  // allocated inside convertToPadded
    delete[] spmv_refOut;
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.