serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,101 | // Kernel definition
#define MASK_LEN 29
#define WARP_SIZE 32
// TODO: include beesgrid headers for mask enum
// Pixel labels for tag mask images (mirrors the beesgrid mask enum; see TODO
// above about including the real header). Values below IGNORE are "black"
// labels; IGNORE marks pixels excluded from statistics, and the "white"
// labels are offset from IGNORE.
enum MASK {
INNER_BLACK_SEMICIRCLE,
// The 12 black coding cells, numbered 1..12.
CELL_0_BLACK = 1,
CELL_1_BLACK,
CELL_2_BLACK,
CELL_3_BLACK,
CELL_4_BLACK,
CELL_5_BLACK,
CELL_6_BLACK,
CELL_7_BLACK,
CELL_8_BLACK,
CELL_9_BLACK,
CELL_10_BLACK,
CELL_11_BLACK,
BACKGROUND_RING,
// Sentinel label: pixels carrying this value are skipped by consumers.
IGNORE = 128,
// The 12 white coding cells, offset by IGNORE.
CELL_0_WHITE = IGNORE + 1,
CELL_1_WHITE = IGNORE + 2,
CELL_2_WHITE = IGNORE + 3,
CELL_3_WHITE = IGNORE + 4,
CELL_4_WHITE = IGNORE + 5,
CELL_5_WHITE = IGNORE + 6,
CELL_6_WHITE = IGNORE + 7,
CELL_7_WHITE = IGNORE + 8,
CELL_8_WHITE = IGNORE + 9,
CELL_9_WHITE = IGNORE + 10,
CELL_10_WHITE = IGNORE + 11,
CELL_11_WHITE = IGNORE + 12,
OUTER_WHITE_RING = IGNORE + 20,
INNER_WHITE_SEMICIRCLE = IGNORE + 21
};
// Device-side lookup table of all MASK_LEN (29) labels, in the order their
// slots appear in the split/gradient tensors (slot i of the split output
// corresponds to MASKS_INDICIES[i]).
__device__ const int MASKS_INDICIES[]= {
INNER_BLACK_SEMICIRCLE, CELL_0_BLACK, CELL_1_BLACK, CELL_2_BLACK, CELL_3_BLACK,
CELL_4_BLACK, CELL_5_BLACK, CELL_6_BLACK, CELL_7_BLACK, CELL_8_BLACK, CELL_9_BLACK,
CELL_10_BLACK, CELL_11_BLACK, BACKGROUND_RING, IGNORE, CELL_0_WHITE, CELL_1_WHITE, CELL_2_WHITE,
CELL_3_WHITE, CELL_4_WHITE, CELL_5_WHITE, CELL_6_WHITE, CELL_7_WHITE, CELL_8_WHITE,
CELL_9_WHITE, CELL_10_WHITE, CELL_11_WHITE, OUTER_WHITE_RING, INNER_WHITE_SEMICIRCLE
};
// Row-major linearization of a 2-D coordinate (y, x), where dim1 is the
// length of the innermost (x) dimension.
__device__ inline int index2(const int dim1, const int y, const int x) {
    const int row_base = dim1 * y;
    return row_base + x;
}
// Row-major linearization of a 3-D coordinate (z, y, x); dim2 and dim1 are
// the extents of the y and x dimensions respectively.
__device__ inline int index3(const int dim2, const int dim1,
const int z, const int y, const int x) {
    const int plane = dim2 * dim1;
    return plane * z + dim1 * y + x;
}
// Row-major linearization of a 4-D coordinate (w, z, y, x); dim3/dim2/dim1
// are the extents of the z/y/x dimensions respectively.
__device__ inline int index4(const int dim3, const int dim2, const int dim1,
const int w, const int z, const int y, const int x) {
    const int plane  = dim2 * dim1;
    const int volume = dim3 * plane;
    return volume * w + plane * z + dim1 * y + x;
}
// Backward pass for image_mask_split: for each pixel (b, r, c), accumulate
// into grad[index] the upstream gradients of the split slot whose label
// matches mask[index]. The template flags select which upstream tensors are
// present; when a flag is false the matching pointer may be NULL (the branch
// is compile-time constant, so the dead read is never emitted).
// Launch layout: blockIdx.x = batch, (blockIdx.y, threadIdx.y) = row,
// (blockIdx.z, threadIdx.z) = column — mirrors image_mask_split below.
template<bool sum_grad_provided, bool pow_grad_provided>
__device__ inline void tmpl_image_mask_split_grad(const float * mask, const float * image,
const float * out_grad_sum, const float * out_grad_pow,
const int bs, const int N, float * grad)
{
const int b = blockIdx.x;
const int block_r = blockIdx.y * blockDim.y;
const int block_c = blockIdx.z * blockDim.z;
const int r = block_r + threadIdx.y;
const int c = block_c + threadIdx.z;
// Base offset of this pixel's slot 0 in the split tensor; successive mask
// slots are next_mask_offset apart.
const int s_idx_base = index4(bs, N, N, 0, b, r, c);
const int next_mask_offset = bs*N*N;
const int index = index3(N, N, b, r, c);
if (b < bs && r < N && c < N && index < bs*N*N) {
float mySum = 0;
for(int i = 0; i < MASK_LEN; i++) {
const int s_idx = s_idx_base + i*next_mask_offset;
// Only the slot matching this pixel's label contributes gradient.
if(mask[index] == MASKS_INDICIES[i]) {
if(sum_grad_provided) {
mySum += out_grad_sum[s_idx];
}
if (pow_grad_provided) {
// d(image^2)/d(image) = 2*image — chain rule for the pow slice.
mySum += 2*image[index]*out_grad_pow[s_idx];
}
}
}
grad[index] = mySum;
}
}
extern "C" {
// Scatter a reduced buffer laid out as repeating groups of
// [MASK_LEN sums | MASK_LEN sums-of-squares | MASK_LEN counts] into the
// sum / var / count outputs, converting the sum-of-squares slot into a
// variance term (E[x^2] - E[x]^2 style) on the fly.
// n is the number of valid elements in `reduced`.
//
// Fixed: the original wrote `idx % 3*MASK_LEN`, which parses as
// `(idx % 3) * MASK_LEN` because % and * share precedence — the group test
// clearly intends `idx % (3*MASK_LEN)`. Also added the missing tail guard
// (the `n` parameter was previously unused).
__global__ void to_sum_var_count(const float * reduced, const int n,
float * sum, float * var, float * count) {
const int block = blockIdx.x * blockDim.x;
const int tid = threadIdx.x;
const int idx = block + tid;
if (idx >= n) return; // guard the grid tail
// Destination slot within the per-group MASK_LEN layout.
int new_pos = MASK_LEN*floorf(idx / float(3*MASK_LEN)) + idx % MASK_LEN;
const int slot = idx % (3*MASK_LEN); // position inside one group
if(slot < MASK_LEN) {
sum[new_pos] = reduced[idx];
} else if(slot < 2*MASK_LEN) {
// NOTE(review): the extra += MASK_LEN / += 2*MASK_LEN offsets below are
// kept from the original; they look odd for three separate arrays —
// confirm the intended output layout against the caller.
new_pos += MASK_LEN;
var[new_pos] = reduced[idx] - pow(reduced[idx - MASK_LEN], 2);
} else {
new_pos += 2*MASK_LEN;
count[new_pos] = reduced[idx];
}
}
// Forward pass: for each pixel (b, r, c), write into the slot i whose label
// MASKS_INDICIES[i] matches mask[index] three per-slot statistics planes:
//   o_split[s_idx]            = image value        (for sums)
//   o_split[s_idx + offset]   = image value squared (for sums of squares)
//   o_split[s_idx + 2*offset] = 1                   (for counts)
// where each of the three regions is offset = bs*N*N*MASK_LEN floats long.
// Launch layout: blockIdx.x = batch, (blockIdx.y, threadIdx.y) = row,
// (blockIdx.z, threadIdx.z) = column. Non-matching slots are left untouched,
// so o_split is expected to be zeroed by the caller beforehand.
__global__ void image_mask_split(const float * mask, const float * image,
const int bs, const int N, float * o_split)
{
const int block_r = blockIdx.y * blockDim.y;
const int block_c = blockIdx.z * blockDim.z;
const int r = block_r + threadIdx.y;
const int c = block_c + threadIdx.z;
const int b = blockIdx.x;
if (b < bs && r < N && c < N) {
// Base offset of this pixel's slot 0; slots are next_mask_offset apart.
const int s_idx_base = index4(bs, N, N, 0, b, r, c);
const int next_mask_offset = bs*N*N;
const int offset = bs*N*N*MASK_LEN;
const int index = index3(N, N,
b, r, c);
for(int i = 0; i < MASK_LEN; i++) {
const int s_idx = s_idx_base + i*next_mask_offset;
if(mask[index] == MASKS_INDICIES[i]) {
o_split[s_idx] = image[index];
o_split[s_idx + offset] = pow(image[index], 2);
o_split[s_idx + 2*offset] = 1;
}
}
}
}
// Gradient entry point used when upstream gradients for BOTH the sum slice
// and the pow (sum-of-squares) slice are available.
__global__ void image_mask_split_grad_sum_pow(const float * mask, const float * image,
const float * out_grad_sum, const float * out_grad_pow,
const int bs, const int N, float * grad)
{
tmpl_image_mask_split_grad<true, true>(mask, image, out_grad_sum, out_grad_pow, bs, N, grad);
}
// Gradient entry point used when only the sum-slice upstream gradient is
// available; the pow pointer is NULL and its branch compiles out.
__global__ void image_mask_split_grad_sum(const float * mask, const float * image,
const float * out_grad_sum, const int bs,
const int N, float * grad)
{
tmpl_image_mask_split_grad<true, false>(mask, image, out_grad_sum, NULL, bs, N, grad);
}
// Gradient entry point used when only the pow-slice upstream gradient is
// available; the sum pointer is NULL and its branch compiles out.
__global__ void image_mask_split_grad_pow(const float * mask, const float * image,
const float * out_grad_pow, const int bs,
const int N, float * grad)
{
tmpl_image_mask_split_grad<false, true>(mask, image, NULL, out_grad_pow, bs, N, grad);
}
}
|
13,102 | #include <stdio.h>
#include <iostream>
#include <string>
#include <cuda.h>
#include <chrono>
// File-scope host state: loop index and pi accumulator used by main's
// final host-side reduction.
int tid;
double pi = 0;
// Print a diagnostic and abort the process when a CUDA API call fails;
// no-op on cudaSuccess. Invoked through the HANDLE_ERROR macro below, which
// supplies the call site's file and line.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess) {
        return;
    }
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Kernel that executes on the CUDA device.
// Midpoint-rule accumulation for pi = integral over [0,1] of 4/(1+x^2) dx:
// each thread adds its grid-strided subset of the nbin midpoint samples into
// sum[idx]; the host later sums all partials and divides by nbin.
// sum must be zeroed before launch (main does this with cudaMemset).
__global__ void cal_pi(double *sum, int nbin, int nthreads, int nblocks) {
unsigned int i;
double x;
// Sequential thread index across the blocks
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// Stride equals the total thread count, so coverage is complete for any
// nbin (grid-stride loop).
for (i=idx; i < nbin; i+=nthreads*nblocks) {
x = (i+.5) / nbin;
sum[idx] += 4./(1. + x*x);
}
}
// Main routine that executes on the host
// Host driver: parses <log2_steps> <nblocks> <nthreads>, launches cal_pi,
// reduces the per-thread partial sums on the host, and prints pi + timing.
int main(int argc, char* argv[]) {
    /* Settings */
    // Guard against missing arguments (the original dereferenced argv[1..3]
    // unconditionally and crashed when run without them).
    if (argc < 4) {
        printf("usage: %s <log2_steps> <nblocks> <nthreads>\n", argv[0]);
        return EXIT_FAILURE;
    }
    unsigned int n_steps = 1<<std::stoi(argv[1]);
    unsigned int nblocks = std::stoi(argv[2]);
    unsigned int nthreads = std::stoi(argv[3]);
    printf(" N = %11u\n", n_steps);
    printf(" N thread blocks = %11i\n", nblocks);
    // Fixed: the original printed nblocks again on this line.
    printf("N threads per block = %11i\n", nthreads);
    dim3 dimGrid(nblocks,1,1); // Grid dimensions
    dim3 dimBlock(nthreads,1,1); // Block dimensions
    double *sumHost, *sumDev; // Pointer to host & device arrays
    size_t size = nblocks*nthreads*sizeof(double); // Size of the device array
    sumHost = (double *)malloc(size); // Allocate array on host
    HANDLE_ERROR(cudaMalloc((void **) &sumDev, size)); // Allocate array on device
    auto t1 = std::chrono::system_clock::now();
    // Initialization: zero the per-thread partial sums
    HANDLE_ERROR(cudaMemset(sumDev, 0, size));
    /* Invoke the CUDA kernel */
    cal_pi <<<dimGrid, dimBlock>>> (sumDev, n_steps, nthreads, nblocks); // call CUDA kernel
    HANDLE_ERROR(cudaGetLastError());      // catch bad launch configurations
    HANDLE_ERROR(cudaDeviceSynchronize()); // Wait for calculations finished
    /* Reduction of per-thread partials on the host */
    HANDLE_ERROR(cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost));
    for(tid=0; tid<nblocks*nthreads; ++tid) {
        pi += sumHost[tid];
    }
    pi /= n_steps;
    auto t2 = std::chrono::system_clock::now();
    /* Results */
    printf("\nPI = %1.18f\n",pi);
    std::cout << "Computation time: "
              << std::chrono::duration <double, std::milli> (t2 - t1).count()
              << " ms.\n";
    // Free memory
    free(sumHost);
    cudaFree(sumDev);
    return 0;
}
|
13,103 | #include <cstdio>
#include <cmath>
#define ARR_SIZE 64
__global__
// Demonstration kernel: shifts shared-memory values left by one WITHOUT any
// __syncthreads(), so a thread may read sh_array[idx+1] before or after its
// neighbour overwrites it — results are nondeterministic by design (compare
// with shift_with_barrier below).
void shift_no_barrier(int *d_array)
{
int idx=threadIdx.x;
__shared__ int sh_array[ARR_SIZE];
sh_array[idx]=threadIdx.x;
// Racy read-modify-write: no barrier between the write above and this read.
if (idx<ARR_SIZE-1)
sh_array[idx] = sh_array[idx+1];
d_array[idx] = sh_array[idx];
}
__global__
// Correctly-synchronized left-shift of shared-memory values: separate the
// read and write phases with block-wide barriers.
//
// Fixed: the original placed __syncthreads() inside `if (idx < ARR_SIZE-1)`.
// The kernel is launched with ARR_SIZE threads, so the last thread skipped
// the branch and never reached those barriers — undefined behavior (barriers
// must be reached by every thread in the block).
void shift_with_barrier(int *d_array)
{
int idx=threadIdx.x;
__shared__ int sh_array[ARR_SIZE];
sh_array[idx]=threadIdx.x;
__syncthreads();
// Read phase: every thread computes its new value (last thread keeps its
// own, matching the original semantics), with no divergence at barriers.
int temp = (idx < ARR_SIZE-1) ? sh_array[idx+1] : sh_array[idx];
__syncthreads();
// Write phase: safe now that all reads have completed.
sh_array[idx] = temp;
__syncthreads();
d_array[idx] = sh_array[idx];
}
// Runs both shift kernels on one block of ARR_SIZE threads and prints the
// resulting arrays so the race in shift_no_barrier can be observed.
int main(void)
{
    // Fixed: the byte count was computed with sizeof(float) although the
    // arrays hold int. The sizes coincide on common platforms, but the type
    // must match the data.
    const int ARR_BYTES=ARR_SIZE*sizeof(int);
    int h_array[ARR_SIZE];
    // Declare and alloc array on device
    int *d_array;
    cudaMalloc((void **) &d_array, ARR_BYTES);
    // No Barrier: output may show stale/err values due to the data race
    shift_no_barrier<<<1, ARR_SIZE>>>(d_array);
    cudaMemcpy(h_array, d_array, ARR_BYTES, cudaMemcpyDeviceToHost);
    for (int i=0; i<ARR_SIZE; i++){
        printf("%d, ", h_array[i]);
    }
    printf("\n");
    // With Barrier: deterministic left-shifted sequence
    shift_with_barrier<<<1, ARR_SIZE>>>(d_array);
    cudaMemcpy(h_array, d_array, ARR_BYTES, cudaMemcpyDeviceToHost);
    for (int i=0; i<ARR_SIZE; i++){
        printf("%d, ", h_array[i]);
    }
    printf("\n");
    // Free memory
    cudaFree(d_array);
    return 0;
}
|
13,104 | //
// COVID_spread.c
// Parallel Programming, Spring 2020
//
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>
//use random number generator to initialize the Graph
//this init function is after the initialization of MPI
//initialize Graph, G_data, G_resultdata and Day_of_cure
// Global function #1: Initialization
// Zero the infection state (G_data, G_resultdata) and reset Day_of_cure for
// every individual. Graph / Num_of_connections_per_person / max_connections
// are accepted to match the launch signature but are not touched here.
//
// Fixed: the original looped every thread over the FULL population, so all
// threads redundantly wrote every element; a grid-stride loop touches each
// index exactly once across the grid.
__global__ void gol_init(const int population, unsigned int** Graph, unsigned int* G_data, unsigned int* G_resultdata, unsigned int * Day_of_cure, unsigned int* Num_of_connections_per_person, int max_connections ){
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < population; i += blockDim.x * gridDim.x){
        G_data[i] = 0;
        G_resultdata[i] = 0;
        // -1 wraps to UINT_MAX in this unsigned array — presumably a
        // "no cure day scheduled" sentinel (kept from the original).
        Day_of_cure[i] = -1;
    }
}
// Host-side setup: selects this MPI rank's device, allocates managed memory
// for the simulation arrays, builds a random connection graph, and launches
// gol_init to zero the state.
extern "C" void gol_init_master(const int population, unsigned int** Graph, unsigned int* G_data, unsigned int* G_resultdata, unsigned int * Day_of_cure, int myrank, unsigned int* Num_of_connections_per_person, int threadsCount, int max_connections ){
    int numBlocks = (population+threadsCount-1) / threadsCount;
    // Select the device BEFORE allocating or launching anything, so the
    // managed allocations and the kernel target the same device (the
    // original selected the device last).
    int cE, cudaDeviceCount;
    if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
    {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n",
               cE, cudaDeviceCount );
        exit(-1);
    }
    if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
    {
        printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
               myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }
    // NOTE(review): these pointer parameters are received BY VALUE, so the
    // caller never observes the allocations made below; fixing that needs
    // pointer-to-pointer parameters. Left as-is to preserve the existing
    // ABI — confirm against the call sites.
    cudaMallocManaged(&Graph, (population * sizeof(unsigned int*)));
    cudaMallocManaged(&G_data, (population * sizeof(unsigned int)));
    cudaMallocManaged(&G_resultdata, (population * sizeof(unsigned int)));
    cudaMallocManaged(&Day_of_cure, (population * sizeof(unsigned int)));
    cudaMallocManaged(&Num_of_connections_per_person, (population * sizeof(unsigned int)));
    int i;
    for (i = 0; i < population; i++){
        // Each person gets 1..max_connections connections.
        int num_connections = rand() % max_connections + 1;
        Num_of_connections_per_person[i] = num_connections;
        cudaMallocManaged(&Graph[i], (num_connections * sizeof(unsigned int)));
    }
    // Populate each individual's connection list with random person indices.
    for (i = 0; i < population; i++){
        int j;
        // Fixed: the loop bound used Num_of_connections_per_person[j]
        // (the loop variable) instead of [i].
        for (j = 0; j < Num_of_connections_per_person[i]; j++){
            // Fixed: `rand() % population + 1` produced ids in
            // [1, population], one past the last valid index.
            int curr_connection = rand() % population;
            Graph[i][j] = curr_connection;
        }
    }
    gol_init<<<numBlocks, threadsCount>>>(population,Graph,G_data,G_resultdata,Day_of_cure,Num_of_connections_per_person, max_connections);
}
//Generate G_resultdata from current G_data and current Day_of_cure, update Day_of_cure
// Global Function #2: Iteration
// One simulation day, grid-strided over the population:
//   pass 1 — individuals whose scheduled cure day is today recover and
//            become invulnerable;
//   pass 2 — every infected individual spreads the infection.
// NOTE(review): in pass 2 the code tests and writes G_data[i]/G_resultData[i]
// where i is the CONNECTION SLOT (0..num_connections-1), not the connection
// id stored in Graph[j][i]; Graph is never read here, which looks
// unintended — confirm against the model spec.
// NOTE(review): multiple threads can write G_resultData[i], Day_of_cure[i]
// and invunerable_individuals[j] concurrently without atomics; on a given
// day the stored values coincide, but this is still a data race.
__global__ void gol_kernel(const unsigned int* G_data, unsigned int* G_resultData, unsigned int** Graph, unsigned int* Day_of_cure, int threshold, int threadsCount, int currDay, const int population, unsigned int* Num_of_connections_per_person, const int recovery_period, unsigned int* invunerable_individuals){
int index = blockIdx.x * blockDim.x + threadIdx.x;
// 1 -> invunerable
// 2 -> vunerable
// Cure individuals that recover on this day
int j;
for (j = index; j < population; j += blockDim.x * gridDim.x){
// If the individual is infected
if (G_data[j] == 1){
// In the case today is the day this individual is to be cured
if (Day_of_cure[j] == currDay){
G_resultData[j] = 0;
invunerable_individuals[j] = 1;
}
}
}
// Process individuals that are infected
for (j = index; j < population; j+= blockDim.x * gridDim.x){
// In the case that the current indiviudal is infected
if (G_data[j] == 1){
// Process his/her connections and note its spread
int i;
for (i = 0; i < Num_of_connections_per_person[j]; i++){
// If the connection is not currently infected
if (G_data[i] == 0 && invunerable_individuals[i] != 1){
G_resultData[i] = 1;
Day_of_cure[i] = currDay + recovery_period;
}
}
}
}
return;
}
// Count the number of infected people (G_data[i] == 1); grid-strided over
// the population, accumulating into the device counter *count, which must
// be zeroed before launch.
//
// Fixed two defects: `*count++` incremented the POINTER rather than the
// pointed-to value, and concurrent increments from many threads require an
// atomic to avoid lost updates.
__global__ void countInfectedPeople (unsigned int* G_data, const int population, int* count){
    int i;
    int index = blockIdx.x *blockDim.x + threadIdx.x;
    for (i=index;i<population;i+=blockDim.x * gridDim.x){
        if (G_data[i]==1) atomicAdd(count, 1);
    }
}
// Host wrapper: returns the number of infected people in G_data (a device/
// managed array of `population` entries).
//
// Fixed: the original passed the address of a host STACK variable to the
// kernel (invalid device pointer) and returned the host value without ever
// copying the result back. A device counter is allocated, zeroed, filled by
// the kernel, and copied back (the blocking memcpy also synchronizes).
extern "C" int getInfectedPeople(unsigned int* G_data, const int population, int threadsCount){
    int numBlocks = (population+threadsCount-1)/threadsCount;
    int count = 0;
    int *d_count;
    cudaMalloc((void **) &d_count, sizeof(int));
    cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice);
    countInfectedPeople<<<numBlocks, threadsCount>>>(G_data, population, d_count);
    cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_count);
    return count;
}
// Exchange the two buffer pointers so that the previous day's result becomes
// the next day's input; no array data is copied.
extern "C" void gol_swap( unsigned int **G_data, unsigned int **G_resultdata)
{
    unsigned int *previous_data = *G_data;
    *G_data = *G_resultdata;
    *G_resultdata = previous_data;
}
// Runs the simulation day-by-day until the number of infected people exceeds
// `threshold`, and returns the number of iterations (days) taken. On return
// *G_data / *G_resultData point at the final (possibly swapped) buffers.
extern "C" int gol_kernelLaunch(unsigned int** G_data,
unsigned int** G_resultData,
unsigned int*** Graph,
unsigned int** Day_of_cure,
const int population,
const int threshold,
int threadsCount,
unsigned int** Num_of_connections_per_person,
const int recovery_period
){
    unsigned int* D_data = *G_data;
    unsigned int* D_resultData = *G_resultData;
    int numBlocks = (population+threadsCount-1)/threadsCount;
    int infected = getInfectedPeople(D_data,population,threadsCount);
    int i=0;
    // Fixed: allocate the scratch buffer ONCE instead of once per iteration;
    // the original also called cudaFree(&ptr) — the address of the local
    // pointer, not the allocation — leaking the buffer every day.
    unsigned int* invunerable_individuals;
    cudaMallocManaged(&invunerable_individuals, population * sizeof(unsigned int));
    while(infected<=threshold)
    {
        // Reset the per-day invulnerability flags.
        int j;
        for (j = 0; j < population; j++){
            invunerable_individuals[j] = 0;
        }
        gol_kernel<<<numBlocks, threadsCount>>>(D_data, D_resultData, *Graph, *Day_of_cure, threshold, threadsCount, i, population, *Num_of_connections_per_person, recovery_period, invunerable_individuals);
        infected=getInfectedPeople(D_resultData,population,threadsCount);
        cudaDeviceSynchronize();
        gol_swap(&D_data, &D_resultData);
        i++;
    }
    cudaFree(invunerable_individuals);
    // Fixed: propagate the swapped buffers back through the out-parameters.
    // The original assigned addresses of LOCALS to the parameters
    // (G_data = &D_data), which has no effect outside this function.
    *G_data = D_data;
    *G_resultData = D_resultData;
    cudaDeviceSynchronize();
    return i;
}
// Print the infection flags of all individuals, 100 values per line.
// (Despite the name, this prints G_data[i] for every i, not only indexes of
// infected people — kept as-is to preserve output format.)
extern "C" void gol_print_infected(unsigned int* G_data, const int population)
{
    int i;
    for( i = 0; i < population; i++)
    {
        printf("%d ",G_data[i]);
        // Fixed precedence: the original `i+1% 100==0` parsed as
        // `i + (1%100) == 0`, i.e. i+1==0 — never true, so line breaks
        // were never emitted.
        if ((i+1) % 100 == 0){
            printf("\n");
        }
    }
    printf("\n\n");
}
// Stub: presumably intended to print each individual's scheduled cure day;
// currently a deliberate no-op.
extern "C" void gol_print_cured(unsigned int** Day_of_cure, const int population){
}
|
13,105 | #include <stdio.h>
#include <stdlib.h>
// Element-wise vector add c[i] = a[i] + b[i], one element per BLOCK
// (launched as <<<N,1>>>; blockIdx.x is the element index).
__global__ void add(int* a, int* b, int* c)
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
// Put count random integers into integer [array] pointed to by p
void random_ints(int *p, int count)
{
for (int i=0; i < count; i++) {
p[i] = rand() % 32768; // limit random numbers to [0, 32768]
}
}
#define N 512
// Allocates host/device arrays, adds two random vectors on the GPU (one
// element per block), and verifies the result against a host-side sum.
int main(void)
{
    int *a, *b, *c; // host copies of data
    int *d_a, *d_b, *d_c; // devices copies of data
    int size = N * sizeof(int); // Number of bytes for N integers
    printf("Total integer space size is %d bytes\n", size);
    // Allocate space for device copies of data
    cudaMalloc((void**) &d_a, size);
    cudaMalloc((void**) &d_b, size);
    cudaMalloc((void**) &d_c, size);
    // Allocate space for host copies of input values (a, b, c arrays)
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    // Setup integers
    random_ints(a, N);
    random_ints(b, N);
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() on device: N blocks of 1 thread each
    add<<<N,1>>>(d_a, d_b, d_c);
    // Copy result to host (blocking memcpy also synchronizes with the kernel)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Print out results, verifying each element against the host-side sum
    printf("\nResults of parallel GPU addition across %d elements:\n", N);
    for (int i=0; i < N; i++) {
        int sum = a[i] + b[i];
        if (c[i] == sum) {
            printf("%4d: %6d + %6d = %6d\n", i, a[i], b[i], c[i]);
        }
        else {
            // Fixed: the error line was missing its trailing newline, so
            // subsequent lines ran together on one row.
            printf("%4d: %6d + %6d != %6d ERROR, should be %d\n", i, a[i], b[i], c[i], sum);
        }
    }
    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
13,106 | #include <cuda.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
// Element-wise in-place addition over an m x n row-major matrix:
// x[i*n + j] += y[i*n + j]. Expects a 2-D launch where tidx indexes rows
// and tidy indexes columns; the guard handles the grid tail.
__global__ void kernel(int* x, int* y, int m, int n) {
size_t tidx = threadIdx.x + blockIdx.x*blockDim.x;
size_t tidy = threadIdx.y + blockIdx.y*blockDim.y;
if (tidx < m && tidy < n) {
x[tidx*n + tidy] += y[tidx*n + tidy];
}
}
// Adds two 1000x1000 matrices of ones on the GPU with a 16x16-thread 2-D
// launch, then verifies every element equals 2.
int main(int argc, char** argv) {
size_t m = 1000;
size_t n = 1000;
// Host matrices, flattened row-major and initialized to all ones.
std::vector<int> x(m*n, 1);
std::vector<int> y(m*n, 1);
int* d_x;
cudaMalloc(&d_x, sizeof(int)*m*n);
int* d_y;
cudaMalloc(&d_y, sizeof(int)*m*n);
cudaMemcpy(d_x, x.data(), sizeof(int)*m*n, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y.data(), sizeof(int)*m*n, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
size_t block_size = 16;
// ceil(grid_size / block_size)
dim3 grid((m + block_size - 1) / block_size,
(n + block_size - 1) / block_size);
dim3 block(block_size, block_size);
kernel<<<grid, block>>>(d_x, d_y, m, n);
// Blocking copy back; implicitly waits for the kernel to finish.
cudaMemcpy(x.data(), d_x, sizeof(int)*m*n, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Host-side verification: every element of 1+1 must be 2.
bool all_twos = true;
for (size_t i = 0; i < x.size(); i++) {
if (x[i] != 2) {
printf("Breaking with %lu == %d\n", i, x[i]);
all_twos = false;
break;
}
}
if (all_twos) {
printf("OK!\n");
} else {
printf("FAILED.\n");
}
cudaFree(d_x);
cudaFree(d_y);
return 0;
}
|
13,107 | #include<stdlib.h>
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#include<time.h>
__global__
// GPU port of the cell-list short-range force update (see the host `update`
// below): for particle ip of cell ic, scan the 27 neighbour cells from
// nl_list_ln and accumulate the screened-Coulomb force into that particle's
// bin_atom_ln record (slots +5/+6/+7 are the force components).
// Record layout per particle (8 floats): id, Z, x, y, z, fx, fy, fz.
// NOTE(review): the thread-to-(cell, particle) mapping is suspect — ic is a
// block-linear index while thId spans threads, and `ip = thId - ic*27`
// together with the `ip < ic*37 + bin_count[ic]` guard and the UNSCALED
// reads `bin_atom_ln[ip*8 + ...]` (the host version uses
// ic*bin_atom_len + ip*8, see the commented-out lines) do not obviously
// index the same data as the reference. Confirm before relying on results.
void updateKernel(float kappa, float G, float rc, float pi, int Ncell, int* bin_count, float* bin_atom_ln, int bin_atom_len, float* nl_list_ln, int cell_len)
{
int ic = blockIdx.x + blockIdx.y * gridDim.x;
int thId = threadIdx.x + blockDim.x * blockIdx.x + blockIdx.y * gridDim.x * blockDim.x;
int ip = thId - ic*27;
//num_threads - num_of_atoms_in_cutoff_sphere = 10;
int jn;
float Zc_ip, x_ip, y_ip, z_ip;
float Zc_jp, x_jp, y_jp, z_jp;
int c_ipart, c_jpart;
float n_x_sh, n_y_sh, n_z_sh;
float dx, dy, dz, r, f1, f2, f3, fr;
float G_r_k_p, G_r_k_m;
// Precomputed sqrt(pi), used in the f3 Gaussian term below.
float sqt_pi = 1.7724538509055159;
if(ic < Ncell){
if(ip < (ic*37+bin_count[ic])){
//for(int ic = 0; ic < Ncell; ic++){
// for(int ip = 0; ip < bin_count[ic]; ip++){
//c_ipart = bin_atom_ln[ic*bin_atom_len+ip*8];
//Zc_ip = bin_atom_ln[ic*bin_atom_len+ip*8+1];
//x_ip = bin_atom_ln[ic*bin_atom_len+ip*8+2];
//y_ip = bin_atom_ln[ic*bin_atom_len+ip*8+3];
//z_ip = bin_atom_ln[ic*bin_atom_len+ip*8+4];
c_ipart = bin_atom_ln[ip*8];
Zc_ip = bin_atom_ln[ip*8+1];
x_ip = bin_atom_ln[ip*8+2];
y_ip = bin_atom_ln[ip*8+3];
z_ip = bin_atom_ln[ip*8+4];
// Scan all 27 neighbour cells; each nl_list_ln entry is 4 floats:
// neighbour cell id + periodic shift (x, y, z).
for(int jc = 0; jc < 27; jc++){
jn = (int) nl_list_ln[ic*cell_len+jc*4];
for(int jp = 0; jp < bin_count[jn]; jp++){
c_jpart = bin_atom_ln[jn*bin_atom_len+jp*8];
Zc_jp = bin_atom_ln[jn*bin_atom_len+jp*8+1];
x_jp = bin_atom_ln[jn*bin_atom_len+jp*8+2];
y_jp = bin_atom_ln[jn*bin_atom_len+jp*8+3];
z_jp = bin_atom_ln[jn*bin_atom_len+jp*8+4];
n_x_sh = nl_list_ln[ic*cell_len+jc*4+1];
n_y_sh = nl_list_ln[ic*cell_len+jc*4+2];
n_z_sh = nl_list_ln[ic*cell_len+jc*4+3];
//if(ic == 0) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart);}
// Skip self-interaction; unlike the host version this visits
// every ordered pair (full force per particle, no Newton's
// third-law halving).
if(c_ipart != c_jpart){
//if((ic == 0) && (ip == 0)) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart);}
// Minimum-image separation using the neighbour-cell shift.
dx = x_ip - (x_jp + n_x_sh);
dy = y_ip - (y_jp + n_y_sh);
dz = z_ip - (z_jp + n_z_sh);
r = sqrt(dx*dx + dy*dy + dz*dz);
if(r < rc){
G_r_k_p = G*r + 0.5*kappa/G;
G_r_k_m = G*r - 0.5*kappa/G;
// Screened-Coulomb (erfc-damped) radial force magnitude.
f1 = (0.5/(r*r)) * exp(kappa*r) * erfc(G*r + 0.5*kappa/G) * (1.0 - kappa*r);
f2 = (0.5/(r*r)) * exp(-kappa*r) * erfc(G*r - 0.5*kappa/G) * (1.0 + kappa*r);
f3 = (G/(sqt_pi*r)) * (exp(-G_r_k_p*G_r_k_p) * exp(kappa*r) + exp(-G_r_k_m*G_r_k_m) * exp(-kappa*r) );
fr = Zc_ip*Zc_jp*(f1 + f2 + f3);
//bin_atom_ln[ic*bin_atom_len+ip*8+5] = bin_atom_ln[ic*bin_atom_len+ip*8+5] + (fr*dx/r);
//bin_atom_ln[ic*bin_atom_len+ip*8+6] = bin_atom_ln[ic*bin_atom_len+ip*8+6] + (fr*dy/r);
//bin_atom_ln[ic*bin_atom_len+ip*8+7] = bin_atom_ln[ic*bin_atom_len+ip*8+7] + (fr*dz/r);
bin_atom_ln[ip*8+5] = bin_atom_ln[ip*8+5] + (fr*dx/r);
bin_atom_ln[ip*8+6] = bin_atom_ln[ip*8+6] + (fr*dy/r);
bin_atom_ln[ip*8+7] = bin_atom_ln[ip*8+7] + (fr*dz/r);
//bin_atom_ln[jn*bin_atom_len+jp*8+5] = bin_atom_ln[jn*bin_atom_len+jp*8+5] - (fr*dx/r);
//bin_atom_ln[jn*bin_atom_len+jp*8+6] = bin_atom_ln[jn*bin_atom_len+jp*8+6] - (fr*dy/r);
//bin_atom_ln[jn*bin_atom_len+jp*8+7] = bin_atom_ln[jn*bin_atom_len+jp*8+7] - (fr*dz/r);
}
}
}
}
}
}
// Host reference implementation of the cell-list force update: for every
// particle ip in every cell ic, scan the 27 neighbour cells (ids + periodic
// shifts packed 4-wide in nl_list_ln) and accumulate the screened-Coulomb
// force into the particle records in bin_atom_ln.
// Record layout per particle (8 floats): id, Z, x, y, z, fx, fy, fz.
// Uses Newton's third law: each unordered pair (c_ipart < c_jpart) is
// processed once, adding +f to particle i and -f to particle j.
void update(float kappa, float G, float rc, float pi, int Ncell, int* bin_count, float* bin_atom_ln, int bin_atom_len, float* nl_list_ln, int cell_len)
{
// int cN, cp, cxN, cyN, czN, cxsh, cysh, czsh;
//float rsh_x, rsh_y, rsh_z;
int jn;
float Zc_ip, x_ip, y_ip, z_ip;
float Zc_jp, x_jp, y_jp, z_jp;
int c_ipart, c_jpart;
float n_x_sh, n_y_sh, n_z_sh;
float dx, dy, dz, r, f1, f2, f3, fr;
for(int ic = 0; ic < Ncell; ic++){
for(int ip = 0; ip < bin_count[ic]; ip++){
// Unpack particle ip of cell ic.
c_ipart = bin_atom_ln[ic*bin_atom_len+ip*8];
Zc_ip = bin_atom_ln[ic*bin_atom_len+ip*8+1];
x_ip = bin_atom_ln[ic*bin_atom_len+ip*8+2];
y_ip = bin_atom_ln[ic*bin_atom_len+ip*8+3];
z_ip = bin_atom_ln[ic*bin_atom_len+ip*8+4];
for(int jc = 0; jc < 27; jc++){
jn = (int) nl_list_ln[ic*cell_len+jc*4];
for(int jp = 0; jp < bin_count[jn]; jp++){
c_jpart = bin_atom_ln[jn*bin_atom_len+jp*8];
Zc_jp = bin_atom_ln[jn*bin_atom_len+jp*8+1];
x_jp = bin_atom_ln[jn*bin_atom_len+jp*8+2];
y_jp = bin_atom_ln[jn*bin_atom_len+jp*8+3];
z_jp = bin_atom_ln[jn*bin_atom_len+jp*8+4];
n_x_sh = nl_list_ln[ic*cell_len+jc*4+1];
n_y_sh = nl_list_ln[ic*cell_len+jc*4+2];
n_z_sh = nl_list_ln[ic*cell_len+jc*4+3];
//if(ic == 0) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart);}
// Process each unordered pair once (id ordering).
if(c_ipart < c_jpart){
//if(ic == 0) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart);}
// Minimum-image separation using the neighbour-cell shift.
dx = x_ip - (x_jp + n_x_sh);
dy = y_ip - (y_jp + n_y_sh);
dz = z_ip - (z_jp + n_z_sh);
r = sqrt(dx*dx + dy*dy + dz*dz);
if(r < rc){
// Screened-Coulomb (erfc-damped) radial force magnitude.
f1 = (0.5/(r*r)) * exp(kappa*r) * erfc(G*r + 0.5*kappa/G) * (1.0 - kappa*r);
f2 = (0.5/(r*r)) * exp(-kappa*r) * erfc(G*r - 0.5*kappa/G) * (1.0 + kappa*r);
f3 = (G/(sqrt(pi)*r)) * (exp(-pow((G*r + 0.5*kappa/G),2)) * exp(kappa*r) + exp(-pow((G*r - 0.5*kappa/G),2)) * exp(-kappa*r) );
fr = Zc_ip*Zc_jp*(f1 + f2 + f3);
// Accumulate +f on particle i, -f on particle j.
bin_atom_ln[ic*bin_atom_len+ip*8+5] = bin_atom_ln[ic*bin_atom_len+ip*8+5] + (fr*dx/r);
bin_atom_ln[ic*bin_atom_len+ip*8+6] = bin_atom_ln[ic*bin_atom_len+ip*8+6] + (fr*dy/r);
bin_atom_ln[ic*bin_atom_len+ip*8+7] = bin_atom_ln[ic*bin_atom_len+ip*8+7] + (fr*dz/r);
//bin_atom_ln[ip*8+5] = bin_atom_ln[ip*8+5] + (fr*dx/r);
//bin_atom_ln[ip*8+6] = bin_atom_ln[ip*8+6] + (fr*dy/r);
//bin_atom_ln[ip*8+7] = bin_atom_ln[ip*8+7] + (fr*dz/r);
bin_atom_ln[jn*bin_atom_len+jp*8+5] = bin_atom_ln[jn*bin_atom_len+jp*8+5] - (fr*dx/r);
bin_atom_ln[jn*bin_atom_len+jp*8+6] = bin_atom_ln[jn*bin_atom_len+jp*8+6] - (fr*dy/r);
bin_atom_ln[jn*bin_atom_len+jp*8+7] = bin_atom_ln[jn*bin_atom_len+jp*8+7] - (fr*dz/r);
}
}
}
}
}
}
int main()
{
float kappa, G;
float L, rc;
float rcx, rcy, rcz;
float diff;
struct timespec start, end;
float diff_c;
struct timespec start_c, end_c;
int ipart;
int i, j;
int c, cx, cy, cz;
int N = 10000000;
float const pi = 3.141592653589793;
float const emp = -50;
float **pos = (float **)malloc(N*sizeof(float *));
for(i = 0; i < N; i++){
pos[i] = (float *)malloc(3*sizeof(float));
}
float *Z = (float *)malloc(N*sizeof(float));
for(i = 0; i < N; i++){
Z[i] = 1.0;
}
FILE *file;
file = fopen("pos_1e7.txt", "r");
for(i = 0; i < N; i++){
for(j = 0; j < 3; j++){
if(!fscanf(file, "%f", &pos[i][j]))
break;
}
}
fclose(file);
kappa = 0.1;
G = 0.5;
rc = 3.0;
L = pow(4.0 * pi * N/3.0, 1.0/3.0);
printf("L = %f\n", L);
float Lx = L;
float Ly = L;
float Lz = L;
int Lxd = (int) floor(Lx/rc);
int Lyd = (int) floor(Ly/rc);
int Lzd = (int) floor(Lz/rc);
int Ncell = Lxd*Lyd*Lzd;
printf("%d %d %d %d\n", Lxd, Lyd, Lzd, Ncell);
rcx = Lx/Lxd;
rcy = Ly/Lyd;
rcz = Lz/Lzd;
printf("%f %f %f\n",rcx, rcy, rcz);
int bin_atom_c = (int) (1* pow(rc,3) + 10.0);
printf("%d\n",bin_atom_c);
const int atm_len = 8;
int bin_atom_len = atm_len * bin_atom_c;
float bin_atom[Ncell][atm_len*bin_atom_c];
//float bin_atom_ln[Ncell*bin_atom_len];
float *bin_atom_ln = (float *)malloc(Ncell*bin_atom_len*sizeof(float));
for(i = 0; i < Ncell; i++){
for(j = 0; j < bin_atom_c; j++){
bin_atom[i][j] = emp;
bin_atom_ln[i*bin_atom_len + j] = emp;
}
}
int *bin_count = (int *)malloc(Ncell*sizeof(int));
int bcount, bin_idx;
for(i = 0; i < Ncell; i++){
bin_count[i] = 0;
}
printf("bin_count[1] = %d\n", bin_count[1]);
for(ipart = 0; ipart < N; ipart++){
cx = (int) floor(pos[ipart][0]/rcx);
cy = (int) floor(pos[ipart][1]/rcy);
cz = (int) floor(pos[ipart][2]/rcz);
c = cx + cy*Lxd + cz*Lxd*Lyd;
//if(c == 1){printf("c = %d, ipart = %d, bin_count[%d] = %d\n", c, ipart, c, bin_count[c]);}
bcount = bin_count[c];
bin_idx = atm_len*bcount;
bin_atom[c][bin_idx] = ipart;
//if(c == 1){printf("c = %d, ipart = %d, bin_count[%d] = %d, bin_idx = %d, bin_atom[%d][bin_idx] = %f\n", c, ipart, c, bin_count[c], bin_idx, c, bin_atom[c][bin_idx]);}
bin_atom[c][bin_idx+1] = Z[ipart];
bin_atom[c][bin_idx+2] = pos[ipart][0];
bin_atom[c][bin_idx+3] = pos[ipart][1];
bin_atom[c][bin_idx+4] = pos[ipart][2];
bin_atom[c][bin_idx+5] = 0.0;
bin_atom[c][bin_idx+6] = 0.0;
bin_atom[c][bin_idx+7] = 0.0;
bin_atom_ln[c*bin_atom_len+bin_idx] = ipart;
//if(c == 1){printf("c = %d, ipart = %d, bin_count[%d] = %d, bin_idx = %d, bin_atom[%d][bin_idx] = %f\n", c, ipart, c, bin_count[c], bin_idx, c, bin_atom[c][bin_idx]);}
bin_atom_ln[c*bin_atom_len+bin_idx+1] = Z[ipart];
bin_atom_ln[c*bin_atom_len+bin_idx+2] = pos[ipart][0];
bin_atom_ln[c*bin_atom_len+bin_idx+3] = pos[ipart][1];
bin_atom_ln[c*bin_atom_len+bin_idx+4] = pos[ipart][2];
bin_atom_ln[c*bin_atom_len+bin_idx+5] = 0.0;
bin_atom_ln[c*bin_atom_len+bin_idx+6] = 0.0;
bin_atom_ln[c*bin_atom_len+bin_idx+7] = 0.0;
bin_count[c] += 1;
}
int chk = 0;
printf("bin_count[%d] = %d\n", chk, bin_count[chk]);
for(i = 0; i < bin_count[chk]; i++){
//printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f\n",chk, bin_atom[chk][i*8], bin_atom[chk][i*8+1], bin_atom[chk][i*8+2], bin_atom[chk][i*8+3], bin_atom[chk][i*8+4], bin_atom[chk][i*8+5], bin_atom[chk][i*8+6], bin_atom[chk][i*8+7]);
}
printf("-------\n");
for(i = 0; i < bin_count[chk]; i++){
//printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f\n",chk, bin_atom_ln[chk*bin_atom_len + i*8], bin_atom_ln[chk*bin_atom_len + i*8+1], bin_atom_ln[chk*bin_atom_len + i*8+2], bin_atom_ln[chk*bin_atom_len+i*8+3], bin_atom_ln[chk*bin_atom_len+i*8+4], bin_atom_ln[chk*bin_atom_len+i*8+5], bin_atom_ln[chk*bin_atom_len+i*8+6], bin_atom_ln[chk*bin_atom_len+i*8+7]);
}
int cN, cp, cxN, cyN, czN, cxsh, cysh, czsh;
float rsh_x, rsh_y, rsh_z;
float nl_list[Ncell][27*4];
//float nl_list_ln[Ncell*27*4];
float *nl_list_ln = (float *)malloc(Ncell*27*4*sizeof(float));
const int cell_len = 27*4;
int n_ct;
for(c = 0; c < Ncell; c++){
cz = c/(Lxd*Lyd);
cp = c % (Lxd*Lyd);
cy = cp/Lxd;
cx = cp % Lxd;
n_ct = 0;
for(czN = cz-1; czN < cz+2; czN++){
if(czN < 0){
czsh = Lzd;
rsh_z = -Lz;
}
else if(czN >= Lzd){
czsh = -Lzd;
rsh_z = Lz;
}
else{
czsh = 0;
rsh_z = 0;
}
for(cyN = cy-1; cyN < cy+2; cyN++){
if(cyN < 0){
cysh = Lyd;
rsh_y = -Ly;
}
else if(cyN >= Lyd){
cysh = -Lyd;
rsh_y = Ly;
}
else{
cysh = 0;
rsh_y = 0;
}
for(cxN = cx-1; cxN < cx+2; cxN++){
if(cxN < 0){
cxsh = Lxd;
rsh_x = -Lx;
}
else if(cxN >= Lxd){
cxsh = -Lxd;
rsh_x = Lx;
}
else{
cxsh = 0;
rsh_x = 0;
}
cN = cxN + cxsh + (cyN + cysh)*Lxd + (czN + czsh)*Lxd*Lyd;
nl_list[c][n_ct] = cN;
nl_list[c][n_ct+1] = rsh_x;
nl_list[c][n_ct+2] = rsh_y;
nl_list[c][n_ct+3] = rsh_z;
nl_list_ln[c*cell_len+n_ct] = cN;
nl_list_ln[c*cell_len+n_ct+1] = rsh_x;
nl_list_ln[c*cell_len+n_ct+2] = rsh_y;
nl_list_ln[c*cell_len+n_ct+3] = rsh_z;
n_ct += 4;
}
}
}
}
int nchk = 56;
for(int inl = 0; inl < 27; inl++){
//printf("c = %d, cN = %f, rxsh = %f, rysh = %f, rzsh = %f\n", nchk, nl_list[nchk][inl*4], nl_list[nchk][inl*4+1], nl_list[nchk][inl*4+2], nl_list[nchk][inl*4+3]);
//printf("c = %d, cN = %f, rxsh = %f, rysh = %f, rzsh = %f\n", nchk, nl_list_ln[nchk*cell_len+inl*4], nl_list_ln[nchk*cell_len+inl*4+1], nl_list_ln[nchk*cell_len+inl*4+2], nl_list_ln[nchk*cell_len+inl*4+3]);
}
printf("-----------\n");
for(int inl = 0; inl < 27; inl++){
//printf("c = %d, cN = %f, rxsh = %f, rysh = %f, rzsh = %f\n", nchk, nl_list[nchk][inl*4], nl_list[nchk][inl*4+1], nl_list[nchk][inl*4+2], nl_list[nchk][inl*4+3]);
//printf("c = %d, cN = %f, rxsh = %f, rysh = %f, rzsh = %f\n", nchk, nl_list_ln[nchk*cell_len+inl*4], nl_list_ln[nchk*cell_len+inl*4+1], nl_list_ln[nchk*cell_len+inl*4+2], nl_list_ln[nchk*cell_len+inl*4+3]);
}
clock_gettime(CLOCK_MONOTONIC, &start_c);
int jn;
float Zc_ip, x_ip, y_ip, z_ip;
float Zc_jp, x_jp, y_jp, z_jp;
int c_ipart, c_jpart;
float n_x_sh, n_y_sh, n_z_sh;
float dx, dy, dz, r, f1, f2, f3, fr;
int ip_cn = 0;
for(int ic = 0; ic < Ncell; ic++){
for(int ip = 0; ip < bin_count[ic]; ip++){
c_ipart = bin_atom[ic][ip*8];
Zc_ip = bin_atom[ic][ip*8+1];
x_ip = bin_atom[ic][ip*8+2];
y_ip = bin_atom[ic][ip*8+3];
z_ip = bin_atom[ic][ip*8+4];
for(int jc = 0; jc < 27; jc++){
jn = (int) nl_list[ic][jc*4];
for(int jp = 0; jp < bin_count[jn]; jp++){
c_jpart = bin_atom[jn][jp*8];
Zc_jp = bin_atom[jn][jp*8+1];
x_jp = bin_atom[jn][jp*8+2];
y_jp = bin_atom[jn][jp*8+3];
z_jp = bin_atom[jn][jp*8+4];
n_x_sh = nl_list[ic][jc*4+1];
n_y_sh = nl_list[ic][jc*4+2];
n_z_sh = nl_list[ic][jc*4+3];
//if(ic == 0) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart);}
if(c_ipart != c_jpart){
//if((ic == 0) && (ip == 0)) {printf("c = %d, c_i = %d, c_j = %d\n", ic, c_ipart, c_jpart); ip_cn += 1;}
dx = x_ip - (x_jp + n_x_sh);
dy = y_ip - (y_jp + n_y_sh);
dz = z_ip - (z_jp + n_z_sh);
r = sqrt(dx*dx + dy*dy + dz*dz);
if(r < rc){
f1 = (0.5/(r*r)) * exp(kappa*r) * erfc(G*r + 0.5*kappa/G) * (1.0 - kappa*r);
f2 = (0.5/(r*r)) * exp(-kappa*r) * erfc(G*r - 0.5*kappa/G) * (1.0 + kappa*r);
f3 = (G/(sqrt(pi)*r)) * (exp(-pow((G*r + 0.5*kappa/G),2)) * exp(kappa*r) + exp(-pow((G*r - 0.5*kappa/G),2)) * exp(-kappa*r) );
fr = Zc_ip*Zc_jp*(f1 + f2 + f3);
bin_atom[ic][ip*8+5] = bin_atom[ic][ip*8+5] + (fr*dx/r);
bin_atom[ic][ip*8+6] = bin_atom[ic][ip*8+6] + (fr*dy/r);
bin_atom[ic][ip*8+7] = bin_atom[ic][ip*8+7] + (fr*dz/r);
//bin_atom[jn][jp*8+5] = bin_atom[jn][jp*8+5] - (fr*dx/r);
//bin_atom[jn][jp*8+6] = bin_atom[jn][jp*8+6] - (fr*dy/r);
//bin_atom[jn][jp*8+7] = bin_atom[jn][jp*8+7] - (fr*dz/r);
}
}
}
}
}
}
clock_gettime(CLOCK_MONOTONIC, &end_c);
diff_c = (end_c.tv_sec - start_c.tv_sec)*1000000.0 + (end_c.tv_nsec - start_c.tv_nsec)/1000.0;
printf("elapsed time = %lf micro-seconds\n", diff_c);
printf("no. of particles for c = 0, ip = 0: %d\n", ip_cn);
//update(kappa, G, rc, pi, Ncell, bin_count, bin_atom_ln, bin_atom_len, nl_list_ln, cell_len);
for(int cchk = 0; cchk < Ncell; cchk++){
for(i = 0; i < bin_count[cchk]; i++){
if(bin_atom[cchk][i*8] < 10)
{printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f, ax = %f, ay = %f, az = %f\n",cchk, bin_atom[cchk][i*8], bin_atom[cchk][i*8+1], bin_atom[cchk][i*8+2], bin_atom[cchk][i*8+3], bin_atom[cchk][i*8+4], bin_atom[cchk][i*8+5], bin_atom[cchk][i*8+6], bin_atom[cchk][i*8+7]);}
}
}
printf("-------------------\n");
for(int cchk = 0; cchk < Ncell; cchk++){
for(i = 0; i < bin_count[cchk]; i++){
if(bin_atom[cchk][i*8] > (N-10))
{printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f, ax = %f, ay = %f, az = %f\n",cchk, bin_atom[cchk][i*8], bin_atom[cchk][i*8+1], bin_atom[cchk][i*8+2], bin_atom[cchk][i*8+3], bin_atom[cchk][i*8+4], bin_atom[cchk][i*8+5], bin_atom[cchk][i*8+6], bin_atom[cchk][i*8+7]);}
}
}
printf("-------------\n");
float *d_bin_atom_ln, *d_nl_list_ln;
int *d_bin_count;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMalloc((void **) &d_bin_count, Ncell*sizeof(int));
cudaMalloc((void **) &d_bin_atom_ln, Ncell*bin_atom_len*sizeof(float));
cudaMalloc((void **) &d_nl_list_ln, Ncell*27*4*sizeof(float));
dim3 dimGrid(512, ceil(Ncell/512.0), 1);
dim3 dimBlock(64, 1, 1);
//clock_gettime(CLOCK_MONOTONIC, &start);
cudaMemcpy(d_bin_count, bin_count, Ncell*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_bin_atom_ln, bin_atom_ln, Ncell*bin_atom_len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_nl_list_ln, nl_list_ln, Ncell*27*4*sizeof(float), cudaMemcpyHostToDevice);
//clock_gettime(CLOCK_MONOTONIC, &start);
updateKernel<<<dimGrid, dimBlock>>>(kappa, G, rc, pi, Ncell, d_bin_count, d_bin_atom_ln, bin_atom_len, d_nl_list_ln, cell_len);
//updateKernel<<<ceil(Ncell/64), 64>>>(kappa, G, rc, pi, Ncell, d_bin_count, d_bin_atom_ln, bin_atom_len, d_nl_list_ln, cell_len);
//updateKernel<<<ceil(Ncell/128.0), 128>>>();
cudaDeviceSynchronize();
//clock_gettime(CLOCK_MONOTONIC, &end);
//diff = (end.tv_sec - start.tv_sec)*1000000.0 + (end.tv_nsec - start.tv_nsec)/1000.0;
//printf("elapsed time = %lf micro-seconds\n", diff);
cudaMemcpy(bin_atom_ln, d_bin_atom_ln, Ncell*bin_atom_len*sizeof(float), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &end);
diff = (end.tv_sec - start.tv_sec)*1000000.0 + (end.tv_nsec - start.tv_nsec)/1000.0;
printf("elapsed time = %lf micro-seconds\n", diff);
for(int cchk = 0; cchk < Ncell; cchk++){
for(i = 0; i < bin_count[cchk]; i++){
if(bin_atom_ln[cchk*bin_atom_len+i*8] < 10)
{printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f, ax = %f, ay = %f, az = %f\n",cchk, bin_atom_ln[cchk*bin_atom_len+i*8], bin_atom_ln[cchk*bin_atom_len+i*8+1], bin_atom_ln[cchk*bin_atom_len+i*8+2], bin_atom_ln[cchk*bin_atom_len+i*8+3], bin_atom_ln[cchk*bin_atom_len+i*8+4], bin_atom_ln[cchk*bin_atom_len+i*8+5], bin_atom_ln[cchk*bin_atom_len+i*8+6], bin_atom_ln[cchk*bin_atom_len+i*8+7]);}
}
}
printf("----------------\n");
for(int cchk = 0; cchk < Ncell; cchk++){
for(i = 0; i < bin_count[cchk]; i++){
if(bin_atom_ln[cchk*bin_atom_len+i*8] > (N-10))
{printf("c = %d, ipart = %f, Z = %f, x = %f, y = %f, z = %f, ax = %f, ay = %f, az = %f\n",cchk, bin_atom_ln[cchk*bin_atom_len+i*8], bin_atom_ln[cchk*bin_atom_len+i*8+1], bin_atom_ln[cchk*bin_atom_len+i*8+2], bin_atom_ln[cchk*bin_atom_len+i*8+3], bin_atom_ln[cchk*bin_atom_len+i*8+4], bin_atom_ln[cchk*bin_atom_len+i*8+5], bin_atom_ln[cchk*bin_atom_len+i*8+6], bin_atom_ln[cchk*bin_atom_len+i*8+7]);}
}
}
cudaFree(d_bin_atom_ln); cudaFree(d_nl_list_ln); cudaFree(d_bin_count);
free(pos); free(Z);
free(bin_count); free(bin_atom_ln); free(nl_list_ln);
return 0;
}
|
13,108 | #include "includes.h"
#define NUM_THREADS 511
#define ITERATIONS 100000
using namespace std;
// Map step: next_values[i] = values[i] + 1 for the first NUM_THREADS
// elements. One thread per element; threads past the bound do nothing.
__global__ void kernel_map(int *values, int *next_values)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= NUM_THREADS)
        return;
    next_values[gid] = values[gid] + 1;
}
13,109 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
#include <math.h>
#include <thrust/iterator/constant_iterator.h>
// Reads pairs of closing prices (AAPL MSFT) from stdin, computes the mean of
// the daily difference AAPL - MSFT and its variance, all on the GPU via thrust.
int main() {
    thrust::device_vector<double> AAPL;
    thrust::device_vector<double> MSFT;
    double stocks_AAPL, stocks_MSFT, mean, var;
    // Test the extraction itself: the old `while(std::cin)` form pushed the
    // last record twice, because the loop body still ran after EOF was hit.
    while (std::cin >> stocks_AAPL >> stocks_MSFT) {
        AAPL.push_back(stocks_AAPL);
        MSFT.push_back(stocks_MSFT);
    }
    int N = AAPL.size();
    if (N == 0) {
        return 0;  // nothing to do on empty input; avoids division by zero below
    }
    // Element-wise difference AAPL - MSFT.
    thrust::device_vector<double> MEAN_DIF(N);
    thrust::transform(AAPL.begin(), AAPL.end(), MSFT.begin(), MEAN_DIF.begin(), thrust::minus<double>());
    double val = thrust::reduce(MEAN_DIF.begin(), MEAN_DIF.end());
    mean = val / N;  // was hard-coded /2517, wrong for any other input size
    std::cout << "Média: " << fabs(mean) << "\n";
    // (x - mean), then squared in place, then averaged -> population variance.
    thrust::device_vector<double> MV_DIF(N);
    thrust::transform(MEAN_DIF.begin(), MEAN_DIF.end(), thrust::constant_iterator<double>(mean), MV_DIF.begin(), thrust::minus<double>());
    thrust::transform(MV_DIF.begin(), MV_DIF.end(), MV_DIF.begin(), MV_DIF.begin(), thrust::multiplies<double>());
    double val2 = thrust::reduce(MV_DIF.begin(), MV_DIF.end());
    var = val2 / N;
    std::cout << "Variância: " << var << "\n";
}
|
13,110 |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define TILE_WIDTH 16
void matmulCPU(float *a, float *b, float *r, int n);
__global__ void matmulGPU(float *a, float *b, float *r, int n);
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
// Host driver: builds two random N x N matrices, multiplies them on the CPU
// (reference) and on the GPU (tiled kernel), times both, and compares results.
int main(int argc, char* argv[]){
if (argc < 2) {
puts("Usage: matmul [N]");
return 0;
}
int N = atoi(argv[1]);
printf("N: %d\n", N);
//Total size
size_t sz = sizeof(float) * N * N;
//Struct for time measure
struct timeval start, end, timer;
//Memory allocation for cpu(host)
float *h_a = (float*)malloc(sz);
float *h_b = (float*)malloc(sz);
float *h_r = (float*)malloc(sz);
srand(time(NULL));
// Random integer-valued entries in [0, 100); h_r is the CPU result buffer.
for(int i=0; i<N*N; i++) {
h_a[i] = (float)(rand()%100);
h_b[i] = (float)(rand()%100);
h_r[i] = 0;
}
//Memory alocation for gpu(device)
float *d_a, *d_b, *d_r;
cudaMalloc((void **) &d_a, sz);
cudaMalloc((void **) &d_b, sz);
cudaMalloc((void **) &d_r, sz);
float *h_result_global = (float*)malloc(sz);
// --- CPU reference pass (timed) ---
gettimeofday(&start, NULL);
matmulCPU(h_a, h_b, h_r, N);
gettimeofday(&end, NULL);
timersub(&end, &start, &timer);
printf("CPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
// 16x16 thread blocks; grid rounded up so every output element is covered.
int threads_width = 16;
int grid_width = N % threads_width ? N / threads_width + 1 : N / threads_width;
dim3 dim_threads(threads_width, threads_width);
dim3 dim_grid(grid_width, grid_width);
// --- GPU pass, timed including the host<->device copies ---
gettimeofday(&start, NULL);
cudaMemcpy(d_a, h_a, sz, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sz, cudaMemcpyHostToDevice);
MatrixMulKernel<<<dim_grid, dim_threads>>>(d_a, d_b, d_r, N);
cudaDeviceSynchronize();
cudaMemcpy(h_result_global, d_r, sz, cudaMemcpyDeviceToHost);
gettimeofday(&end, NULL);
timersub(&end, &start, &timer);
printf("GPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
// NOTE(review): exact float equality between CPU and GPU results is fragile
// (summation order differs); a tolerance-based compare would be safer.
for (int i = 0; i<N*N; i++){
if(h_r[i] != h_result_global[i]){
printf("Failed at %d, h_result_global, %f, %f\n", i, h_r[i], h_result_global[i]);
break;
}
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_r);
free(h_result_global);
free(h_a);
free(h_b);
free(h_r);
return 0;
}
// Reference row-major matrix product on the host: r = a * b, all n x n.
// Used to validate the GPU kernels.
void matmulCPU(float *a, float *b, float *r, int n){
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            float acc = 0.0f;
            for (int k = 0; k < n; k++) {
                acc += a[row * n + k] * b[k * n + col];
            }
            r[row * n + col] = acc;
        }
    }
}
// Naive GPU matmul: one thread computes one output element r[row][col].
__global__ void matmulGPU(float *a, float *b, float *r, int n){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= n || row >= n)
        return;  // guard the ragged edge of the grid
    float acc = 0.0f;
    for (int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    r[row * n + col] = acc;
}
// Tiled matrix multiply: Pd = Md * Nd, row-major, Width x Width.
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks.
// Fixes vs the original:
//  - Pvalue was only initialized for threads with tx==0 || ty==0, so most
//    threads accumulated onto an uninitialized register.
//  - Out-of-range threads returned early, skipping __syncthreads() — UB for
//    edge blocks when Width is not a multiple of TILE_WIDTH.
//  - Tile loads read past the ends of Md/Nd on the partial last tile; they
//    are now zero-padded instead.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = blockIdx.y * TILE_WIDTH + ty;
int Col = blockIdx.x * TILE_WIDTH + tx;
float Pvalue = 0.0f;
int num_tile = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
for(int m=0;m<num_tile;++m){
// Zero-pad loads outside the matrix so every thread can take part in the
// barrier and the inner product is unaffected.
int mCol = m * TILE_WIDTH + tx;  // column index into Md
int nRow = m * TILE_WIDTH + ty;  // row index into Nd
Mds[ty][tx] = (Row < Width && mCol < Width) ? Md[Row * Width + mCol] : 0.0f;
Nds[ty][tx] = (nRow < Width && Col < Width) ? Nd[nRow * Width + Col] : 0.0f;
__syncthreads();
for(int k=0; k<TILE_WIDTH;++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if(Row < Width && Col < Width)
Pd[Row*Width+Col] = Pvalue;
}
|
13,111 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define DEFAULT_WIDTH 1024
#define DEFAULT_HEIGHT 1024
#define DEFAULT_INTWNDW 32
#define DEFAULT_INTWNDH 32
#define DEFAULT_SRHWNDL 12
#define DEFAULT_SRHWNDR 12
#define DEFAULT_SRHWNDA 12
#define DEFAULT_SRHWNDB 12
__host__ void getData(int size, unsigned char *data0, unsigned char *data1, char *path0, char *path1);
__global__ void vectorComputation(unsigned char *f, unsigned char *g, int width, int totalVectorsX, int totalVectorsY, char m, char n, char l, char r, char a, char b, char ww, char wh, char fw, char fh, char stepX, char stepY, float *R_posSubX, float *R_posSubY, float *startX, float *startY, float *endX, float *endY);
__device__ void getData0intoSharedMemory(unsigned char *f, unsigned char *sh_f, int width, char m, char n, char l, char a, int startPixelX, int startPixelY, char stepX, char stepY);
__device__ void getData1intoSharedMemory(unsigned char *g, unsigned char *sh_g, int width, char fw, char fh, int startPixelX, int startPixelY, char stepX, char stepY);
__device__ void calculateR(unsigned char *sh_f, unsigned char *sh_g, float *R, char *R_posX, char *R_posY, char m, char n, char ww, char wh, char fw, char fh);
__device__ void bitonicSort(float *R, char *R_posX, char *R_posY);
__device__ void swapFloat(float *x, float *y);
__device__ void swapChar(char *x, char *y);
__device__ void subPixelAccuracy(int vectorID, float *R, char *R_posX, char *R_posY, float *subXN, float *subXD, float *subYN, float *subYD, float *R_posSubX, float *R_posSubY, char l, char a, float *endX, float *endY);
__global__ void vectorCorrection(int totalVectorsX, float *R_posSubX, float *R_posSubY, float *endX, float *endY);
__host__ void printSummary(int width, int height, char intWndW, char intWndH, char srhWndL, char srhWndR, char srhWndA, char srhWndB, char stepX, char stepY, int totalVectorsX, int totalVectorsY, int totalVectors, int Dbx, int Dby, int Dgx, int Dgy, float h_time, float d_time);
__host__ void putData(float *startX, float *startY, float *endX, float *endY, int totalVectors, char *path);
__host__ void putDataVTK(int height, float *startX, float *startY, float *endX, float *endY, int number, int numberX, int numberY, char *pathVTK);
// PIV (particle image velocimetry) driver: loads two raw grayscale frames,
// runs the correlation kernel (one block per vector), applies the outlier
// correction pass, and writes the velocity field as .dat and .vtk files.
int main(int argc, char *argv[]) {
int width = DEFAULT_WIDTH, height = DEFAULT_HEIGHT;
int size = width * height;
unsigned char *h_data0, *h_data1, *d_data0, *d_data1;
// Window geometry, all stored as char (values fit in 8 bits by construction).
char intWndW = (char)DEFAULT_INTWNDW,
intWndH = (char)DEFAULT_INTWNDH,
srhWndL = (char)DEFAULT_SRHWNDL,
srhWndR = (char)DEFAULT_SRHWNDR,
srhWndA = (char)DEFAULT_SRHWNDA,
srhWndB = (char)DEFAULT_SRHWNDB,
srhWndW = srhWndL + srhWndR + 1,
srhWndH = srhWndA + srhWndB + 1,
srhFldW = srhWndL + srhWndR + intWndW,
// NOTE(review): srhFldH uses srhWndL + srhWndR rather than srhWndA +
// srhWndB; identical with the defaults (all 12) but looks like a typo.
srhFldH = srhWndL + srhWndR + intWndH;
char stepX, stepY;
float *h_startX, *h_startY, *d_startX, *d_startY;
float *h_endX, *h_endY, *d_endX, *d_endY;
float *R_posSubX, *R_posSubY;
int totalVectorsX, totalVectorsY, totalVectors;
char *inputFileName0 = "2ms2000018",
*inputFileName1 = "2ms2000019",
*inputFileExtension = "raw",
*outputFileName = "result",
*outputFileExtension = "dat";
char inputFilepath0[64], inputFilepath1[64], outputFilepath[64], outputFilePathVTK[64];
clock_t h_start, h_end, d_start, d_end;
float h_time, d_time;
cudaError_t cudaError;
dim3 Dg, Db;
int Dgx, Dgy, Dbx, Dby;
h_data0 = (unsigned char *)malloc(sizeof(unsigned char) * size);
h_data1 = (unsigned char *)malloc(sizeof(unsigned char) * size);
cudaMalloc((void **)&d_data0, sizeof(unsigned char) * size);
cudaMalloc((void **)&d_data1, sizeof(unsigned char) * size);
// 50% overlap: step is half the interrogation window.
stepX = (char)(intWndW / 2);
stepY = (char)(intWndH / 2);
totalVectorsX = (int)((width - srhFldW) / stepX) + 1;
totalVectorsY = (int)((height - srhFldH) / stepY) + 1;
totalVectors = totalVectorsX * totalVectorsY;
h_startX = (float *)malloc(sizeof(float) * totalVectors);
h_startY = (float *)malloc(sizeof(float) * totalVectors);
h_endX = (float *)malloc(sizeof(float) * totalVectors);
h_endY = (float *)malloc(sizeof(float) * totalVectors);
cudaMalloc((void **)&d_startX, sizeof(float) * totalVectors);
cudaMalloc((void **)&d_startY, sizeof(float) * totalVectors);
cudaMalloc((void **)&d_endX, sizeof(float) * totalVectors);
cudaMalloc((void **)&d_endY, sizeof(float) * totalVectors);
// Five displacement candidates (correlation peaks) kept per vector.
cudaMalloc((void **)&R_posSubX, sizeof(float) * totalVectors * 5);
cudaMalloc((void **)&R_posSubY, sizeof(float) * totalVectors * 5);
sprintf(inputFilepath0, "%s.%s", inputFileName0, inputFileExtension);
sprintf(inputFilepath1, "%s.%s", inputFileName1, inputFileExtension);
sprintf(outputFilepath, "%s.%s", outputFileName, outputFileExtension);
sprintf(outputFilePathVTK, "%s-vtk.vtk", outputFileName);
// One 32x32 block per vector.
Db.x = 32;
Db.y = 32;
Dg.x = totalVectorsX;
Dg.y = totalVectorsY;
Dbx = Db.x; Dby = Db.y;
Dgx = Dg.x; Dgy = Dg.y;
getData(size, h_data0, h_data1, inputFilepath0, inputFilepath1);
// h_time includes the host<->device copies; d_time is kernels only.
h_start = clock();
cudaMemcpy(d_data0, h_data0, sizeof(unsigned char) * size, cudaMemcpyHostToDevice);
cudaMemcpy(d_data1, h_data1, sizeof(unsigned char) * size, cudaMemcpyHostToDevice);
d_start = clock();
vectorComputation<<<Dg, Db>>>(d_data0, d_data1, width, totalVectorsX, totalVectorsY, intWndW, intWndH, srhWndL, srhWndR, srhWndA, srhWndB, srhWndW, srhWndH, srhFldW, srhFldH, stepX, stepY, R_posSubX, R_posSubY, d_startX, d_startY, d_endX, d_endY);
cudaThreadSynchronize();
vectorCorrection<<<Dg, Db>>>(totalVectorsX, R_posSubX, R_posSubY, d_endX, d_endY);
cudaThreadSynchronize();
d_end = clock();
cudaMemcpy(h_startX, d_startX, sizeof(float) * totalVectors, cudaMemcpyDeviceToHost);
cudaMemcpy(h_startY, d_startY, sizeof(float) * totalVectors, cudaMemcpyDeviceToHost);
cudaMemcpy(h_endX, d_endX, sizeof(float) * totalVectors, cudaMemcpyDeviceToHost);
cudaMemcpy(h_endY, d_endY, sizeof(float) * totalVectors, cudaMemcpyDeviceToHost);
h_end = clock();
h_time = (float)(h_end - h_start) / CLOCKS_PER_SEC;
d_time = (float)(d_end - d_start) / CLOCKS_PER_SEC;
printSummary(width, height, intWndW, intWndH, srhWndL, srhWndR, srhWndA, srhWndB, stepX, stepY, totalVectorsX, totalVectorsY, totalVectors, Dbx, Dby, Dgx, Dgy, h_time, d_time);
putData(h_startX, h_startY, h_endX, h_endY, totalVectors, outputFilepath);
putDataVTK(height, h_startX, h_startY, h_endX, h_endY, totalVectors, totalVectorsX, totalVectorsY, outputFilePathVTK);
free(h_data0);
free(h_data1);
cudaFree(d_data0);
cudaFree(d_data1);
free(h_startX);
free(h_startY);
free(h_endX);
free(h_endY);
cudaFree(d_startX);
cudaFree(d_startY);
cudaFree(d_endX);
cudaFree(d_endY);
cudaFree(R_posSubX);
cudaFree(R_posSubY);
cudaError = cudaGetLastError();
if(cudaError != cudaSuccess) {
// print the CUDA error message and exit
printf("CUDA error: %s\n\n", cudaGetErrorString(cudaError));
exit(-1);
} else {
printf("CUDA success\n\n");
}
return 0;
}
// Loads two raw 8-bit grayscale frames of `size` bytes each from path0/path1.
// Fix: the original printed an error on fopen failure but then kept reading
// from the NULL FILE*, crashing in fgetc(); now it exits instead.
__host__ void getData(int size, unsigned char *data0, unsigned char *data1, char *path0, char *path1) {
int i;
FILE *fp;
fp = fopen(path0, "rb");
if (fp == NULL) {
printf("%s file open error\n", path0);
exit(1);
}
for (i = 0; i < size; i++) {
data0[i] = fgetc(fp);
}
fclose(fp);
fp = fopen(path1, "rb");
if (fp == NULL) {
printf("%s file open error\n", path1);
exit(1);
}
for (i = 0; i < size; i++) {
data1[i] = fgetc(fp);
}
fclose(fp);
}
// PIV correlation kernel: one block per interrogation window. Stages the
// template (sh_f) and search field (sh_g) into shared memory, builds the
// correlation surface R, sorts it descending with a bitonic sort, then
// derives sub-pixel displacement candidates.
// NOTE(review): the shared buffers are fixed-size (sh_f 2304, sh_g 12100,
// R 4096); this assumes the window/field dimensions never exceed those —
// confirm against the DEFAULT_* constants.
__global__ void vectorComputation(unsigned char *f, unsigned char *g, int width, int totalVectorsX, int totalVectorsY, char m, char n, char l, char r, char a, char b, char ww, char wh, char fw, char fh, char stepX, char stepY, float *R_posSubX, float *R_posSubY, float *startX, float *startY, float *endX, float *endY) {
int vectorID;
int startPixelX, startPixelY;
__shared__ unsigned char
sh_f[2304], sh_g[12100];
__shared__ float
R[4096];
__shared__ char
R_posX[4096], R_posY[4096];
__shared__ float
subXN[5], subXD[5], subYN[5], subYD[5];
// One vector per block; linearized block id addresses the output arrays.
vectorID = blockIdx.y * gridDim.x + blockIdx.x;
startPixelX = stepX * blockIdx.x;
startPixelY = stepY * blockIdx.y;
// Vector origin: centre of the interrogation window (+0.5 pixel-centre offset).
startX[vectorID] = startPixelX + l + (m / 2) + (float)0.5;
startY[vectorID] = startPixelY + a + (n / 2) + (float)0.5;
endX[vectorID] = 0;
endY[vectorID] = 0;
getData0intoSharedMemory(f, sh_f, width, m, n, l, a, startPixelX, startPixelY, stepX, stepY);
getData1intoSharedMemory(g, sh_g, width, fw, fh, startPixelX, startPixelY, stepX, stepY);
__syncthreads();
calculateR(sh_f, sh_g, R, R_posX, R_posY, m, n, ww, wh, fw, fh);
__syncthreads();
bitonicSort(R, R_posX, R_posY);
__syncthreads();
subPixelAccuracy(vectorID, R, R_posX, R_posY, subXN, subXD, subYN, subYD, R_posSubX, R_posSubY, l, a, endX, endY);
__syncthreads();
}
// Cooperative copy of the m x n interrogation window (offset by l,a inside
// the search field) from the global frame f into shared memory sh_f.
__device__ void getData0intoSharedMemory(unsigned char *f, unsigned char *sh_f, int width, char m, char n, char l, char a, int startPixelX, int startPixelY, char stepX, char stepY) {
    for (char row = threadIdx.y; row < n; row += blockDim.y) {
        for (char col = threadIdx.x; col < m; col += blockDim.x) {
            sh_f[row * m + col] = f[(startPixelY + a + row) * width + (startPixelX + l + col)];
        }
    }
}
// Cooperative copy of the fw x fh search field from the global frame g into
// shared memory sh_g; threads stride over rows/columns of the patch.
__device__ void getData1intoSharedMemory(unsigned char *g, unsigned char *sh_g, int width, char fw, char fh, int startPixelX, int startPixelY, char stepX, char stepY) {
    for (char row = threadIdx.y; row < fh; row += blockDim.y) {
        for (char col = threadIdx.x; col < fw; col += blockDim.x) {
            sh_g[row * fw + col] = g[(startPixelY + row) * width + (startPixelX + col)];
        }
    }
}
// Correlation surface. For each candidate shift (zeta, eta) in the ww x wh
// search window, correlates the m x n template sh_f against sh_g and stores
// R[theta] = sqrt(A^2 / (B*C)) (A = cross term, B/C = energy terms), i.e. the
// magnitude of the normalized cross-correlation. Unused tail slots of the
// fixed 4096-entry arrays are padded with R = 0 and sentinel positions so the
// later bitonic sort operates on a full power-of-two array.
__device__ void calculateR(unsigned char *sh_f, unsigned char *sh_g, float *R, char *R_posX, char *R_posY, char m, char n, char ww, char wh, char fw, char fh) {
char i, j, zeta, eta;
float fValue, gValue;
float A, B, C;
int theta;
for (eta = threadIdx.y; eta < wh; eta += blockDim.y) {
for (zeta = threadIdx.x; zeta < ww; zeta += blockDim.x) {
// Linear index of this shift in the correlation surface.
theta = eta * ww + zeta;
A = 0; B = 0; C = 0;
for (j = 0; j < n; j++) {
for (i = 0; i < m; i++) {
fValue = (float)sh_f[j * m + i];
gValue = (float)sh_g[(eta + j) * fw + (zeta + i)];
A += fValue * gValue;
B += fValue * fValue;
C += gValue * gValue;
}
}
R[theta] = sqrt((A * A) / (B * C));
R_posX[theta] = zeta;
R_posY[theta] = eta;
}
}
// Pad [ww*wh, 4096) so sorting sees a complete array; -128 marks "no shift".
for (theta = (threadIdx.y * blockDim.x + threadIdx.x) + (ww * wh); theta < 4096; theta += blockDim.x * blockDim.y) {
R[theta] = 0;
R_posX[theta] = -128;
R_posY[theta] = -128;
}
}
// In-place bitonic sort of the 4096-entry correlation surface in descending
// order (largest R first), carrying the peak coordinates R_posX/R_posY along
// with each value. 2048 compare-exchange pairs per step; all threads of the
// block cooperate and hit the barrier once per step.
__device__ void bitonicSort(float *R, char *R_posX, char *R_posY) {
int i, j, k, frac, xtemp, x, group, el0, el1;
for (i = 0; 1 << i <= 2048; i++) {
for (j = i; j >= 0; j--) {
if (j == i) {
// First step of each stage: mirrored (bitonic) pairing within a group.
for (k = threadIdx.y * blockDim.x + threadIdx.x; k < 2048; k += blockDim.x * blockDim.y) {
frac = (1 << (j + 1)) - 1;
xtemp = k >> j;
x = xtemp << j;
group = (x << 2) + frac;
el0 = (x << 1) + (k & ((1 << j) - 1));
el1 = group - el0;
if (R[el0] < R[el1]) {
swapFloat(&R[el0], &R[el1]);
swapChar(&R_posX[el0], &R_posX[el1]);
swapChar(&R_posY[el0], &R_posY[el1]);
}
}
} else {
// Remaining steps: fixed-distance (2^j) pairing.
for (k = threadIdx.y * blockDim.x + threadIdx.x; k < 2048; k += blockDim.x * blockDim.y) {
xtemp = k >> j;
x = xtemp << j;
el0 = (x << 1) + (k & ((1 << j) - 1));
el1 = el0 + (1 << j);
if (R[el0] < R[el1]) {
swapFloat(&R[el0], &R[el1]);
swapChar(&R_posX[el0], &R_posX[el1]);
swapChar(&R_posY[el0], &R_posY[el1]);
}
}
}
__syncthreads();
}
}
}
// Exchange two floats in place.
__device__ void swapFloat(float *x, float *y) {
    const float tmp = *x;
    *x = *y;
    *y = tmp;
}
// Exchange two chars in place.
__device__ void swapChar(char *x, char *y) {
    const char tmp = *x;
    *x = *y;
    *y = tmp;
}
// Sub-pixel refinement around each of the 5 strongest correlation peaks
// (array is sorted descending, so indices 0..4 are the peaks). Accumulates
// the correlation values of the 4-neighbourhood of each peak into shared
// numerator/denominator arrays, then writes the 5 refined displacement
// candidates to R_posSubX/Y and the best (index 0) one to endX/endY.
// NOTE(review): the accumulation loop performs unsynchronized read-modify-
// write on the shared subX*/subY* arrays from many threads; this looks like
// a data race unless at most one thread ever matches each condition per
// goal — confirm intent.
__device__ void subPixelAccuracy(int vectorID, float *R, char *R_posX, char *R_posY, float *subXN, float *subXD, float *subYN, float *subYD, float *R_posSubX, float *R_posSubY, char l, char a, float *endX, float *endY) {
int i, goal;
// Zero the 5-entry accumulators, one warp row per array.
if (threadIdx.y == 0 && threadIdx.x < 5) {
subXN[threadIdx.x] = 0;
} else if (threadIdx.y == 1 && threadIdx.x < 5) {
subXD[threadIdx.x] = 0;
} else if (threadIdx.y == 2 && threadIdx.x < 5) {
subYN[threadIdx.x] = 0;
} else if (threadIdx.y == 3 && threadIdx.x < 5) {
subYD[threadIdx.x] = 0;
}
__syncthreads();
// Scan the whole surface for the peak itself and its 4 neighbours.
for (goal = 0; goal < 5; goal++) {
for (i = threadIdx.y * blockDim.x + threadIdx.x; i < 4096; i += blockDim.x * blockDim.y) {
if ((R_posX[i] == R_posX[goal]) && (R_posY[i] == R_posY[goal])) {
subXD[goal] -= (R[i] + R[i]);
subYD[goal] -= (R[i] + R[i]);
} else if ((R_posX[i] == R_posX[goal] - 1) && (R_posY[i] == R_posY[goal])) {
subXN[goal] -= R[i];
subXD[goal] += R[i];
} else if ((R_posX[i] == R_posX[goal] + 1) && (R_posY[i] == R_posY[goal])) {
subXN[goal] += R[i];
subXD[goal] += R[i];
} else if ((R_posX[i] == R_posX[goal]) && (R_posY[i] == R_posY[goal] - 1)) {
subYN[goal] -= R[i];
subYD[goal] += R[i];
} else if ((R_posX[i] == R_posX[goal]) && (R_posY[i] == R_posY[goal] + 1)) {
subYN[goal] += R[i];
subYD[goal] += R[i];
}
}
}
__syncthreads();
// Three-point interpolation; offsets beyond +-1 pixel are treated as noise.
if (threadIdx.y == 0 && threadIdx.x < 5) {
float subX;
subX = subXN[threadIdx.x] / (subXD[threadIdx.x] + subXD[threadIdx.x]);
if (subX > 1 || subX < -1) {
subX = 0;
}
R_posSubX[vectorID * 5 + threadIdx.x] = (float)R_posX[threadIdx.x] + subX - (float)l;
} else if (threadIdx.y == 1 && threadIdx.x < 5) {
float subY;
subY = subYN[threadIdx.x] / (subYD[threadIdx.x] + subYD[threadIdx.x]);
if (subY > 1 || subY < -1) {
subY = 0;
}
R_posSubY[vectorID * 5 + threadIdx.x] = (float)R_posY[threadIdx.x] + subY - (float)a;
}
__syncthreads();
// The strongest peak (candidate 0) becomes the provisional vector end point.
if (threadIdx.y == 0 && threadIdx.x == 0) {
endX[vectorID] = R_posSubX[vectorID * 5];
} else if (threadIdx.y == 1 && threadIdx.x == 0) {
endY[vectorID] = R_posSubY[vectorID * 5];
}
__syncthreads();
}
// Outlier filter on interior vectors: compares the five correlation-peak
// candidates of each vector against the average of its 8 neighbours' best
// candidates and keeps the candidate closest to that average; if even the
// closest one is more than 4 px away, the neighbour average is used instead.
__global__ void vectorCorrection(int totalVectorsX, float *R_posSubX, float *R_posSubY, float *endX, float *endY) {
    // Only thread (0,0) of each block works, and border blocks are skipped
    // because they lack a full 3x3 neighbourhood.
    const bool interior = (blockIdx.x != 0) && (blockIdx.x != gridDim.x - 1) &&
                          (blockIdx.y != 0) && (blockIdx.y != gridDim.y - 1);
    if (!interior || threadIdx.x != 0 || threadIdx.y != 0)
        return;
    const int id = blockIdx.y * gridDim.x + blockIdx.x;
    float avgX = 0;
    float avgY = 0;
    // Average the best candidate (slot 0) of the 8 surrounding vectors.
    for (int dy = -1; dy <= 1; dy++) {
        for (int dx = -1; dx <= 1; dx++) {
            if (dx == 0 && dy == 0)
                continue;
            const int nb = id + dy * totalVectorsX + dx;
            avgX += R_posSubX[nb * 5];
            avgY += R_posSubY[nb * 5];
        }
    }
    avgX /= 8;
    avgY /= 8;
    float X = R_posSubX[id * 5];
    float Y = R_posSubY[id * 5];
    // Pick the candidate nearest the neighbourhood average.
    for (int c = 1; c < 5; c++) {
        const float dX = R_posSubX[id * 5 + c] - avgX;
        const float dY = R_posSubY[id * 5 + c] - avgY;
        if (sqrt(dX * dX + dY * dY) < sqrt((X - avgX) * (X - avgX) + (Y - avgY) * (Y - avgY))) {
            X = R_posSubX[id * 5 + c];
            Y = R_posSubY[id * 5 + c];
        }
    }
    // Still a far outlier: replace with the neighbour average outright.
    if (sqrt((X - avgX) * (X - avgX) + (Y - avgY) * (Y - avgY)) > 4) {
        X = avgX;
        Y = avgY;
    }
    endX[id] = X;
    endY[id] = Y;
}
// Prints the PIV run configuration and timing report to stdout.
// All char-typed window parameters are widened to int for printf.
// Fix: corrected the user-visible typo "wize" -> "size".
__host__ void printSummary(int width, int height, char intWndW, char intWndH, char srhWndL, char srhWndR, char srhWndA, char srhWndB, char stepX, char stepY, int totalVectorsX, int totalVectorsY, int totalVectors, int Dbx, int Dby, int Dgx, int Dgy, float h_time, float d_time) {
int srhWndW, srhWndH, threadsX, threadsY, threads;
float overlap;
srhWndW = 1 + srhWndL + srhWndR;
srhWndH = 1 + srhWndA + srhWndB;
threadsX = Dgx * Dbx;
threadsY = Dgy * Dby;
threads = threadsX * threadsY;
// Overlap fraction between neighbouring interrogation windows.
overlap = 1 - ((float)stepX / (float)intWndW);
printf(
"Summary:\n"
"Image size: %d * %d\n"
"Interrogation window size: %d * %d\n"
"Search window size: %d * %d\n"
"\n"
"Steps: %d * %d\n"
"Overlap: %.2f%% \n"
"\n"
"Vector size: %d * %d\n"
"Total vectors %d\n"
"\n"
"Grid size: %d * %d\n"
"Block size: %d * %d\n"
"Thread size: %d * %d\n"
"Total threads: %d\n"
"\n"
"Processing time (with memcpy): %f sec\n"
"Processing time (without memcpy): %f sec\n"
"\n"
, width, height
, (int)intWndW, (int)intWndH
, (int)srhWndW, (int)srhWndH
, (int)stepX, (int)stepY
, (overlap * 100)
, totalVectorsX, totalVectorsY
, totalVectors
, Dgx, Dgy
, Dbx, Dby
, threadsX, threadsY
, threads
, h_time
, d_time
);
}
// Writes one "startX startY 2*endX 2*endY" line per vector to `path`.
// Fix: the original printed an error on fopen failure but then wrote to the
// NULL FILE*; now it returns early instead.
__host__ void putData(float *startX, float *startY, float *endX, float *endY, int number, char *path) {
int i;
FILE *fp;
fp = fopen(path, "w");
if (fp == NULL) {
fprintf(stderr, "%s file open error\n", path);
return;
}
for (i = 0; i < number; i++) {
fprintf(fp, "%.1f %.1f %.1f %.1f\n", startX[i], startY[i], endX[i] * 2, endY[i] * 2);
}
fclose(fp);
}
/*vtk output*/
// Writes the vector field as a legacy-ASCII VTK structured grid: point
// coordinates (y flipped to image convention), per-point speed magnitudes,
// and the displacement vectors themselves.
// Fixes: return early on fopen failure (original wrote to a NULL FILE*),
// and close the file at the end (original leaked the handle / could lose
// buffered output).
__host__ void putDataVTK(int height, float *startX, float *startY, float *endX, float *endY, int number, int numberX, int numberY, char *pathVTK) {
int i;
FILE *fp;
fp = fopen(pathVTK, "w");
if (fp == NULL) {
fprintf(stderr, "%s file open error.\n", pathVTK);
return;
}
fprintf(fp, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET STRUCTURED_GRID\nDIMENSIONS %d %d 1\nPOINTS %d float\n", numberX, numberY, number);
for (i = 0; i < number; i++) {
fprintf(fp,"%.1f %.1f %d\n", startX[i], height - startY[i], 0);
}
fprintf(fp, "POINT_DATA %d\nSCALARS velocity float\nLOOKUP_TABLE default\n", number);
for (i = 0; i < number; i++) {
fprintf(fp,"%.10f\n", sqrt((endX[i] * endX[i]) + (endY[i] * endY[i])));
}
fprintf(fp, "VECTORS velocity float\n");
for (i = 0; i < number; i++) {
fprintf(fp,"%.10f %.10f %.10f\n", endX[i], -endY[i], .0);
}
fclose(fp);
}
13,112 | #include "includes.h"
// De-interleaves 8-bit samples: even bytes -> rcp, odd bytes -> lcp,
// converted to float. No bounds check — launch exactly one thread per
// output sample.
__global__ void unpack8bits_kernel(float *rcp, float *lcp, const int8_t *src) {
    const size_t out = blockDim.x * blockIdx.x + threadIdx.x;
    const int8_t *pair = src + out * 2;
    rcp[out] = static_cast<float>(pair[0]);
    lcp[out] = static_cast<float>(pair[1]);
}
13,113 | #include "includes.h"
// One thread per histogram bin: cdf[t] = (sum of histo[0..t]) / (width*height),
// i.e. the normalized cumulative distribution of the histogram.
// Fix: the original accumulated with `+=` directly into global cdf[], so it
// silently depended on the output buffer being pre-zeroed and issued a global
// read-modify-write per iteration; a register accumulator removes both issues.
__global__ void calcCDFnormalized(const unsigned int *histo, float *cdf, size_t width, size_t height) {
float sum = 0.0f;
for (int i = 0; i <= threadIdx.x; i++) {
sum += (float) histo[i];
}
cdf[threadIdx.x] = sum / (float)(width * height);
}
13,114 | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
// Tutorial: exercises the three cudaMemcpy directions — host-to-device,
// device-to-device, then device-to-host — and verifies the round trip.
int main(int argc, char** argv)
{
// pointer and dimension for host memory
int n, dimA;
float *h_a;
// pointers for device memory
float *d_a, *d_b;
// allocate and initialize host memory
// Bonus: try using cudaMallocHost in place of malloc
dimA = 8;
h_a = (float *) malloc(dimA * sizeof(float));
for (n = 0; n < dimA; n++)
{
h_a[n] = (float) n;
}
// Part 1 of 5: allocate device memory
size_t memSize = dimA * sizeof(float);
cudaMalloc((void**)&d_a,memSize);
cudaMalloc((void**)&d_b,memSize);
// Part 2 of 5: host to device memory copy
cudaMemcpy(d_a,h_a, memSize,cudaMemcpyHostToDevice);
// Part 3 of 5: device to device memory copy
cudaMemcpy(d_b,d_a, memSize,cudaMemcpyDeviceToDevice);
// clear host memory so the verification below cannot pass by accident
for (n = 0; n < dimA; n++)
{
h_a[n] = 0.f;
}
// Part 4 of 5: device to host copy
cudaMemcpy(h_a,d_b, memSize,cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy calls");
// verify the data on the host is correct
for (n = 0; n < dimA; n++)
{
assert(h_a[n] == (float ) n);
}
// Part 5 of 5: free device memory pointers d_a and d_b
cudaFree(d_a);
cudaFree(d_b);
// Check for any CUDA errors
checkCUDAError("cudaFree");
// free host memory pointer h_a
// Bonus: be sure to use cudaFreeHost for memory allocated with cudaMallocHost
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
// Aborts the program with a descriptive message if any earlier CUDA call
// left an error behind; `msg` identifies the call site in the output.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(-1);
    }
}
|
13,115 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Writes the 6 bytes of "Hello" (including the terminating NUL) into d_buf.
__global__ void hello(char* d_buf)
{
    const char msg[] = "Hello";
    for (int i = 0; i < 6; i++)
        d_buf[i] = msg[i];
}
// Launches the hello kernel with a single thread, copies the string it wrote
// back from device memory, and prints it.
int main(int argc, char* argv[])
{
    char host_buf[100];
    char *dev_buf;
    cudaMalloc((void **) &dev_buf, 100);
    hello<<<1, 1>>>(dev_buf);
    // Blocking copy; also synchronizes with the kernel above.
    cudaMemcpy(host_buf, dev_buf, 100, cudaMemcpyDeviceToHost);
    cudaFree(dev_buf);
    printf("%s\n", host_buf);
    cudaDeviceReset();
    return 0;
}
|
13,116 | #include "includes.h"
// Element-wise multiply-accumulate: y[i] += x[i] * v[i] for i < len.
__global__ void xvpy_f32 (float* x, float* v, float* y, int len) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;  // tail guard for the last partial block
    y[i] += x[i] * v[i];
}
13,117 | //fail
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//64
// GPUVerify regression helper: unconditionally trips the device-side assert.
// Intentional — the "//fail" marker at the top of this test expects failure.
__device__ void bar(float x) {
assert(0);
}
// Kernel under verification: implicitly converts A[0] (int) to float and
// calls bar, which always asserts — every thread is expected to fail.
__global__ void foo(int* A) {
bar(A[0]);
}
|
13,118 | // System includes
#include <stdio.h>
// CUDA runtime
#include <cuda_runtime.h>
#include<device_launch_parameters.h>
// Enumerates the CUDA devices on the system and prints their key properties.
// Fix: cudaDeviceProp::sharedMemPerBlock and ::totalGlobalMem are size_t;
// printing them with %d truncated/misread them on LP64 — use %zu.
int main()
{
int count;
cudaGetDeviceCount(&count);
printf("Available devices: %d\n", count);
cudaDeviceProp prop;
for(int i = 0; i < count; i++)
{
cudaGetDeviceProperties(&prop, i);
printf("Device: %d: %s\n", i, prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Max grid dims: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Max block dims: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Shared mem per block %zu\n", prop.sharedMemPerBlock);
printf("Clock speed: %d\n", prop.clockRate);
printf("Global memory size %zu\n", prop.totalGlobalMem);
}
return 0;
}
|
13,119 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Parses an ASCII PPM (P3) file. On success returns a malloc'd array of
// width*height*3 floats (interleaved RGB) and stores the dimensions through
// the out-parameters; exits the process on any open/format error.
float* readImage(int* width, int* height, std::string path) {
    std::ifstream reader;
    reader.open(path);
    if (!reader.is_open()) {
        std::cerr << "Unable to open file." << std::endl;
        exit(1);
    }
    // Pull one whitespace-delimited token from the stream.
    auto nextToken = [&reader]() {
        std::string tok;
        reader >> tok;
        return tok;
    };
    // Header: magic number must identify an ASCII PPM.
    if (nextToken() != "P3") {
        std::cerr << "The magic number is not P3 (The file is not an ASCII PPM file)." << std::endl;
        exit(1);
    }
    // Header: dimensions.
    *width = stoi(nextToken());
    *height = stoi(nextToken());
    std::cout << "The image dimension is " << *width << " x " << *height << "." << std::endl;
    // Header: maximum component value — parsed but not otherwise used.
    (void) stof(nextToken());
    // Body: width*height RGB triples.
    const int n = *width * *height * 3;
    float* vec = (float*) malloc(n * sizeof(float));
    if (vec == NULL) {
        std::cout << "Malloc for vector in readImage(int* width, int* height, std::string path) fails." << std::endl;
        exit(1);
    }
    for (int i = 0; i < n; i++) {
        vec[i] = stof(nextToken());
    }
    reader.close();
    std::cout << "Finish reading image file." << std::endl;
    return vec;
}
// Serializes an interleaved RGB float image as an ASCII PPM (P3) with max
// value 255; components are truncated to int, one text line per image row.
// Exits the process if the file cannot be opened for writing.
void writeImage(float* vec, int width, int height, std::string path) {
    std::cout << "Start writing image file." << std::endl;
    std::ofstream writer;
    writer.open(path);
    if (!writer.is_open()) {
        std::cerr << "Unable to write file." << std::endl;
        exit(1);
    }
    // PPM header: magic, dimensions, max component value.
    writer << "P3\n" << width << " " << height << "\n" << "255\n";
    for (int row = 0; row < height; row++) {
        const int rowBase = row * width * 3;
        // All components of a row are contiguous in memory; emit them in order.
        for (int i = rowBase; i < rowBase + width * 3; i++) {
            writer << (int) vec[i] << " ";
        }
        writer << "\n";
    }
    writer.close();
    std::cout << "Finish writing image file.\n" << std::endl;
}
// Host-side grayscale conversion: reads a PPM, applies the luminosity
// weights (0.21 R + 0.72 G + 0.07 B) to every pixel, replicates the gray
// value into all three channels, and writes the result.
void colorToGrayCPU(std::string readPath, std::string writePath) {
    int width, height;
    float* inVec = readImage(&width, &height, readPath);
    float* outVec = (float*) malloc(width * height * 3 * sizeof(float));
    if (outVec == NULL) {
        std::cout << "Malloc for vector in colorToGrayCPU(std::string readPath, std::string writePath) fails." << std::endl;
        exit(1);
    }
    const int pixels = width * height;
    for (int p = 0; p < pixels; p++) {
        const int base = p * 3;
        const float gray = 0.21 * inVec[base] + 0.72 * inVec[base + 1] + 0.07 * inVec[base + 2];
        outVec[base] = gray;
        outVec[base + 1] = gray;
        outVec[base + 2] = gray;
    }
    writeImage(outVec, width, height, writePath);
    // Free allocated memories
    free(outVec);
    free(inVec);
}
// Kernel: convert one RGB pixel to grayscale (weighted luminosity).
// Expects a 2D launch covering width x height; out-of-range threads
// do nothing.
__global__
void colorToGrayKernel(float* inVec, float* outVec, int width, int height) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < height && col < width) {
        int index = (row * width + col) * 3;
        float r = inVec[index];
        float g = inVec[index + 1];
        float b = inVec[index + 2];
        // Use float literals: bare 0.21 etc. are doubles and would force
        // the whole expression into slow double arithmetic on the GPU.
        float gray = 0.21f * r + 0.72f * g + 0.07f * b;
        outVec[index] = gray;
        outVec[index + 1] = gray;
        outVec[index + 2] = gray;
    }
}
// Kernel: apply the (blurSize x blurSize) convolution stored row-major in
// blurMat to one pixel, clamping sample coordinates at the image border.
// Expects a 2D launch covering width x height.
__global__
void gaussianBlurKernel(float* inVec, float* outVec, int width, int height, float* blurMat, int blurSize) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= height || col >= width) return;
    int half = blurSize / 2;
    int w = 0;  // running index into the flattened blur matrix
    float accR = 0;
    float accG = 0;
    float accB = 0;
    for (int dr = -half; dr <= half; dr++) {
        for (int dc = -half; dc <= half; dc++) {
            // Clamp the sample position so border pixels reuse edge values.
            int sr = fmaxf(0, fminf(row + dr, height - 1));
            int sc = fmaxf(0, fminf(col + dc, width - 1));
            int s = (sr * width + sc) * 3;
            accR += inVec[s] * blurMat[w];
            accG += inVec[s + 1] * blurMat[w];
            accB += inVec[s + 2] * blurMat[w];
            w++;
        }
    }
    int out = (row * width + col) * 3;
    outVec[out] = accR;
    outVec[out + 1] = accG;
    outVec[out + 2] = accB;
}
// Convert the PPM at readPath to grayscale on the GPU and save the result
// to writePath. Device buffers are freed before returning.
void colorToGrayGPU(std::string readPath, std::string writePath) {
    int width, height;
    float* inVec = readImage(&width, &height, readPath);
    int size = width * height * 3 * sizeof(float);
    float* outVec = (float*) malloc(size);
    if (outVec == NULL) {
        std::cout << "Malloc for vector in colorToGrayGPU(std::string readPath, std::string writePath) fails." << std::endl;
        exit(1);
    }
    float* d_inVec, *d_outVec;
    cudaMalloc((void**) &d_inVec, size);
    cudaMalloc((void**) &d_outVec, size);
    cudaMemcpy(d_inVec, inVec, size, cudaMemcpyHostToDevice);
    // NOTE: the kernel overwrites every output pixel, so the original
    // host->device copy of the *uninitialized* outVec buffer was dropped.
    dim3 dimGrid(ceil(width / 16.0), ceil(height / 16.0), 1);
    dim3 dimBlock(16, 16, 1);
    colorToGrayKernel<<<dimGrid, dimBlock>>>(d_inVec, d_outVec, width, height);
    // Kernel launches fail silently; surface any launch error here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "colorToGrayKernel launch failed: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    cudaMemcpy(outVec, d_outVec, size, cudaMemcpyDeviceToHost);
    cudaFree(d_inVec);
    cudaFree(d_outVec);
    writeImage(outVec, width, height, writePath);
    // Free allocated memories
    free(outVec);
    free(inVec);
}
// Build a normalized (blurSize x blurSize) Gaussian kernel, flattened
// row-major into a malloc'd array the caller must free. blurSize should be
// odd so the kernel is centered; sigma is the Gaussian standard deviation.
float* genGaussianBlurMat(int blurSize, float sigma) {
    const float PI = 3.14159265359f;
    const int n = blurSize * blurSize;
    float* blurMat = (float*) malloc(n * sizeof(float));
    // BUG FIX: the original never checked this allocation, unlike every
    // other malloc in this file.
    if (blurMat == NULL) {
        std::cout << "Malloc for vector in genGaussianBlurMat(int blurSize, float sigma) fails." << std::endl;
        exit(1);
    }
    int halfBlurSize = blurSize / 2;
    float sum = 0.0f;
    int counter = 0;
    for (int x = -halfBlurSize; x <= halfBlurSize; x++) {
        for (int y = -halfBlurSize; y <= halfBlurSize; y++) {
            // 2D Gaussian density at offset (x, y) from the kernel center.
            float g = 1.0f / (2.0f * PI * sigma * sigma) * exp(-(x * x + y * y) / (2.0f * sigma * sigma));
            blurMat[counter++] = g;
            sum += g;
        }
    }
    // Normalize so the weights sum to 1 (preserves image brightness).
    // A single flat loop replaces the original redundant nested loop.
    for (int i = 0; i < n; i++) {
        blurMat[i] /= sum;
    }
    return blurMat;
}
// Apply a Gaussian blur of the given kernel size / sigma to the PPM at
// readPath on the GPU and save the result to writePath. Even blurSize
// values are promoted to the next odd size so the kernel stays centered
// (blurSize 4 and 5 share halfBlurSize 2).
void gaussianBlurGPU(std::string readPath, std::string writePath, int blurSize, float sigma) {
    int width, height;
    float* inVec = readImage(&width, &height, readPath);
    int size = width * height * 3 * sizeof(float);
    float* outVec = (float*) malloc(size);
    if (outVec == NULL) {
        // BUG FIX: the original message named colorToGrayGPU.
        std::cout << "Malloc for vector in gaussianBlurGPU(std::string readPath, std::string writePath, int blurSize, float sigma) fails." << std::endl;
        exit(1);
    }
    if (blurSize % 2 == 0) {
        blurSize++;
    }
    float* blurMat = genGaussianBlurMat(blurSize, sigma);
    float* d_inVec, *d_outVec, *d_blurMat;
    cudaMalloc((void**) &d_inVec, size);
    cudaMalloc((void**) &d_outVec, size);
    cudaMalloc((void**) &d_blurMat, blurSize * blurSize * sizeof(float));
    cudaMemcpy(d_inVec, inVec, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_blurMat, blurMat, blurSize * blurSize * sizeof(float), cudaMemcpyHostToDevice);
    // NOTE: the kernel writes every output pixel, so the original
    // host->device copy of the *uninitialized* outVec buffer was dropped.
    dim3 dimGrid(ceil(width / 16.0), ceil(height / 16.0), 1);
    dim3 dimBlock(16, 16, 1);
    gaussianBlurKernel<<<dimGrid, dimBlock>>>(d_inVec, d_outVec, width, height, d_blurMat, blurSize);
    // Kernel launches fail silently; surface any launch error here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "gaussianBlurKernel launch failed: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    cudaMemcpy(outVec, d_outVec, size, cudaMemcpyDeviceToHost);
    cudaFree(d_inVec);
    cudaFree(d_outVec);
    cudaFree(d_blurMat);
    writeImage(outVec, width, height, writePath);
    // Free allocated memories
    free(outVec);
    free(inVec);
    free(blurMat);
}
// Driver: run the CPU and GPU grayscale conversions, then two GPU blurs
// (one on the color source, one on the grayscale output).
int main() {
    const std::string source = "pineapple_pizza.ppm";
    const std::string grayCpuPath = "pineapple_pizza_gray_cpu.ppm";
    const std::string grayGpuPath = "pineapple_pizza_gray_gpu.ppm";
    const std::string blurGpuPath = "pineapple_pizza_blur_gpu.ppm";
    const std::string grayBlurGpuPath = "pineapple_pizza_gray_blur_gpu.ppm";
    colorToGrayCPU(source, grayCpuPath);
    colorToGrayGPU(source, grayGpuPath);
    gaussianBlurGPU(source, blurGpuPath, 21, 7.0);
    gaussianBlurGPU(grayGpuPath, grayBlurGpuPath, 21, 7.0);
    return 0;
}
13,120 | #include "includes.h"
// Kernel: assign each point to its nearest cluster and count how many
// labels changed. devDiff is an nPoints x nClusters distance table laid
// out with a column stride of diffPitchInFloats; devChanges accumulates
// the total number of points that switched cluster across all blocks.
__global__ void findDiffLabels(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ unsigned int localChanges;
    // BUG FIX: shared memory is uninitialized; the block counter must be
    // zeroed (and the write made visible) before any thread increments it.
    if (threadIdx.x == 0) {
        localChanges = 0;
    }
    __syncthreads();
    if (x < nPoints) {
        int index = x;
        float minDistance = 10000000;
        int minCluster = -1;
        // Scan this point's distance to every cluster.
        for (int cluster = 0; cluster < nClusters; cluster++) {
            float clusterDistance = devDiff[index];
            if (clusterDistance < minDistance) {
                minDistance = clusterDistance;
                minCluster = cluster;
            }
            index += diffPitchInFloats;
        }
        int previousCluster = devClusters[x];
        devClusters[x] = minCluster;
        if (minCluster != previousCluster) {
            atomicInc(&localChanges, 10000000);
        }
    }
    __syncthreads();
    // One atomic per block pushes the local tally to the global counter.
    if (threadIdx.x == 0) {
        atomicAdd(devChanges, (int)localChanges);
    }
}
13,121 | #include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/zip_iterator.h>
//---------------------------------------------------------------------------
// NVCC is not yet able to compile C++11 code.
// Hence the need to keep Thrust and VexCL code in separate files.
//---------------------------------------------------------------------------
// Sort the device range [key_begin, key_end) by key, applying the same
// permutation to the values starting at val_begin. Raw device pointers
// are wrapped with device_pointer_cast so Thrust dispatches to its
// device backend.
template <typename Key, typename Val>
void thrust_sort_by_key(Key *key_begin, Key *key_end, Val *val_begin) {
    thrust::sort_by_key(
            thrust::device_pointer_cast(key_begin),
            thrust::device_pointer_cast(key_end),
            thrust::device_pointer_cast(val_begin)
            );
}
// Reduce the device range by key: runs of equal consecutive keys in
// [key_begin, key_end) are collapsed, combining their values from
// val_begin into key_output / val_output. Returns a raw pointer one past
// the last value written, so the caller can compute the number of
// unique keys produced.
template <typename Key, typename Val>
Val* thrust_reduce_by_key(
        const Key *key_begin, const Key *key_end, const Val *val_begin,
        Key *key_output, Val *val_output
        )
{
    thrust::pair< thrust::device_ptr<Key>, thrust::device_ptr<Val> >
        end = thrust::reduce_by_key(
                thrust::device_pointer_cast(key_begin),
                thrust::device_pointer_cast(key_end),
                thrust::device_pointer_cast(val_begin),
                thrust::device_pointer_cast(key_output),
                thrust::device_pointer_cast(val_output)
                );
    // end.second points one past the last reduced value.
    return thrust::raw_pointer_cast(end.second);
}
//---------------------------------------------------------------------------
// Same thing, for a pair of keys
//---------------------------------------------------------------------------
// Sort by a composite (key1, key2) key: the two key arrays are fused with
// zip iterators so they are compared lexicographically and permuted
// together with the values. key2 must have at least as many elements as
// [key1_begin, key1_end).
template <typename Key1, typename Key2, typename Val>
void thrust_sort_by_key(
        Key1 *key1_begin, Key1 *key1_end, Key2 *key2_begin, Val *val_begin)
{
    thrust::sort_by_key(
            thrust::make_zip_iterator(
                thrust::make_tuple(
                    thrust::device_pointer_cast(key1_begin),
                    thrust::device_pointer_cast(key2_begin)
                    )
                ),
            thrust::make_zip_iterator(
                thrust::make_tuple(
                    thrust::device_pointer_cast(key1_end),
                    // key2's end is derived from key1's length.
                    thrust::device_pointer_cast(key2_begin + (key1_end - key1_begin))
                    )
                ),
            thrust::device_pointer_cast(val_begin)
            );
}
// Reduce by a composite (key1, key2) key: consecutive elements whose
// zipped key pairs are equal are collapsed into key1_output/key2_output,
// with their values combined into val_output. Returns a raw pointer one
// past the last value written.
template <typename Key1, typename Key2, typename Val>
Val* thrust_reduce_by_key(
        const Key1 *key1_begin, const Key1 *key1_end, const Key2 *key2_begin,
        const Val *val_begin,
        Key1 *key1_output, Key2 *key2_output, Val *val_output
        )
{
    // reduce_by_key returns (keys_end, values_end); only values_end is
    // needed to report how many reduced entries were produced.
    thrust::pair<
        thrust::zip_iterator<
            thrust::tuple<
                thrust::device_ptr<Key1>,
                thrust::device_ptr<Key2>
                >
            >,
        thrust::device_ptr<Val>
        > end = thrust::reduce_by_key(
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_pointer_cast(key1_begin),
                        thrust::device_pointer_cast(key2_begin)
                        )
                    ),
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_pointer_cast(key1_end),
                        // key2's end is derived from key1's length.
                        thrust::device_pointer_cast(key2_begin + (key1_end - key1_begin))
                        )
                    ),
                thrust::device_pointer_cast(val_begin),
                thrust::make_zip_iterator(
                    thrust::make_tuple(
                        thrust::device_pointer_cast(key1_output),
                        thrust::device_pointer_cast(key2_output)
                        )
                    ),
                thrust::device_pointer_cast(val_output)
                );
    return thrust::raw_pointer_cast(end.second);
}
//---------------------------------------------------------------------------
// Due to the code separation we also need to explicitly instantiate the
// necessary templates.
//---------------------------------------------------------------------------
// Instantiate the single-key sort wrapper for (int, double).
#define VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY(K, V)                             \
  template void thrust_sort_by_key<K, V>(                                      \
      K * key_begin, K * key_end, V * val_begin)
VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY(int, double);
#undef VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY
// Instantiate the single-key reduce wrapper for (int, double).
#define VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY(K, V)                           \
  template V * thrust_reduce_by_key<K, V>(                                     \
      const K * key_begin, const K * key_end, const V * val_begin,             \
      K * key_output, V * val_output)
VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY(int, double);
#undef VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY
// Instantiate the composite-key sort wrapper for (int, int, double).
#define VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY2(K1, K2, V)                       \
  template void thrust_sort_by_key<K1, K2, V>(                                 \
      K1 * key1_begin, K1 * key1_end, K2 * key2_begin, V * val_begin)
VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY2(int, int, double);
#undef VEXCL_INSTANTIATE_THRUST_SORT_BY_KEY2
// Instantiate the composite-key reduce wrapper for (int, int, double).
#define VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY2(K1, K2, V)                     \
  template V *thrust_reduce_by_key<K1, K2, V>(                                 \
      const K1 * key1_begin, const K1 * key1_end, const K2 * key2_begin,       \
      const V * val_begin, K1 * key1_output, K2 * key2_output, V * val_output)
VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY2(int, int, double);
#undef VEXCL_INSTANTIATE_THRUST_REDUCE_BY_KEY2
|
13,122 |
/*******
The code below is the original code, edited so that it would run on CUDA
Compute Capability 6.1 hardware (EVGA/NVIDIA GTX 1070) with CUDA v9.0.176.
The display driver being used is NVIDIA 384.111. The OS is Debian Linux v9
('Sid').
Charles W Johnson
April, 2018
*******/
/*************************************************************************************
Implementing Single Source Shortest Path on CUDA 1.1 Hardware using algorithm
given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
This Kernel copies the Updating cost array Ua to the actual cost array Ca. It also toggles
a global flag d_finished. If no thread changes d_finished the execution stops.
Created by Pawan Harish.
**************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 512
// Kernel 2 of the SSSP relaxation: commit the updating-cost array (Ua)
// into the cost array (Ca). A vertex whose cost improved is re-masked for
// the next iteration and *d_finished is raised so the host loops again.
// Assumes blocks of MAX_THREADS_PER_BLOCK threads.
__global__ void DijkastraKernel2(int* g_graph_nodes, int* g_graph_edges, short int* g_graph_weights,
                                 int* g_graph_updating_cost, bool* g_graph_mask,
                                 int* g_cost, bool *d_finished, int no_of_nodes, int edge_list_size)
{
    int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= no_of_nodes) return;
    // A strictly smaller tentative cost wins: commit it and reactivate
    // the vertex so its neighbours are relaxed on the next pass.
    if (g_cost[tid] > g_graph_updating_cost[tid])
    {
        g_cost[tid] = g_graph_updating_cost[tid];
        g_graph_mask[tid] = true;
        *d_finished = true;
    }
    // Resynchronize the updating-cost copy with the committed cost.
    g_graph_updating_cost[tid] = g_cost[tid];
}
|
13,123 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#define MAX 1024
// Block-level sum reduction in dynamic shared memory. Each block writes
// its partial sum to g_odata[blockIdx.x]; `length` is the total number of
// valid elements in g_idata. Requires blockDim.x * sizeof(int) bytes of
// dynamic shared memory.
__global__ void reduce(int *g_idata, int *g_odata, int length){
    extern __shared__ int sdata[];
    // Round blockDim.x up to a power of two so the halving loop below
    // pairs every element even for non-power-of-two block sizes.
    int next_power_of_2 = (int)pow( (float)2.0, (int) ceil( log2( (float) blockDim.x ) ) );
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the original tested `tid > length` and returned early, which
    // (a) compared the wrong index (thread id, not global element id),
    // (b) still loaded g_idata out of bounds when i >= length, and
    // (c) skipped __syncthreads() divergently. Load 0 for the tail instead
    // so every thread reaches the barriers.
    sdata[tid] = (i < (unsigned int)length) ? g_idata[i] : 0;
    __syncthreads();
    for(unsigned int s=next_power_of_2/2; s>0; s>>=1) {
        if (tid < s && tid+s < blockDim.x) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Sequential reference sum of the first `tamanho` elements of x.
int soma_seq(int *x, int tamanho){
    int total = 0;
    for (int idx = 0; idx < tamanho; ++idx) {
        total += x[idx];
    }
    return total;
}
// Sum N ones on the GPU with a single-block shared-memory reduction and
// compare against the sequential CPU sum. Usage: ./prog N  (1 <= N <= 1024).
int main(int argc, char * argv[])
{
    // BUG FIX: argv[1] was dereferenced without checking argc.
    if (argc < 2) {
        printf("Usage: %s N\n", argv[0]);
        return 1;
    }
    const int N = atoi(argv[1]);
    // The host array and the single reduction block both cap N at MAX.
    if (N < 1 || N > MAX) {
        printf("N must be between 1 and %d\n", MAX);
        return 1;
    }
    int a[MAX], b[MAX];
    int *dev_a, *dev_b;
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    for (int i=0; i<N; i++ )
        a[i]= 1;
    int valor_seq = soma_seq(a, N);
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy (dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    // BUG FIX: the original launched 64 blocks, so blocks 1..63 read past
    // the end of dev_a; one block of N threads covers the whole array.
    // (Also dropped: the pointless copy of the uninitialized b array and
    // the dev_size buffer that was allocated but never used or freed.)
    reduce<<<1, N, N * sizeof(int)>>>(dev_a, dev_b, N);
    cudaMemcpy(b, dev_b, sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("%.10f\n", time);
    if(b[0] != valor_seq)
        printf("Soma incorreta\n");
    //printf("Soma: %d\n", b[0]);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
13,124 | #include <cuda.h>
#include "cuda_runtime.h"
// #include <cutil.h>
#include "texture_fetch_functions.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
// Report a pending CUDA error (if any) with its source location to
// stderr, reset the device, and terminate the process. Intended to be
// invoked through the __CUDA_ERROR macro below, which supplies
// __FILE__/__LINE__.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess == status)
        return;
    std::cerr << std::endl
              << " CUDA error " << file
              << "(" << line << ")"
              << " : " << errorMessage
              << " -> " << cudaGetErrorString(status) << "(" << (int)status
              << ") " << std::endl
              << std::endl;
    cudaDeviceReset();
    std::exit(EXIT_FAILURE);
}
#define __CUDA_ERROR(msg) \
{ \
cudaDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
// Kernel: store 2 through every device pointer held in the
// constant-memory table prt[0..size-1]; threads stride across the table
// so any block size covers it.
__global__ void matrix_add()
{
    int idx = threadIdx.x;
    while (idx < size)
    {
        *(prt[idx]) = 2;
        idx += blockDim.x;
    }
}
// Exercise a __constant__ table of device pointers: point each slot of
// `prt` at one element of a single device allocation, have the kernel
// store 2 through every pointer, then copy the allocation back and print.
int main()
{
    // NOTE(review): hard-coded device index — adjust per machine.
    cudaSetDevice(5);
    int *p = (int *)malloc(sizeof(int) * size);
    // BUG FIX: sizeof yields size_t; printing it with %d is undefined
    // behavior — use %zu.
    printf("host:%zu,%zu\n", sizeof(int *), sizeof(int));
    int *pd;
    cudaMalloc(&pd, sizeof(int) * size);
    // Each table slot points at one element of the single allocation.
    int *pdarray[size];
    for (int i = 0; i < size; i++)
    {
        pdarray[i] = pd + i;
    }
    cudaMemcpyToSymbol(prt, pdarray, sizeof(int *) * size);
    matrix_add<<<1, 256>>>();
    // The blocking memcpy also synchronizes with the kernel above.
    cudaMemcpy(p, pd, sizeof(int) * size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++)
    {
        cout << p[i] << " ";
    }
    // BUG FIX: the original leaked both the host and device allocations
    // and fell off main without a return.
    cudaFree(pd);
    free(p);
    return 0;
}
13,125 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <float.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
// Abort (via assert) on any CUDA runtime error, printing the error string
// to stderr first; returns the result unchanged so calls can be wrapped
// inline.
inline cudaError_t _checkError(cudaError_t result)
{
    if (result == cudaSuccess)
        return result;
    fprintf(stderr, "CUDA Runtime Error: %s\n",
            cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
// Read a 1-based COO matrix file ("rows cols nnz" header, then one
// "row col value" entry per line) and convert it to CSR. Returns row_ptr
// (length rows+1, exclusive offsets), col_ind and values (length nnz)
// through the output parameters; r_count / v_count receive the row and
// nonzero counts. The caller owns the three returned arrays.
void read_matrix(int **r_ptr, int** c_ind,float** v, char*fname,int* r_count,int* v_count){
    //*************************************************************
    //READ AND CONVERT COO MATRIX TO CSR MATRIX
    //*************************************************************
    FILE * file;
    if ((file = fopen(fname, "r+")) == NULL)
    {
        printf("ERROR: file open failed\n");
        return;
    }
    int column_count,row_count,values_count;
    fscanf(file, "%d %d %d\n",&row_count,&column_count,&values_count);
    *r_count = row_count;
    *v_count = values_count;
    int i;
    // BUG FIX: the counting pass below increments row_ptr entries before
    // ever assigning them, so the array must start zeroed — plain malloc
    // left it uninitialized, producing garbage CSR offsets. Use calloc.
    int *row_ptr =(int*) calloc(row_count+1, sizeof(int));
    int *col_ind =(int*) malloc(values_count * sizeof(int));
    // -1 marks "slot not yet filled" for the placement pass below.
    for(i=0; i<values_count; i++){
        col_ind[i] = -1;
    }
    float *values =(float*) malloc(values_count * sizeof(float));
    int row,column;
    float value;
    //*************************************************************
    //PASS 1: count the nonzeros in each row.
    //*************************************************************
    while (1) {
        int ret = fscanf(file, "%d %d %f\n",&row,&column,&value);
        column --;   // convert from 1-based file indices
        row --;
        if(ret == 3){
            row_ptr[row]++;
        } else if(ret == EOF) {
            break;
        } else {
            printf("No match.\n");
        }
    }
    rewind(file);
    // Exclusive prefix sum turns the per-row counts into CSR row offsets.
    int index = 0;
    int val = 0;
    for(i = 0; i<row_count;i++){
        val = row_ptr[i];
        row_ptr[i] = index;
        index += val;
    }
    row_ptr[row_count] = values_count;
    // Skip the header again, then PASS 2: place each entry in the first
    // free slot of its row (col_ind == -1 marks free).
    fscanf(file, "%d %d %d\n",&row_count,&column_count,&values_count);
    i = 0;
    while (1) {
        int ret = fscanf(file, "%d %d %f\n",&row,&column,&value);
        column --;
        row --;
        if(ret == 3){
            while(col_ind[i+row_ptr[row]] != -1){ i++;}
            col_ind[i+row_ptr[row]] = column;
            values[i+row_ptr[row]] = value;
            i=0;
        } else if(ret == EOF) {
            break;
        } else {
            printf("No match.\n");
        }
    }
    fclose(file);
    *r_ptr = row_ptr;
    *c_ind = col_ind;
    *v = values;
}
// CPU reference SpMV: y += A*x for a CSR matrix with `num_rows` rows,
// where ptr holds the row offsets, indices the column indices and data
// the nonzero values.
void cpu_multiply(const int num_rows,const int *ptr, const int *indices, const float *data, const float *x, float* y){
    for (int r = 0; r < num_rows; r++){
        const int begin = ptr[r];
        const int end = ptr[r + 1];
        // Dot product of row r with x.
        float acc = 0;
        for (int nz = begin; nz < end; nz++){
            acc += data[nz] * x[indices[nz]];
        }
        y[r] += acc;
    }
}
// GPU SpMV, one thread per row (monolithic grid): y += A*x for a CSR
// matrix described by ptr / indices / data.
__global__ void vector_multiply_unopti(const int num_rows,const int *ptr,const int *indices,const float *data,const float *x, float* y){
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if(row < num_rows){
        float dot = 0;
        int row_start = ptr[row];
        int row_end = ptr[row + 1];
        for(int i = row_start; i < row_end; i++){
            dot += data[i] * x[indices[i]];
        }
        // BUG FIX: this store was outside the bounds check, so tail threads
        // of the padded grid wrote an *uninitialized* dot past the end of y.
        y[row] += dot;
    }
}
// GPU SpMV with a grid-stride row loop and unroll hints: y += A*x for a
// CSR matrix described by ptr / indices / data.
__global__ void vector_multiply_unroll(const int num_rows,const int *ptr,const int *indices,const float *data,const float *x, float* y){
    int row;
    int i;
    int row_start, row_end;
    float dot;
    #pragma unroll
    for (row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        dot = 0;
        row_start = ptr[row];
        row_end = ptr[row + 1];
        #pragma unroll
        for(i = row_start; i < row_end; i++){
            dot += data[i] * x[indices[i]];
        }
        // BUG FIX: the original computed `dot` but never stored it, so the
        // kernel had no effect; accumulate into y inside the row loop.
        y[row] += dot;
    }
}
// GPU SpMV staging each row's dot product in shared memory, one thread
// per row. Requires blockDim.x <= 256 (static shared array size).
__global__ void vector_multiply_shared(const int num_rows,const int *ptr,const int *indices,const float *data,const float *x, float* y){
    __shared__ float s[256];
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int lane = threadIdx.x;
    // BUG FIX: the original indexed the 256-entry shared array with the
    // *global* row id, corrupting shared memory for any row >= 256;
    // shared storage must be indexed per block with threadIdx.x.
    s[lane] = 0.0;
    if(row < num_rows){
        int row_start = ptr[row];
        int row_end = ptr[row + 1];
        #pragma unroll
        for(int i = row_start; i < row_end; i++){
            s[lane] += data[i] * x[indices[i]];
        }
    }
    __syncthreads();
    // BUG FIX: the output store is now bounds-checked (tail threads
    // previously wrote past the end of y) and accumulates like the other
    // SpMV kernels in this file instead of overwriting.
    if(row < num_rows){
        y[row] += s[lane];
    }
}
// GPU SpMV with a grid-stride row loop: y += A*x for a CSR matrix
// described by ptr / indices / data.
__global__ void vector_multiply_stride(const int num_rows,const int *ptr,const int *indices,const float *data,const float *x, float* y){
    int row;
    int i;
    int row_start, row_end;
    float dot;
    for (row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        dot = 0;
        row_start = ptr[row];
        row_end = ptr[row + 1];
        for(i = row_start; i < row_end; i++){
            dot += data[i] * x[indices[i]];
        }
        // BUG FIX: the accumulate was after the stride loop, where `row`
        // has already run past num_rows — an out-of-bounds write that also
        // discarded every iteration's result but the last.
        y[row] += dot;
    }
}
// GPU SpMV with a grid-stride row loop and unroll hints (the intended
// software-prefetch variant; explicit prefetching is left to the
// compiler). y += A*x for a CSR matrix described by ptr / indices / data.
__global__ void vector_multiply_stride_prefetch(const int num_rows,const int *ptr,const int *indices,const float *data,const float *x, float* y){
    int row;
    int i;
    int row_start, row_end;
    float dot;
    #pragma unroll
    for (row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        dot = 0;
        row_start = ptr[row];
        row_end = ptr[row + 1];
        #pragma unroll
        for(i = row_start; i < row_end; i++){
            dot += data[i] * x[indices[i]];
        }
        // BUG FIX: the accumulate was after the stride loop, where `row`
        // has already run past num_rows — an out-of-bounds write that also
        // discarded every iteration's result but the last.
        y[row] += dot;
    }
}
int main (int argc, char* argv[]){
//*************************************************************
//Argument List: vector_to_multiply, repetitions_for_the code,
// verbose, filename_matrixi,
// optimizations_to_apply
//*************************************************************
//*************************************************************
//HOST CODE
//*************************************************************
if ( argc != 6){
printf( "Incorrect usage");
}
else{
int* row_ptr;
int* col_ind;
float* values;
int r_count, v_count, i, k, j;
int num = atoi(argv[1]);
int repetitions = atoi(argv[2]);
int mode = atoi(argv[3]);
char* fname = argv[4];
int opti = atoi(argv[5]);
//*************************************************************
//READING THE GIVEN COORDINATE FORM MATRIX AND CONVERTING IT TO
//COMPRESSED SPARSE ROW MATRIX
//*************************************************************
read_matrix(&row_ptr, &col_ind, &values, fname, &r_count, &v_count);
float* x =(float*) malloc(r_count* sizeof(float));
float* y =(float*) calloc(r_count, sizeof(float));
float* cy =(float*) calloc(r_count, sizeof(float));
for(i = 0; i<r_count;i++){
x[i]= (float)num;
}
if(mode == 1){
//*************************************************************
//PRINT OUT THE GIVEN COORDINATE MATRIX STORED IN THE FORM OF
//CSR MATRIX
//*************************************************************
fprintf(stdout,"PERFORMING SPMV ON MATRIX: \n");
for(i = 0; i<r_count;i++){
if(i+1 <= r_count){
for(k = row_ptr[i]; k < row_ptr[i+1];k++){
fprintf(stdout,"%d %d %.10f\n",i+1,col_ind[k]+1,values[k]);
}
}
}
}
//TODO: Timer for CPU run
//*************************************************************
//RUNNING ON CPU
//*************************************************************
/*struct timeval cstart, cend;
long mtime, secs, usecs;*/
float cdot;
//gettimeofday(&cstart, NULL);
for(k = 0; k<repetitions; k++){
//cpu_multiply(r_count, row_ptr, col_ind, values, x, y);
for (i=0; i<r_count; i++){
cdot = 0;
for(j = row_ptr[i]; j < row_ptr[i+1]; j++){
cdot+= values[j] * x[col_ind[j]];
}
cy[i] += cdot;
}
}
/*gettimeofday(&cend, NULL);
secs = cend.tv_sec - cstart.tv_sec;
usecs = cend.tv_usec - cstart.tv_usec;
mtime = ((secs) * 1000 + usecs/1000.0) + 0.5;
printf("Elapsed time: %ld millisecs\n", mtime);*/
//*************************************************************
//COMPLETED RUNNING ON CPU
//*************************************************************
//*************************************************************
//SETTING UP GPU RUN
//*************************************************************
int *d_row_ptr, *d_col_ind;
float *d_values, *d_x, *d_y;
cudaEvent_t start,stop,st,sp;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&st);
cudaEventCreate(&sp);
float milliseconds = 0;
//float timing = 0;
//int bytes = r_count * sizeof(float);
switch(opti){
case 1:
//*************************************************************
//RUNNING KERNEL WITH NO OPTIMIZATIONS MONOLITHIC
//*************************************************************
cudaMalloc(&d_row_ptr, r_count*sizeof(int));
cudaMalloc(&d_col_ind, v_count*sizeof(int));
cudaMalloc(&d_values, v_count*sizeof(int));
cudaMalloc(&d_x, r_count*sizeof(float));
cudaMalloc(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_unopti<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
break;
case 2:
//*************************************************************
//RUNNING KERNEL WITH NO OPTIMIZATIONS STRIDE
//*************************************************************
cudaMalloc(&d_row_ptr, r_count*sizeof(int));
cudaMalloc(&d_col_ind, v_count*sizeof(int));
cudaMalloc(&d_values, v_count*sizeof(int));
cudaMalloc(&d_x, r_count*sizeof(float));
cudaMalloc(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_stride<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
break;
case 3:
//*************************************************************
//RUNNING KERNEL WITH LOOP UNROLLING
//*************************************************************
cudaMalloc(&d_row_ptr, r_count*sizeof(int));
cudaMalloc(&d_col_ind, v_count*sizeof(int));
cudaMalloc(&d_values, v_count*sizeof(int));
cudaMalloc(&d_x, r_count*sizeof(float));
cudaMalloc(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_unroll<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
break;
case 4:
//*************************************************************
//RUNNING KERNEL WITH DATA TRANSFER OPTIMIZATIONS
//*************************************************************
//int bytes = r_count * sizeof(float);
cudaMallocHost(&d_row_ptr, r_count*sizeof(int));
cudaMallocHost(&d_col_ind, v_count*sizeof(int));
cudaMallocHost(&d_values, v_count*sizeof(int));
cudaMallocHost(&d_x, r_count*sizeof(float));
cudaMallocHost(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
//cudaEventRecord(st);
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
//cudaEventRecord(sp);
//cudaEventSynchronize(sp);
//cudaEventElapsedTime(&timing, st, sp);
//printf("Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / timing);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_unopti<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
//cudaEventRecord(st);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
//cudaEventRecord(sp);
//cudaEventSynchronize(sp);
//cudaEventElapsedTime(&timing, st, sp);
//printf("Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / timing);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(st);
cudaEventDestroy(sp);
break;
case 5:
//*************************************************************
//RUNNING KERNEL WITH DATA TRANSFER OPTIMIZATIONS AND
//LOOP UNROLLING
//*************************************************************
cudaMallocHost(&d_row_ptr, r_count*sizeof(int));
cudaMallocHost(&d_col_ind, v_count*sizeof(int));
cudaMallocHost(&d_values, v_count*sizeof(int));
cudaMallocHost(&d_x, r_count*sizeof(float));
cudaMallocHost(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_unroll<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
break;
case 6:
//*************************************************************
//RUNNING KERNEL WITH DATA TRANSFER OPTIMIZATIONS,
//LOOP UNROLLING AND USING SHARED MEMORY
//*************************************************************
cudaMallocHost(&d_row_ptr, r_count*sizeof(int));
cudaMallocHost(&d_col_ind, v_count*sizeof(int));
cudaMallocHost(&d_values, v_count*sizeof(int));
cudaMallocHost(&d_x, r_count*sizeof(float));
cudaMallocHost(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_shared<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
break;
default:
//*************************************************************
//RUNNING KERNEL WITH NO OPTIMIZATIONS IN DEFAULT CASE
//*************************************************************
cudaMalloc(&d_row_ptr, r_count*sizeof(int));
cudaMalloc(&d_col_ind, v_count*sizeof(int));
cudaMalloc(&d_values, v_count*sizeof(int));
cudaMalloc(&d_x, r_count*sizeof(float));
cudaMalloc(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
//printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
float blocksize = 64;
float blocknum = ceil(r_count/blocksize);
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
vector_multiply_unopti<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
}
/*cudaMalloc(&d_row_ptr, r_count*sizeof(int));
cudaMalloc(&d_col_ind, v_count*sizeof(int));
cudaMalloc(&d_values, v_count*sizeof(int));
cudaMalloc(&d_x, r_count*sizeof(float));
cudaMalloc(&d_y, r_count*sizeof(float));*/
/*cudaMallocHost(&d_row_ptr, r_count*sizeof(int));
cudaMallocHost(&d_col_ind, v_count*sizeof(int));
cudaMallocHost(&d_values, v_count*sizeof(int));
cudaMallocHost(&d_x, r_count*sizeof(float));
cudaMallocHost(&d_y, r_count*sizeof(float));
cudaMemcpy(d_row_ptr, row_ptr, r_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_col_ind, col_ind, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, v_count*sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(k = 0; k<repetitions; k++){
printf("Calling Kernel\n");
cudaMemcpy(d_x, x, r_count*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, r_count*sizeof(float), cudaMemcpyHostToDevice);
// kernel call
float blocksize = 64;
float blocknum = ceil(r_count/blocksize); //number of threads fixed and equal to row count
//printf("blocknum: %f\n",blocknum);
//blocknum = 1.0;
//printf("blocknum: %f\n",blocknum);
//vector_multiply_unopti<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
vector_multiply_unroll<<<blocknum, blocksize>>>(r_count, d_row_ptr, d_col_ind, d_values, d_x, d_y);
cudaMemcpy(y, d_y, r_count*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i<r_count;i++){
x[i] = y[i];
y[i]= 0.0;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);*/
//*************************************************************
//GPU RUN COMPLETED
//*************************************************************
//*************************************************************
//PRINTING RESULTANT MATRIX.
//ONLY NON ZERO ELEMENTS ARE DISPLAYED.
//*************************************************************
int count = 0;
if(mode == 1){
fprintf(stdout,"Calculated Vector Matrix\n");
for(i = 0; i<r_count;i++){
if(x[i] != 0){
fprintf(stdout,"%.10f\n",x[i]);
count++;
}
}
fprintf(stdout,"count = %d\n", count);
}
fprintf(stdout,"time = %f\n", milliseconds);
//fprintf(stdout,"CPU time = %f\n",cpu_time_used);
//*************************************************************
//FREE MEMORY
//*************************************************************
cudaFree(d_row_ptr);
cudaFree(d_col_ind);
cudaFree(d_values);
cudaFree(d_x);
cudaFree(d_y);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
}
|
13,126 | #include <iostream>
#include <fstream>
#include <cstdlib>
#include <cmath>
#include <string>
#include <chrono>
#include <cooperative_groups.h>
using namespace std;
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
#define N_PROP 1100
#define THREADS_PER_BLOCK 256
#define N_BLOCKS 1 //Needed to be equal to 1 for the correct synchronization between threads
__device__ float dC = 0.0f;
// Uniform random integer in [min, max], drawn from the thread's curand state.
// The 0.999999 widening gives the endpoint 'max' roughly the same mass as the
// interior values before truncation.
__device__ int random_int(curandState &crstate, int min, int max){
    const float span = (max - min + 0.999999);
    const float scaled = curand_uniform(&crstate) * span + min;
    return (int)truncf(scaled);
}
// Simulated-annealing training of a Boltzmann machine on a single block
// (N_BLOCKS must be 1: block-wide __syncthreads() is the only barrier used,
// and all threads must agree on 'unit_index', which they do by consuming the
// curand stream in lockstep — thread 0 draws in the accept/reject step while
// the others call skipahead(1, ...) to stay aligned).
__global__ void trainBM(const float* weights, unsigned *net_state, const unsigned int net_size,
const float temp_init, const float final_temp, const float cooling_rate, int seed) {
// Handle to thread block group
__shared__ float shared_weights_array[THREADS_PER_BLOCK];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
// Each thread covers ceil(net_size / THREADS_PER_BLOCK) units in the dot product.
unsigned int nodes_per_thread = ceil((float)net_size/(float)THREADS_PER_BLOCK);
int unit_index = 0;
float temperature = temp_init;
// Every thread gets the SAME (seed, 0, 0) stream on purpose so that
// random_int() below yields the same unit_index on all threads.
curandState crstate;
curand_init(seed, 0, 0, &crstate);
// Randomize the initial network state on the device.
if( i<net_size ){
net_state[i] = random_int(crstate, 0, 1);
}
do{
__syncthreads();
// N_PROP state-flip proposals per temperature level.
for(unsigned r=0; r<N_PROP; r++){
unit_index = random_int(crstate, 0, net_size-1);
// dC accumulates the energy delta of flipping unit_index; reset by thread 0.
if( i == 0 ){
dC = 0.0f;
}
//if(i<3 && j<3) printf("%d\n", unit_index);
// Tile the weighted sum over active neighbours: each pass loads one
// strided chunk into shared memory and tree-reduces it.
for(int k=0; k<nodes_per_thread; k++){
int j = k*THREADS_PER_BLOCK + i;
shared_weights_array[tid] = ( j<net_size && j!=unit_index) ? weights[unit_index*net_size+j]*net_state[j] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
shared_weights_array[tid] += shared_weights_array[tid + s];
}
__syncthreads();
}
if (tid == 0){
dC += shared_weights_array[0];
}
__syncthreads();
}
// Thread 0 performs the stochastic accept/reject (logistic in dC/T);
// the rest advance their RNG so the streams stay synchronized.
if (i == 0){
float probability = 1 / (1 + expf(-dC / temperature));
//printf("dC--------->%f\nPROBABILITY--------->%f\n", dC, probability);
if( curand_uniform(&crstate)<probability ){
net_state[unit_index]=1;
}else{
net_state[unit_index]=0;
}
}else{
skipahead(1, &crstate);
}
}
// Geometric cooling schedule.
temperature*=cooling_rate;
if(i==0) printf("%f\n", temperature);
}while( temperature > final_temp );
}
/*Params: 1. Number of nodes for the network
2. Initial Temperature
3. Final Temperature
4. Cooling Rate
5. Path to the file with the weights matrix
6. Seed
*/
// Entry point: parses net size / annealing schedule / weights file / seed,
// runs trainBM on the device, then scores the final state on the host.
int main(int narg, char** arg){
    if(narg != 7){
        perror("Wrong number of arguments.");
        exit(1);
    }
    auto start = chrono::high_resolution_clock::now();
    const unsigned int NET_SIZE = atoi(arg[1]);
    const float INITIAL_TEMPERATURE = atof(arg[2]);
    const float FINAL_TEMPERATURE = atof(arg[3]);
    const float COOL_RATE = atof(arg[4]);
    const unsigned SEED = atoi(arg[5]);
    // Host-side random initial state (informational: the kernel re-randomizes
    // the state on the device before annealing).
    unsigned* net_states = new unsigned [NET_SIZE];
    srand(SEED);
    for(unsigned int i=0; i<NET_SIZE; i++){
        net_states[i] = rand() % 2;
    }
    // Read the NET_SIZE x NET_SIZE weight matrix.
    float* weights = new float [NET_SIZE*NET_SIZE];
    ifstream matrix_file(arg[6], ifstream::in);
    if(!matrix_file.is_open()){
        // Fail early instead of annealing over uninitialized weights.
        perror("Cannot open weights file.");
        exit(1);
    }
    for(unsigned int i=0; i<NET_SIZE; i++){
        for(unsigned int j=0; j<NET_SIZE; j++){
            string x;
            matrix_file>>x;
            weights[i*NET_SIZE+j] = strtof( x.c_str(), NULL );
        }
    }
    auto start_gpu = chrono::high_resolution_clock::now();
    //Moving data to GPU
    unsigned* g_net_states;
    CUDA_CALL( cudaMalloc((void **)&g_net_states, NET_SIZE*sizeof(unsigned)) );
    float* g_weights;
    // bug fix: element size is sizeof(float), not sizeof(float*)
    CUDA_CALL( cudaMalloc((void **)&g_weights, NET_SIZE*NET_SIZE*sizeof(float)) );
    CUDA_CALL( cudaMemcpy(g_weights, weights, NET_SIZE*NET_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    //Running kernel
    unsigned threadsPerBlock = THREADS_PER_BLOCK;
    unsigned blocksPerGrid = N_BLOCKS;
    trainBM<<<blocksPerGrid, threadsPerBlock>>>(g_weights, g_net_states, NET_SIZE,
                                                INITIAL_TEMPERATURE, FINAL_TEMPERATURE, COOL_RATE, SEED);
    CUDA_CALL( cudaGetLastError() );   // surface launch-configuration errors
    //Moving result to CPU
    // bug fix: the state array holds unsigned, not float (same byte count, but
    // keep the element type honest)
    CUDA_CALL( cudaMemcpy(net_states, g_net_states, NET_SIZE*sizeof(unsigned), cudaMemcpyDeviceToHost) );
    //Freeing GPU memory
    CUDA_CALL( cudaFree(g_net_states) );
    CUDA_CALL( cudaFree(g_weights) );
    auto end_gpu = chrono::high_resolution_clock::now();
    // Cost function: sum of weights over pairs of simultaneously-active units.
    float C = 0;
    for(unsigned int i=0; i<NET_SIZE; i++){
        for(unsigned int j=i+1; j<NET_SIZE; j++){
            if( net_states[i] & net_states[j] ) C += weights[i*NET_SIZE+j];
        }
    }
    delete [] net_states;
    delete [] weights;
    auto end = chrono::high_resolution_clock::now();
    cout<<"Final cost function: "<<C<<endl;
    cout<<"GPU time: "<<chrono::duration_cast<chrono::milliseconds>(end_gpu-start_gpu).count()/1000.0f<<" seconds."<<endl;
    cout<<"Total time: "<<chrono::duration_cast<chrono::milliseconds>(end-start).count()/1000.0f<<" seconds."<<endl;
    return 0;
} |
13,127 | #include "includes.h"
// Numerically-stable softmax over n strided elements with temperature 'temp':
// output[i*stride] = exp(input[i*stride]/temp - max/temp) / sum.
// Subtracting the maximum keeps expf from overflowing.
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    // bug fix: 'val' was declared int, truncating every input toward zero, so
    // the subtracted maximum was wrong and the stability guarantee was lost
    for(i = 0; i < n; ++i){
        float val = input[i*stride];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        float e = expf(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}
// Contiguous variant: numerically-stable softmax of input[0..n-1] into output,
// with temperature 'temp'.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    // bug fix: the maximum was computed over int-truncated values
    for(i = 0; i < n; ++i){
        float val = input[i];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        // expf (float) instead of exp (double): consistent with the strided
        // overload and avoids a double-precision round trip per element
        float e = expf(input[i]/temp - largest/temp);
        sum += e;
        output[i] = e;
    }
    for(i = 0; i < n; ++i){
        output[i] /= sum;
    }
}
// One softmax per (spatial position, group, batch) triple over a grouped
// (hierarchical) layout: group g occupies group_size[g] channels starting at
// channel group_offset[g]. Launch with >= spatial*batch*groups threads.
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
// Decompose the flat id into (spatial s, group g, batch b).
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
// Offsets into the flattened tensor; 'stride' is presumably the per-batch
// element count — TODO confirm against the caller.
int goff = group_offset[g]*spatial;
int boff = b*stride;
// Channel elements of one group are 'spatial' apart, hence stride=spatial.
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
} |
13,128 | #include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
// Renders an animated radial wave into 'pic' (frames x width x width, 8-bit).
// Work split: each block owns a contiguous slab of frames, each thread a slab
// of rows. Assumes frames % gridDim.x == 0 and width % blockDim.x == 0
// (both are checked on the host before launch).
__global__ void fPixelGenerator(int width, int frames, unsigned char* pic) {
    int blockID = blockIdx.x;
    int tBlocks = gridDim.x;
    int fromBlocks = (blockID) * (frames / tBlocks);
    int toBlocks = (blockID + 1) * (frames / tBlocks);
    int threadID = threadIdx.x;
    int tThreads = blockDim.x;
    int fromThreads = (threadID) * (width / tThreads);
    int toThreads = (threadID + 1) * (width / tThreads);
    for (int frame = fromBlocks; frame < toBlocks; frame++) {
        for (int row = fromThreads; row < toThreads; row++) {
            for (int col = 0; col < width; col++) {
                // bug fix: the wave was centered on a hard-coded 1024x1024
                // frame regardless of the 'width' parameter; center on the
                // actual frame instead
                float fx = col - width / 2;
                float fy = row - width / 2;
                float d = sqrtf( fx * fx + fy * fy );
                // Damped cosine ripple, phase-shifted per frame for animation.
                unsigned char color = (unsigned char) (160.0f + 127.0f * cos(d/10.0f - frame/7.0f) / (d/50.0f + 1.0f));
                pic[frame * width * width + row * width + col] = (unsigned char) color;
            }
        }
    }
}
// Writes an x-by-y 8-bit image as a grayscale-palette BMP file named 'name'.
// The 54-byte template header is patched in place; rows are padded so the
// stored width is a multiple of 4 bytes, as BMP requires.
static void writeBMP(const int x, const int y, const unsigned char* const bmp, const char* const name) {
const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned char hdr[1078];
int i, j, c, xcorr, diff;
FILE* f;
xcorr = (x + 3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide
diff = xcorr - x;
for (i = 0; i < 54; i++) hdr[i] = bmphdr[i];
// Patch width, height, pixel-data size, and total file size into the header.
// NOTE(review): these int stores through a char buffer assume a little-endian
// host and tolerate the unaligned access — fine on x86, confirm elsewhere.
*((int*)(&hdr[18])) = xcorr;
*((int*)(&hdr[22])) = y;
*((int*)(&hdr[34])) = xcorr * y;
*((int*)(&hdr[2])) = xcorr * y + 1078;
// 256-entry palette: only the blue channel ramps 0..255 (grayscale-ish).
for (i = 0; i < 256; i++) {
j = i * 4 + 54;
hdr[j+0] = i; // blue ColorTable
hdr[j+1] = 0; // green
hdr[j+2] = 0; // red
hdr[j+3] = 0; // dummy
}
f = fopen(name, "wb"); assert(f != NULL);
c = fwrite(hdr, 1, 1078, f); assert(c == 1078);
if (diff == 0) {
// Width already a multiple of 4: dump the whole image in one write.
c = fwrite(bmp, 1, x * y, f); assert(c == x * y);
}
else {
// Row padding needed: write each row, then 'diff' zero bytes reused
// from the start of hdr (zeroed just below).
*((int*)(&hdr[0])) = 0; // need up to three zero bytes
for (j = 0; j < y; j++) {
c = fwrite(&bmp[j * x], 1, x, f); assert(c == x);
c = fwrite(hdr, 1, diff, f); assert(c == diff);
}
}
fclose(f);
}
// Entry point: parses <frame_width> <num_frames> <num_blocks> <num_threads>,
// renders the wave animation on the GPU into managed memory, times it, and
// (for small runs) dumps each frame as a BMP for visual verification.
int main(int argc, char *argv[]) {
    // check command line
    if (argc != 5) {
        fprintf(stderr, "usage: %s <frame_width> <num_frames> <num_blocks> <num_threads>\n", argv[0]);
        exit(-1);
    }
    int width = atoi(argv[1]);
    if (width < 100) {
        fprintf(stderr, "error: <frame_width> must be at least 100\n");
        exit(-1);
    }
    int frames = atoi(argv[2]);
    if (frames < 1) {
        fprintf(stderr, "error: <num_frames> must be at least 1\n");
        exit(-1);
    }
    int blocks = atoi(argv[3]);
    if (blocks < 1) {
        fprintf(stderr, "error: <num_blocks> must be at least 1\n");
        exit(-1);
    }
    int threads = atoi(argv[4]);
    if (threads < 1) {
        fprintf(stderr, "error: <num_threads> must be at least 1\n");
        exit(-1);
    }
    // The kernel splits frames across blocks and rows across threads evenly.
    if (frames % blocks != 0) {
        fprintf(stderr, "Frames not divisible by number of blocks\n");
        exit(-1);
    }
    if (width % threads != 0) {
        fprintf(stderr, "Width size not divisible by number of threads\n");
        exit(-1);
    }
    printf("computing %d frames of %d by %d picture - %d blocks w/ %d threads.\n", frames, width, width, blocks, threads);
    // allocate picture array (size_t math: the int product overflows past ~2GB)
    unsigned char* pic = NULL;
    cudaError_t err = cudaMallocManaged(&pic, (size_t)frames * width * width * sizeof(unsigned char));
    if (err != cudaSuccess) {   // bug fix: allocation failure was ignored
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    // start time
    timeval start, end;
    gettimeofday(&start, NULL);
    // Pixel Generator
    fPixelGenerator<<<blocks, threads>>>(width, frames, pic);
    err = cudaGetLastError();   // bug fix: launch errors were silently dropped
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // end time
    gettimeofday(&end, NULL);
    double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
    printf("compute time: %.4f s\n", runtime);
    // verify result by writing frames to BMP files
    if ((width <= 256) && (frames <= 100)) {
        for (int frame = 0; frame < frames; frame++) {
            char name[32];
            sprintf(name, "wave%d.bmp", frame + 1000);
            writeBMP(width, width, &pic[frame * width * width], name);
        }
    }
    // Free memory
    cudaFree(pic);
    return 0;
} |
13,129 | #include <iostream>
#include <cmath>
#include <cstring>
#include <ctime>
#include <string>
// Circular obstacle: center (c_x, c_y), radius r, velocity (v_x, v_y).
struct Obstacle
{
public:
    double c_x, c_y, r, v_x, v_y;
    // Default: unit circle at the origin, at rest.
    Obstacle() : c_x(0), c_y(0), r(1.0), v_x(0), v_y(0) {}
    // Fully-specified obstacle.
    Obstacle(double cx, double cy, double r0, double vx, double vy)
        : c_x(cx), c_y(cy), r(r0), v_x(vx), v_y(vy) {}
};
// Device-side IEEE-754 double +infinity, built by reinterpreting its bit pattern.
__device__ double infty_g(void)
{
    return __longlong_as_double(0x7ff0000000000000ULL);
}
// Host-side IEEE-754 double +infinity.
// bug fix: the old code VALUE-cast the integer bit pattern to double, which
// yields ~9.22e18 rather than infinity; the bits must be reinterpreted.
double infty_c(void)
{
    const unsigned long long ieee754inf = 0x7ff0000000000000;
    double inf;
    std::memcpy(&inf, &ieee754inf, sizeof inf);
    return inf;
}
// For each obstacle, computes the [t_s, t_e] time window during which it
// overlaps the scooter (modeled as a unit circle at the origin), writing the
// pair into list[2j], list[2j+1]. Grid-stride loop: any launch size works.
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// process each obstacle
for(int j = index; j < n; j += stride)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a.c_x * a.c_x + a.c_y * a.c_y);
double t_s = 0;
double t_e = 0;
//Case 1: object alrd collide w scooter
if(d <= 1)
{
t_s = 0;
t_e = infty_g();
}
//Case 2: object move in opposite dir w.r.t scooter
// NOTE(review): this per-axis sign test (OR of two axes) is a coarse
// "moving away" check — an obstacle moving diagonally toward the origin
// with one non-negative axis product is classified as never colliding.
// Confirm this is the intended approximation.
else if(a.c_x * a.v_x >= 0 || a.c_y * a.v_y >= 0)
{
t_s = infty_g();
t_e = infty_g();
} else
{
// Case 3: head-on approach. delta_t is the chord-crossing duration for
// combined radius (1 + r); t_s centers that window on the closest pass.
// NOTE(review): t_s uses sqrt(d^2 - 1) (scooter radius only, not 1+r) —
// verify against the derivation.
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((1.0 + a.r) * (1.0 + a.r) - 1) / v;
t_s = (sqrt(d * d - 1.0) / v) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
void intersectTime_c(int n, Obstacle points[], double list[])
{
for(int j = 0; j < n; j++)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a.c_x * a.c_x + a.c_y * a.c_y);
double t_s = 0;
double t_e = 0;
//Case 1: object alrd collide w scooter
if(d <= 1)
{
t_s = 0;
t_e = infty_c();
}
//Case 2: object move in opposite dir w.r.t scooter
else if(a.c_x * a.v_x >= 0 || a.c_y * a.v_y >= 0)
{
t_s = infty_c();
t_e = infty_c();
} else
{
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((1.0 + a.r) * (1.0 + a.r) - 1) / v;
t_s = (sqrt(d * d - 1.0) / v) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
// Discrete-memory GPU path: copies the obstacles to the device, runs the
// kernel, and copies the 2n intersection times back into 'list' (returned).
double* gpu_discrete(int n, Obstacle points[], double list[])
{
    Obstacle* points_g;
    cudaMalloc(&points_g, n * sizeof(Obstacle));
    double* list_g;
    cudaMalloc(&list_g, n * 2 * sizeof(double));
    cudaMemcpy(points_g, points, n * sizeof(Obstacle), cudaMemcpyHostToDevice);
    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;
    intersectTime_g<<<numBlocks, blockSize>>>(n, points_g, list_g);
    cudaDeviceSynchronize();
    cudaMemcpy(list, list_g, n * 2 * sizeof(double), cudaMemcpyDeviceToHost);
    // bug fix: both device buffers were leaked
    cudaFree(points_g);
    cudaFree(list_g);
    return list;
}
// Unified-memory GPU path. Note: the result is returned in a NEW managed
// buffer (the 'list' argument is not filled); the caller owns the returned
// pointer and must release it with cudaFree.
double* gpu_unified(int n, Obstacle points[], double list[])
{
    Obstacle* points_g;
    cudaMallocManaged(&points_g, n * sizeof(Obstacle));
    double* list_g;
    cudaMallocManaged(&list_g, n * 2 * sizeof(double));
    cudaMemcpy(points_g, points, n * sizeof(Obstacle), cudaMemcpyHostToDevice);
    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;
    intersectTime_g<<<numBlocks, blockSize>>>(n, points_g, list_g);
    cudaDeviceSynchronize();
    // bug fix: the managed obstacle buffer was leaked
    cudaFree(points_g);
    return list_g;
}
// Host-only path: fills 'list' with the CPU implementation and hands it back,
// mirroring the pointer-returning convention of the GPU paths.
double* cpu(int n, Obstacle points[], double list[])
{
    intersectTime_c(n, points, list);
    return list;
}
// Entry point: reads n obstacles from stdin, asks which backend to use
// (GPU discrete / GPU unified / CPU), computes the collision windows, and
// prints one "index t_s t_e" line per obstacle.
int main(int argc, char *argv[])
{
    std::string response;
    bool valid = false;
    int n = 0;
    std::cin >> n;
    Obstacle* obstacles = new Obstacle[n];
    double* result = new double[2 * n];
    // 'out' points at whichever buffer ends up holding the answer:
    // gpu_unified returns its own managed buffer instead of filling 'result'.
    double* out = result;
    double c_x, c_y, r, v_x, v_y;
    for(int i = 0; i < n; i++)
    {
        std::cin >> c_x >> c_y >> r >> v_x >> v_y;
        obstacles[i] = Obstacle(c_x, c_y, r, v_x, v_y);
    }
    valid = true;
    // while (valid)
    {
        std::cout << "Use GPU for computation ? (Y/N)" << std::endl;
        std::cin >> response;
        if(response == "Y" || response == "y" || response == "yes")
        {
            std::cout << "Use unified memory? (Y/N)" << std::endl;
            std::cin >> response;
            if(response == "Y" || response == "y" || response == "yes")
            {
                //unified gpu memory
                std::cout << "unified gpu memory incurred.\n";
                valid = false;
                out = gpu_unified(n, obstacles, result);
            } else if (response == "N" || response == "n" || response == "no")
            {
                //discrete gpu memory
                std::cout << "discrete gpu memory incurred.\n";
                valid = false;
                out = gpu_discrete(n, obstacles, result);
            } else {
                std::cout << "invalid input. try again\n";
            }
        } else if (response == "N" || response == "n" || response == "no")
        {
            // cpu
            std::cout << "cpu memory incurred.\n";
            valid = false;
            out = cpu(n, obstacles, result);
        } else {
            std::cout << "invalid input. try again\n";
        }
    }
    //print output
    for(int i = 0; i < n; i++)
    {
        std::cout << i << " " << out[2 * i] << " " << out[2 * i + 1] << std::endl;
    }
    // bug fix: nothing was freed. Release the managed buffer when the unified
    // path replaced 'result', then the host allocations.
    if(out != result) cudaFree(out);
    delete [] result;
    delete [] obstacles;
    return 0;
}
|
13,130 | #include <stdio.h>
#include <math.h>
#include <iostream>
using namespace std;
#define TDB 1024 //Tamaño del bloque
#define hy 0.34
#define hx 0.34
#define LT 1 //lado tuberia
// Device-side floor(): largest integer <= num. NaN and values outside the
// long long range (already integral at that magnitude) pass through unchanged.
__device__ double my_floor(double num) {
    if (num >= LLONG_MAX || num <= LLONG_MIN || num != num) {
        return num;
    }
    // bug fix: the truncation went through 'int', overflowing (UB) for any
    // |num| > INT_MAX even though the guard above only excludes long long range
    long long n = (long long)num;
    double d = (double)n;
    if (d == num || num >= 0)
        return d;
    else
        return d - 1;   // negative non-integers round toward -infinity
}
// Builds one row of the finite-difference system matrix per thread: thread
// idx represents grid node (i, j) and fills its row with the 5-point-stencil
// coefficients (coeficientes_GPU[0..4]) at the neighbour columns, 0 elsewhere.
// 'matriz' is a pitched allocation: rows are 'pitch' bytes apart.
__global__ void crearMalla(float *matriz, float *coeficientes_GPU, int nodos_x, int nodos_y, int pitch){
const unsigned int idx = threadIdx.x;
// (i, j) position of this thread's node in the nodos_x x nodos_y grid.
// NOTE(review): only threadIdx.x is used — with more than one block every
// block would write the same rows; confirm a single-block launch is intended.
const unsigned int i = my_floor(idx / nodos_x);
const unsigned int j = idx % nodos_x;
const unsigned int n = nodos_x * nodos_y;
int k = 0, l = 0, columna = 0;
printf("thread:%i \n", idx);
// Row idx of the pitched matrix.
float *row_a = (float *) ((char*)matriz + idx * pitch);
// Walk all (k, l) grid nodes in column order; 'columna' is the flat column.
while(k < nodos_x && columna < n ){
while(l < nodos_y){
if( k == i - 1 && l == j )
row_a[columna] = *(coeficientes_GPU + 0);
else if( k == i + 1 && l == j )
row_a[columna] = *(coeficientes_GPU + 1);
else if( k == i && l == j )
row_a[columna] = *(coeficientes_GPU + 2); //
else if( k == i && l == j - 1 )
row_a[columna] = *(coeficientes_GPU + 3);
else if( k == i && l == j + 1 )
row_a[columna] = *(coeficientes_GPU + 4);
else
row_a[columna] = 0;
columna++;
l++;
}
l = 0;
// NOTE(review): the else branch is unreachable (k < nodos_x always holds
// inside the loop), so this is simply k++.
if(k < nodos_x) k++;
else k = 0;
}
}
// Entry point: builds the NDH x NDH finite-difference matrix on the GPU for a
// grid of nodos_x x nodos_y nodes and prints it.
int main(){
    // Grid resolution derived from the pipe side LT and spacings hx, hy.
    const unsigned int nodos_x = ceil( (float)LT / (float)hx );
    const unsigned int nodos_y = ceil( (float)LT / (float)hy );
    const unsigned int NDH = nodos_x * nodos_y;
    const unsigned int numero_bloques = ceil( (float) NDH / (float) TDB );
    const unsigned int hilos_bloque = ceil( (float) NDH / (float) numero_bloques );
    cout << " Se lanzaran " << numero_bloques << " bloque(s) de " << hilos_bloque << " hilos cada uno." << endl;
    // Five-point-stencil coefficients.
    // bug fix: malloc(5) allocated 5 BYTES for 5 floats — heap overflow below
    float* coeficientes__HOST = (float*) malloc(5 * sizeof(float));
    coeficientes__HOST[0] = 1/(pow(hx,2));                              //(i-1, j)
    coeficientes__HOST[1] = 1/(pow(hx,2));                              //(i+1, j)
    coeficientes__HOST[2] = -2 *( (1/(pow(hx,2))) + (1/(pow(hy,2))));   //(i, j)
    coeficientes__HOST[3] = 1/(pow(hy,2));                              //(i, j-1)
    coeficientes__HOST[4] = 1/(pow(hy,2));                              //(i, j+1)
    size_t pitch;
    // bug fix: cudaMallocPitch takes (width-in-bytes, height-in-rows); the
    // old call passed (NDH, NDH*sizeof(float)), which disagreed with the
    // cudaMemcpy2D below and under-sized each row
    float *malla_salida_device;
    cudaMallocPitch((void**)&malla_salida_device, &pitch, NDH * sizeof(float), NDH);
    float *coeficientes_GPU;
    cudaMalloc((void**)&coeficientes_GPU, 5 * sizeof(float));
    cudaMemcpy(coeficientes_GPU, coeficientes__HOST, 5 * sizeof(float), cudaMemcpyHostToDevice);
    crearMalla<<<numero_bloques, hilos_bloque>>>(malla_salida_device, coeficientes_GPU, nodos_x, nodos_y, pitch);
    float malla_salida_host[NDH][NDH];
    cudaMemcpy2D(malla_salida_host, NDH * sizeof(float), malla_salida_device, pitch, NDH * sizeof(float), NDH, cudaMemcpyDeviceToHost);
    cudaFree(malla_salida_device); cudaFree(coeficientes_GPU);
    cout.precision(3);
    cout << "\n Nodos x: " << nodos_x << ", Nodos y: " << nodos_y << ".\n" <<endl;
    for( unsigned int i = 0; i < NDH; i++){
        for(unsigned int j = 0 ; j < NDH ; j++)
            cout << malla_salida_host[i][j] << "\t";
        cout << "\n";
    }
    free(coeficientes__HOST);
    return 0;
} |
13,131 | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <algorithm>
#define HISTOGRAM_LENGTH 256
#define CBLOCK 512
//@@ insert code here
__global__
// Convert a normalized [0,1] float image to 8-bit: out[i] = (uchar)(255 * in[i]).
void FtoCKernel(float *IIData, unsigned char *ICData, int Ipixels){
    const int idx = blockIdx.x * CBLOCK + threadIdx.x;
    if (idx >= Ipixels) return;   // guard the grid tail
    ICData[idx] = (unsigned char)(255.0f * IIData[idx]);
}
__global__
// Interleaved-RGB to grayscale: one thread per OUTPUT (gray) pixel.
// bug fix: the old index was blockIdx.x*CBLOCK + threadIdx.x*3, which both
// skipped pixels within a block and wrote the gray value at RGB offsets in
// the (Ipixels-sized) output array.
void RtoGKernel(unsigned char *IIData, unsigned char *ICGData, int Ipixels){
    int pos = blockIdx.x*CBLOCK + threadIdx.x;   // grayscale pixel index
    if(pos < Ipixels){
        int i = 3*pos;                           // first RGB byte of this pixel
        ICGData[pos] = (unsigned char) (0.21*IIData[i] + 0.71*IIData[i+1] + 0.07*IIData[i+2] );
    }
}
__global__
// 256-bin histogram of an 8-bit image: each block accumulates a private
// shared-memory histogram over a grid-stride sweep, then merges it into the
// global histogram with one atomicAdd per bin. Requires blockDim.x >= 256 for
// the init/merge steps to cover every bin.
void HKernel(unsigned char * IIData, unsigned int * IHistogram, int Ipixels){
int b = blockIdx.x *blockDim.x;
int t = threadIdx.x;
__shared__ int pHistogram[HISTOGRAM_LENGTH];
// Zero the block-private histogram before any counting.
if(t < HISTOGRAM_LENGTH)pHistogram[t] = 0;
__syncthreads();
// Grid-stride sweep over all pixels.
int i = b+t;
int s = blockDim.x* gridDim.x;
while(i < Ipixels){
atomicAdd( &(pHistogram[IIData[i]]), 1);
i+=s;
}
__syncthreads();
// Merge the private histogram into the global one (must be pre-zeroed by host).
if(t < HISTOGRAM_LENGTH)atomicAdd( &(IHistogram[t]), pHistogram[t]);
}
__global__
// Normalized CDF of the histogram via a Brent-Kung scan performed in place on
// ICDF. Single-block launch with blockDim.x >= HISTOGRAM_LENGTH/2 assumed
// (the __syncthreads() barriers only cover one block).
// bug fix: the old code zeroed ICDF and never read IHistogram (scan of zeros),
// and divided by fIarea on every scan step (repeated normalization).
void CDFKernel(unsigned int * IHistogram, float * ICDF, float fIarea){
    int t = threadIdx.x;
    // Seed with the normalized histogram: probability of each gray level.
    if(t < HISTOGRAM_LENGTH) ICDF[t] = (float) IHistogram[t] / fIarea;
    __syncthreads();
    // reduction (up-sweep)
    for(int s = 1; s <= HISTOGRAM_LENGTH/2; s*=2){
        int i = (t+1)*s*2-1;
        if (i < HISTOGRAM_LENGTH) ICDF[i] += ICDF[i-s];
        __syncthreads();
    }
    // post-reduction (down-sweep)
    for(int s = HISTOGRAM_LENGTH/4; s > 0; s /= 2){
        __syncthreads();
        int j = (t+1)*s*2-1; // Same as other index
        if(j+s < HISTOGRAM_LENGTH) ICDF[j+s] += ICDF[j];
    }
}
__global__
// Intended to find the minimum of the 256-entry CDF via a shared-memory
// tree reduction. NOTE(review): as written this kernel is broken three ways:
//  - pMin is declared int, truncating the float CDF values on load;
//  - the reduction keeps the LARGER value (condition selects pMin[t+s] when
//    pMin[t] is smaller), so it computes a max, not a min;
//  - the result is stored into 'minv', a by-value parameter, so it never
//    leaves the kernel. A float* output parameter is needed to fix this, but
//    that changes the signature — left untouched here and flagged instead.
// (It is never launched from main in this file.)
void CDFminKernel(float * I, float minv) {
int t = threadIdx.x;
__shared__ int pMin[HISTOGRAM_LENGTH];
if(t < HISTOGRAM_LENGTH)pMin[t] = I[t];//First half
__syncthreads();
for(int s = blockDim.x/2; s > 0; s/=2){
__syncthreads();
if(t < s) pMin[t] = (pMin[t] < pMin[t+s]) ? pMin[t+s] : pMin[t];
}
minv = pMin[0];
}
// Fills a[0..l-1] with uniform [0,1) values such that no two consecutive
// entries are equal. Seeds drand48 from the clock (not reproducible).
void populateArray(float a[], int l){
    srand48(time(NULL));
    float prev = drand48();
    a[0] = prev;   // bug fix: a[0] was never written (loop started at i = 1)
    float nxt;
    for(int i = 1; i < l; i++){
        do{
            nxt = drand48();
        }while(nxt==prev);   // reject a repeat of the previous value
        a[i] = nxt;
        prev = nxt;
    }
}
// Absolute difference |a - b| of two floats.
float absDif(float a, float b){
    return (a > b) ? a - b : b - a;
}
// Test driver: generates a random ilen x ilen RGB float image, converts it to
// 8-bit on the GPU (FtoCKernel), and copies the result back to the host.
int main(){
    //int lengths[5] = {5, 10, 20, 40, 50};
    //for(int x=0; x < 5; x++){
    //int ilen = lengths[x];
    int ilen = 5;
    int imageWidth = ilen;
    int imageHeight = ilen;
    int imageChannels = 3;
    float * dInputImageData;          // device: float RGB image
    unsigned char * dcharImageData;   // device: uchar RGB image
    unsigned char * charImageData;    // host copy for verification
    printf("Size %dx%d\n", ilen, ilen);
    int imageArea = imageWidth*imageHeight;
    float I[imageArea*imageChannels];
    populateArray(I, imageArea*imageChannels);
    // bug fix: charImageData was an uninitialized pointer used as a memcpy
    // destination below
    charImageData = (unsigned char *) malloc(imageArea * imageChannels * sizeof(unsigned char));
    //Cuda malloc
    cudaMalloc((void **) &dInputImageData, imageArea * imageChannels * sizeof(float));
    cudaMalloc((void **) &dcharImageData, imageArea * imageChannels * sizeof(unsigned char));
    //Cuda memcpy
    // bug fix: the source was hostInputImageData, a pointer that was never
    // initialized — the generated image I is the actual input
    cudaMemcpy(dInputImageData,
               I,
               imageArea * imageChannels *sizeof(float),
               cudaMemcpyHostToDevice);
    //Cuda conv 1: float [0,1] -> uchar [0,255]
    dim3 dimCGrid = (imageArea*imageChannels-1)/CBLOCK + 1;
    dim3 dimCBlock = CBLOCK;
    FtoCKernel<<<dimCGrid, dimCBlock>>>(dInputImageData,dcharImageData,imageArea*imageChannels);
    cudaDeviceSynchronize();
    //Cuda mcpy to compare
    printf("Copying back\n");
    cudaError_t err = cudaMemcpy(charImageData,dcharImageData, imageArea * imageChannels *sizeof(unsigned char), cudaMemcpyDeviceToHost);
    printf("Copied\n");
    printf("Error %s\n", cudaGetErrorString(err));
    // for(int i = 0; i < imageArea*imageChannels; i ++){
    //     unsigned char dres = charImageData[i];
    //     unsigned char cres = (unsigned char) 255 * I[i];
    //     if(dres != cres)printf("%c != %c at %d\n",dres, cres, i);
    //     else printf("OK at %d\n", i);
    // }
    // bug fix: dcharImageData was freed twice and never-allocated pointers
    // (dImageCDF, dOutputImageData) were passed to cudaFree
    cudaFree(dInputImageData);
    cudaFree(dcharImageData);
    free(charImageData);
    //}
    return 0;
}
|
13,132 | #include "includes.h"
// For fraction i (device_num[i]/device_den[i]), count how many LATER entries
// hold the same numerator/denominator pair, accumulating into device_vet[i].
// 'x' offsets the global thread index so a large input can be processed in
// several launches. O(size) work per thread.
__global__ void sum(long int* device_num, long int* device_den, long int* device_vet, int size, int x)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x + x;
    if (i >= size) return;   // guard the grid tail
    for (int j = i + 1; j < size; j++) {
        if (device_num[i] == device_num[j] && device_den[i] == device_den[j])
            device_vet[i]++;
    }
} |
13,133 | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include <assert.h>
#define N 2//64
// Per-thread flag: 1 for thread 0 of the block, 0 for every other thread.
// (The A parameter is unused; kept for the existing call site.)
__device__ int bar(float* A) {
    return (threadIdx.x == 0) ? 1 : 0;
}
// Writes bar()'s flag per thread: A[0] becomes 1, all other entries become 0.
__global__ void foo(float* A) {
    A[threadIdx.x] = bar(A);
}
|
13,134 | #include "my_device_function.cuh"
// Elementwise logistic activation: a[i] = 1 / (1 + e^(-z[i])), grid-stride.
__global__ void sigmoid(float *a,float *z,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        a[idx] = 1/(1+expf(-z[idx]));
    }
}
// Elementwise derivative of the logistic: a[i] = s * (1 - s), s = sigmoid(z[i]).
__global__ void sigmoid_inv(float *a,float *z,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        const float s = 1/(1+expf(-z[idx]));
        a[idx] = s*(1 - s);
    }
}
// Elementwise ReLU: a[i] = max(z[i], 0), grid-stride.
__global__ void relu(float *a,float *z,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        a[idx] = (z[idx] > 0) ? z[idx] : 0.0f;
    }
}
// Elementwise ReLU derivative: a[i] = 1 where z[i] > 0, else 0.
__global__ void relu_inv(float *a,float *z,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        a[idx] = (z[idx] > 0) ? 1.0f : 0.0f;
    }
}
// Placeholder for a future activation function: the body is commented out,
// so this kernel only walks the grid-stride loop and writes nothing.
__global__ void new_activation(float *a,float *z,long n)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
// if(z[tid] > 0 ) a[tid] = 1.0;
// else a[tid] = 0.0;
tid+= blockDim.x * gridDim.x;
}
}
// Placeholder for the derivative of the future activation function: the body
// is commented out, so this kernel currently performs no writes.
__global__ void new_activation_inv(float *a,float *z,long n)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
// if(z[tid] > 0 ) a[tid] = 1.0;
// else a[tid] = 0.0;
tid+= blockDim.x * gridDim.x;
}
}
// Device-side buffer hand-off: copy n floats from 'front' into 'rear'.
__global__ void deliver_front_to_rear(float *front,float *rear,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        rear[idx] = front[idx];
    }
}
// Adds the bias vector b (length 'column') to every row of the flattened
// n-element matrix z: z[i] += b[i % column].
__global__ void add_bias(float *z,float *b,long column,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        z[idx] += b[idx % column];
    }
}
// Output-layer delta (pre-transpose): temp[i] = (y[i] - T[i]) / (2 * batch_size).
__global__ void last_delta_before_transpose(float *temp, float *y,float *T,long batch_size,long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        temp[idx] = (y[idx]-T[idx])/(2*batch_size);
    }
}
// Transposes a before_rows x before_columns matrix 'before' into 'after'.
__global__ void transpose(float *after, float *before,long before_columns,long before_rows)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
long x,y;
// Grid-stride loop over every element of 'before'.
while(tid < before_columns*before_rows)
{
y = tid % before_columns;   // column index within 'before'
x = tid / before_columns;   // row index within 'before'
// IDX2C comes from my_device_function.cuh — presumably the usual
// column-major (i + j*ld) macro; verify against the header.
after[IDX2C(x,y,before_rows)] = before[IDX2C(y,x,before_columns)];
tid+= blockDim.x * gridDim.x;
}
}
// Hadamard (elementwise) product: c[i] = a[i] * b[i].
__global__ void basic_multi(float *a,float *b,float *c, long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += step)
    {
        c[idx] = a[idx]*b[idx];
    }
}
// Per-element binary cross-entropy loss over the last_neural x batch_size
// output; the 1e-9 epsilon guards logf against zero arguments.
__global__ void loss_cross_entropy(float *target,float *y, float * result,long last_neural,long batch_size)
{
    const long total = last_neural*batch_size;
    const long step = (long)blockDim.x * gridDim.x;
    for(long idx = (long)blockIdx.x*blockDim.x + threadIdx.x; idx < total; idx += step)
    {
        result[idx] = -0.5*(target[idx]*logf(y[idx] + 0.000000001) + (1.0 - target[idx])*logf(1-y[idx] + 0.000000001));
    }
}
// Per-sample classification check: for each batch column, compares the class
// of the one-hot target (entry > 0.9) with the argmax of the prediction y and
// writes 1.0 to result[sample] on a match, else 0.0. Matrices are
// last_neural x batch_size, addressed through IDX2C (see header).
__global__ void matching(float *target,float *y, float * result,long last_neural,long batch_size)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
// NOTE(review): target_inx / y_inx are only written inside the conditionals
// below; if no target entry exceeds 0.9, or every prediction is <= 0, they
// are read uninitialized — confirm the inputs guarantee a one-hot target and
// at least one positive prediction.
int target_inx;
int y_inx;
while(tid < batch_size)
{
float max = 0.0;
for(int i = 0 ; i < last_neural ; i++)
{
if(target[IDX2C(i,tid,last_neural)] > 0.9)
{
target_inx = i;
}
if(y[IDX2C(i,tid,last_neural)] > max)
{
max = y[IDX2C(i,tid,last_neural)];
y_inx = i;
}
}
if(target_inx == y_inx) result[tid] = 1.0;
else result[tid] = 0.0;
tid+= blockDim.x * gridDim.x;
}
}
// SGD step with L2 weight decay: w -= alpha * (dw + ramda * w).
// (`ramda` is the regularization coefficient, i.e. lambda.)
__global__ void weight_update(float *w, float *delta_w, float alpha, float ramda, long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for (long i = (long)blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step)
        w[i] = w[i] - alpha * (delta_w[i] + ramda * w[i]);
}
// SGD step for biases (no regularization): b -= alpha * delta_b.
__global__ void bias_update(float *b, float *delta_b, float alpha, long n)
{
    const long step = (long)blockDim.x * gridDim.x;
    for (long i = (long)blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step)
        b[i] = b[i] - alpha * delta_b[i];
}
|
13,135 | #include<iostream>
#include<vector>
#include<curand.h>
#include<curand_kernel.h>
const int SHARED_MEM = 256;
// Monte-Carlo pi kernel: each thread draws N/gridsize points in the unit
// square and counts hits inside the unit circle; per-block counts are reduced
// in shared memory and accumulated into *result. Host computes
// pi ~= 4 * (*result) / N.
// PRECONDITIONS: *result zeroed before launch (cudaMemset);
// blockDim.x a power of two and <= SHARED_MEM; double atomicAdd needs SM60+.
// Fixes vs. the original: curand_init was called inside the loop with
// identical parameters (same point every iteration); curand_normal was used
// where the area argument needs uniform samples; the stride lacked gridDim.x
// so blocks duplicated work; the reduction used blockIdx.x/2 with no bounds
// check or sync; and every block raced on a plain write to *result.
__global__ void piEstimate(double *result, int N){
    const auto index  = blockDim.x*blockIdx.x + threadIdx.x;
    const auto stride = blockDim.x*gridDim.x;   // full grid stride
    __shared__ double shared_mem[SHARED_MEM];
    shared_mem[threadIdx.x] = 0;                // per-thread hit counter
    __syncthreads();
    curandState_t state;
    // one init per thread, distinct subsequence per thread
    curand_init(123456789, index, 0, &state);
    for (auto i = index; i < N; i += stride){
        double x = curand_uniform_double(&state);  // uniform in (0,1]
        double y = curand_uniform_double(&state);
        if (x*x + y*y <= 1){
            shared_mem[threadIdx.x]++;
        }
    }
    // block-wide tree reduction into shared_mem[0]
    for (int i = blockDim.x/2; i > 0; i >>= 1){
        __syncthreads();
        if (threadIdx.x < i)
            shared_mem[threadIdx.x] += shared_mem[threadIdx.x + i];
    }
    __syncthreads();
    if (threadIdx.x == 0){
        atomicAdd(result, shared_mem[0]);  // accumulate across blocks (SM60+)
    }
}
// Driver for the Monte-Carlo pi estimate.
// Fixes vs. the original: the device accumulator is zeroed before launch
// (the kernel adds into it), and the estimate is scaled by 4/N -- the ratio
// of circle to square area -- instead of the original 100/N.
int main(){
    const auto N = 10240;                 // total sample count
    double h_pi = 0.0;
    double *d_pi = nullptr;
    cudaMalloc(&d_pi, sizeof(double));
    cudaMemset(d_pi, 0, sizeof(double));  // kernel accumulates into *d_pi
    auto threadsPerBlock = 32;
    auto blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    piEstimate<<<blocksPerGrid, threadsPerBlock>>>(d_pi, N);
    // blocking copy also synchronizes with the kernel on the default stream
    cudaMemcpy(&h_pi, d_pi, sizeof(double), cudaMemcpyDeviceToHost);
    // pi/4 == hits/N  =>  pi ~= 4 * hits / N
    std::cout << "Value of pi is: " << h_pi * 4.0 / N << std::endl;
    cudaFree(d_pi);
    return 0;
}
|
13,136 | #pragma once
#include <cuda.h>
#include <stdio.h>
// conjugate gradient solver. by SKH.
#define GPU_SOLVER_MAX_ITER 3000
#define GPU_SOLVER_EPS 0.01
#define BLOCK_SIZE 512
// Serial dot product of two length-n float vectors, computed entirely by the
// calling thread (every thread that calls this walks all n elements).
__device__ float dot(float* a, float *b, int n)
{
    float acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[k] * b[k];
    return acc;
}
// Conjugate-gradient solve of A x = b; each thread owns row `id` of the
// system and the id-th entries of x, r (residual), d (search direction) and
// q (A*d scratch).
// NOTE(review): __syncthreads() is only a *block* barrier, so the algorithm
// is only coherent when the whole system fits in one block (n <= BLOCK_SIZE);
// with several blocks the r/d/q vectors are read before other blocks update
// them. Also, threads with id >= n skip the barriers that threads with
// id < n execute -- divergent __syncthreads() is undefined behavior when n is
// not a multiple of BLOCK_SIZE. Confirm intended launch sizes.
__global__ void gpuCGSolve(float* A, float* x, float* b,
float* d, float* r, float* q,
int n)
{
int id = blockIdx.x *BLOCK_SIZE + threadIdx.x;
if(id < n)
{
int i = 0;
float alpha, beta, deltaOld, delta0, deltaNew;
float _b = b[id];
// initial residual r = b - A x and direction d = r
r[id] = _b - dot(&A[id * n], x, n);
d[id] = r[id];
__syncthreads();
deltaNew = dot(r,r,n);
delta0 = deltaNew;
// iterate until max iterations or relative residual below EPS^2
while(i<GPU_SOLVER_MAX_ITER && deltaNew > (GPU_SOLVER_EPS * GPU_SOLVER_EPS) * delta0)
{
q[id] = dot(&A[id * n],d,n);
__syncthreads();
alpha = (deltaNew)/dot(d,q,n);
x[id] += alpha * d[id];
__syncthreads();
// NOTE(review): this condition looks inverted -- as written the exact
// (expensive) residual b - A x is recomputed on every iteration NOT
// divisible by 50, and the cheap update r -= alpha*q only on multiples
// of 50. Standard CG does the opposite (exact recompute every ~50th).
if(i%50)
{
r[id] = _b - dot(&A[id * n], x,n);
}
else
{
r[id] -= alpha * q[id];
}
deltaOld = deltaNew;
deltaNew = dot(r,r,n);
beta = deltaNew/deltaOld;
// new search direction
d[id] = r[id] + beta * d[id];
i++;
__syncthreads();
}
}
}
// Host wrapper: copies A (n x n), x and b to the device, runs gpuCGSolve in a
// single launch, copies the solution back into x, and prints elapsed time.
// Fix: gpu_q was allocated but never freed (device memory leak per call).
void CGSolverGPU(float* A, float* x, float* b, int n)
{
    float *gpu_A, *gpu_x, *gpu_b,
          *gpu_d, *gpu_r, *gpu_q;
    int ARR_SIZE = sizeof(float) * n;
    // performance timing /////////////////////////
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // end ////////////////////////////////////////
    cudaMalloc(&gpu_A, sizeof(float) * n * n);
    cudaMemcpy(gpu_A, A, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMalloc(&gpu_x, ARR_SIZE);
    cudaMalloc(&gpu_b, ARR_SIZE);
    cudaMemcpy(gpu_x, x, ARR_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, ARR_SIZE, cudaMemcpyHostToDevice);
    cudaMalloc(&gpu_d, ARR_SIZE);  // search direction
    cudaMalloc(&gpu_r, ARR_SIZE);  // residual
    cudaMalloc(&gpu_q, ARR_SIZE);  // A*d scratch
    int no_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    gpuCGSolve <<<no_blocks, BLOCK_SIZE>>> (gpu_A, gpu_x, gpu_b, gpu_d, gpu_r, gpu_q, n);
    cudaMemcpy(x, gpu_x, ARR_SIZE, cudaMemcpyDeviceToHost);
    cudaFree(gpu_A);
    cudaFree(gpu_x);
    cudaFree(gpu_b);
    cudaFree(gpu_d);
    cudaFree(gpu_r);
    cudaFree(gpu_q);  // was leaked in the original
    // performance timing /////////////////////////
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float timing;
    cudaEventElapsedTime( &timing, start, stop );
    printf("Time taken %.4f ms\n",timing);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // end ////////////////////////////////////////
}
|
13,137 | #include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <iostream>
typedef long int li;
#define HANDLE_ERROR(err) \
do { if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); exit(0);} } while (0)
// Orders doubles by absolute value; used with thrust::max_element to pick
// the pivot of largest magnitude during partial pivoting.
struct comparator
{
    __host__ __device__ bool operator()(double lhs, double rhs)
    {
        return std::fabs(lhs) < std::fabs(rhs);
    }
};
// Swaps rows `i` and `ind` of the column-major matrix (with the RHS stored
// as column n), touching all n+1 columns. Grid-stride over columns.
__global__ void swapRows(double* device_data, long int ind, long int i, long int n)
{
    const long int first  = threadIdx.x + blockIdx.x * blockDim.x;
    const long int stride = gridDim.x * blockDim.x;
    for (long int col = first; col < n + 1; col += stride)
    {
        const double held = device_data[col * n + i];
        device_data[col * n + i]   = device_data[col * n + ind];
        device_data[col * n + ind] = held;
    }
}
// Divides the sub-diagonal part of column i (entries in columns i+1..n of
// row i, column-major layout) by the pivot at (i, ind).
__global__ void maxDiv(double* device_data, long int ind, long int i, long int n)
{
    const long int first  = threadIdx.x + blockIdx.x * blockDim.x;
    const long int stride = gridDim.x * blockDim.x;
    const double pivotInv = 1 / device_data[i * n + ind];  // reciprocal of the pivot
    for (long int col = first + i + 1; col < n + 1; col += stride)
        device_data[col * n + i] *= pivotInv;
}
// One elimination step of Gaussian forward elimination (column-major with
// RHS as column n): for every row below i and every column right of i,
// subtract the scaled pivot row. 2-D grid-stride loops.
__global__ void forward(double* data, long int i, long int n) {
    const long int firstRow = blockIdx.x * blockDim.x + threadIdx.x;
    const long int firstCol = blockIdx.y * blockDim.y + threadIdx.y;
    const long int stepRow  = gridDim.x * blockDim.x;
    const long int stepCol  = gridDim.y * blockDim.y;
    for (long int row = firstRow + i + 1; row < n; row += stepRow) {
        for (long int col = firstCol + i + 1; col < n + 1; col += stepCol) {
            data[row + col * n] -= data[row + i * n] * data[i + col * n] / data[i + i * n];
        }
    }
}
// Reads an n x n system from stdin (matrix column-major, then the RHS),
// performs GPU Gaussian elimination with partial pivoting, back-substitutes
// on the host, and prints the solution in scientific notation.
// Fix: the HANDLE_ERROR macro defined above was never used -- every CUDA
// call and kernel launch now goes through it.
int main(void) {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(nullptr);
    long int n;
    std::cin >> n;
    long int nSquare = n * n;
    double *host_data = new double[nSquare + n];
    // element (i, j) is stored column-major at host_data[j*n + i]
    for (long int i = 0; i < n; i++) {
        for (long int j = 0; j < n; j++) {
            std::cin >> host_data[j * n + i];
        }
    }
    // the right-hand side occupies the trailing n entries
    for (long int i = 0; i < n; i++) {
        std::cin >> host_data[nSquare + i];
    }
    // Memory allocation and initialization
    double *device_data;
    HANDLE_ERROR(cudaMalloc((void **) &device_data, (nSquare + n) * sizeof(double)));
    HANDLE_ERROR(cudaMemcpy(device_data, host_data, (nSquare + n) * sizeof(double), cudaMemcpyHostToDevice));
    // Pivot search via thrust
    comparator comp;
    thrust::device_ptr<double> begin_p, max_p;
    for (long int i = 0, ind; i < n - 1; i++) {
        // partial pivoting: row with the largest |value| in column i
        begin_p = thrust::device_pointer_cast(device_data + i * n);
        max_p = thrust::max_element(begin_p + i, begin_p + n, comp);
        ind = max_p - begin_p;
        if (ind != i) {
            swapRows<<<512, 512>>>(device_data, ind, i, n);
            HANDLE_ERROR(cudaGetLastError());
            ind = i;
        }
        forward<<<dim3(32, 32), dim3(32, 32)>>>(device_data, i, n);
        HANDLE_ERROR(cudaGetLastError());
    }
    HANDLE_ERROR(cudaMemcpy(host_data, device_data, (nSquare + n) * sizeof(double), cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(device_data));
    // back substitution on the host, overwriting the RHS with the solution
    for (long int j = n - 1; j >= 0; j--) {
        double sum = host_data[nSquare + j];
        for (long int i = (n + 1) * j + n, tempCounter = 1; i < nSquare; i += n, tempCounter++) {
            sum -= host_data[i] * host_data[nSquare + j + tempCounter];
        }
        host_data[nSquare + j] = sum / host_data[j * n + j];
    }
    std::cout.precision(10);
    std::cout.setf(std::ios::scientific);
    for (long int i = nSquare, cap = i + n; i < cap; i++) {
        std::cout << host_data[i] << ' ';
    }
    delete [] host_data;
}
|
13,138 | #include "includes.h"
// Fills `vec` with its own linear index: vec[id] = id. The grid is 3-D over
// cells of extent c_size (declared in includes.h -- presumably __constant__;
// confirm), with `len` consecutive values per cell.
__global__ void writeKernel(float* vec, int len)
{
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= c_size.x || y >= c_size.y || z >= c_size.z)
        return;
    const long int base = (long int)len * (x + c_size.x * (y + z * c_size.y));
    for (int w = 0; w < len; ++w)
        vec[base + w] = base + w;
}
13,139 | #include "includes.h"
/*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#define ITERATIONS 4 //Repeat the experiment for greater accuracy
#define N 1000000 //Array Size
#define min_threads 16
#define max_threads 1024
// Element-wise vector add: c[i] = a[i] + b[i]. `tpb` is the threads-per-block
// value used at launch (passed explicitly instead of reading blockDim.x).
// NOTE(review): there is no bounds guard, so every launched thread writes
// c[i]; the launch configuration must cover exactly N elements or the tail
// threads access out of bounds. Confirm against the (unseen) launch code.
__global__ void add(int *a, int *b, int *c, int tpb)
{
//Find the correct thread index in the grid
int i = blockIdx.x * tpb + threadIdx.x;
c[i] = a[i] + b[i];
}
13,140 | // for (size_t i = 0; i < SIZE; i++) {
// std::cout << arr[i] << std::endl;
// }
|
13,141 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// One step of a GPU prefix-sum (scan) over numElements ints.
// NOTE(review): the body is an unimplemented course stub. Judging by the
// signature, each launch is meant to combine oldSum[i] with the element
// `distance` positions earlier into newSum[i] (Hillis-Steele style), with the
// host ping-ponging buffers and doubling `distance` -- confirm against the
// assignment before implementing.
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
/*YOUR CODE HERE*/
}
|
13,142 | #include "cuda_runtime.h"
#include <stdio.h>
#include <time.h>
#include <chrono>
const int a_row = 512;
const int a_col = 3136;
const int b_row = 3136;
const int b_col = 1;
//2维网格2维线程块 最常用
__global__ void VectorMultiply(float** a, float** b, float** c) {
int thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; //x是线程块的x做行 线程的x做列
int thread_y_id = blockIdx.y * blockDim.y + threadIdx.y; //y是线程块的y做行 线程的y做列
for (int k = 0; k < a_col; k++) {
c[thread_y_id][thread_x_id] += (a[thread_y_id][k] + b[k][thread_x_id]);
}
}
// Driver: builds a 512x3136 matrix `a` of ones and a 3136x1 vector `b` of
// ones, mirrors them on the device through pointer tables, runs
// VectorMultiply, and prints inputs, result, and elapsed time.
// NOTE(review): the three data_* arrays put ~6.5 MB on main's stack -- close
// to typical limits; device_data_c is never initialized before the kernel
// accumulates into it; and the grid launches 32 threads in x while b_col==1,
// so the unguarded kernel indexes out of bounds. Confirm and fix.
int main() {
std::chrono::system_clock::time_point begin = std::chrono::system_clock::now();
// host pointer tables: each entry will hold a *device* row address
float* a[a_row] = { NULL };
float* b[b_row] = { NULL };
float* c[a_row] = { NULL };
float data_a[a_row * a_col] = { 0.0 };
float data_b[b_row * b_col] = { 0.0 };
float data_c[a_row * b_col] = { 0.0 };
// device pointer tables and flat device data buffers
float** device_a = NULL;
float** device_b = NULL;
float** device_c = NULL;
float* device_data_a = NULL;
float* device_data_b = NULL;
float* device_data_c = NULL;
// allocate device memory
cudaMalloc((void**)&device_a, sizeof(float*) * a_row);
cudaMalloc((void**)&device_b, sizeof(float*) * b_row);
cudaMalloc((void**)&device_c, sizeof(float*) * a_row);
cudaMalloc((void**)&device_data_a, sizeof(float) * a_row * a_col);
cudaMalloc((void**)&device_data_b, sizeof(float) * b_row * b_col);
cudaMalloc((void**)&device_data_c, sizeof(float) * a_row * b_col);
for (int i = 0; i < a_row * a_col; i++) {
data_a[i] = 1;
}
for (int i = 0; i < b_row * b_col; i++) {
data_b[i] = 1;
}
// The host pointer tables are filled with addresses *inside the device data
// buffers*, then copied to the device so the device pointer tables reference
// device rows; the flat data itself is copied separately below.
for (int i = 0; i < a_row; i++) {
a[i] = device_data_a + i * a_col;
c[i] = device_data_c + i * b_col;
}
for (int i = 0; i < b_row; i++) {
b[i] = device_data_b + i * b_col;
}
// copy the pointer tables and the a/b input data to the device
cudaMemcpy(device_a, a, sizeof(float*) * a_row, cudaMemcpyHostToDevice);
cudaMemcpy(device_b, b, sizeof(float*) * b_row, cudaMemcpyHostToDevice);
cudaMemcpy(device_c, c, sizeof(float*) * a_row, cudaMemcpyHostToDevice);
cudaMemcpy(device_data_a, data_a, sizeof(float) * a_row * a_col, cudaMemcpyHostToDevice);
cudaMemcpy(device_data_b, data_b, sizeof(float) * b_row * b_col, cudaMemcpyHostToDevice);
// one kernel launch runs on one grid
// kernel launch: invoked by the CPU, executed on the GPU
dim3 dim_block(32, 16); // one block holds 32x16 = 512 threads
dim3 dim_grid((b_col + dim_block.x - 1) / dim_block.x, (a_row + dim_block.y - 1) / dim_block.y); // ceil-div grid covering the output
VectorMultiply<<<dim_grid, dim_block>>>(device_a, device_b, device_c);
// copy the result data back to the host
cudaMemcpy(data_c, device_data_c, sizeof(float) * a_row * b_col, cudaMemcpyDeviceToHost);
for (int i = 0; i < a_row * a_col; i++)
printf("a[%d]=%.0f, ", i, data_a[i]);
for (int i = 0; i < b_row * b_col; i++)
printf("b[%d]=%.0f, ", i, data_b[i]);
for (int i = 0; i < a_row * b_col; i++)
printf("c[%d]=%.0f\n", i, data_c[i]);
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
// convert the elapsed time to milliseconds
std::chrono::duration<int, std::milli> milli = std::chrono::duration_cast<
std::chrono::milliseconds>(end - begin);
printf("\n程序耗时:%dms\n", milli.count());
// free device memory
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
cudaFree(device_data_a);
cudaFree(device_data_b);
cudaFree(device_data_c);
return 0;
}
|
13,143 | #include "includes.h"
// Clamps each of `size` input values into [min, max] and writes the result
// to output. Uses a 2-D grid flattened to a linear id.
__global__ void CropKernel(float min, float max, float* input, float* output, int size)
{
    const int gid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (gid >= size)
        return;
    output[gid] = fmaxf(fminf(input[gid], max), min);
}
13,144 | #include <stdio.h>
#include<cuda.h>
const int N = 100 ;//256
// Element-wise N x N matrix add (row-major): C = A + B.
// Fix: added a tail guard so launches whose grid over-covers N x N cannot
// read or write out of bounds (the current 10x10 blocks happen to divide N
// exactly, but the kernel should not rely on that).
__global__ void MatrixAdd_CUDA(int *A, int *B, int *C) {
    int i = blockIdx.y*blockDim.y + threadIdx.y;  // row
    int j = blockIdx.x*blockDim.x + threadIdx.x;  // column
    if (i < N && j < N)
        *(C + i*N + j) = *(A + i*N + j) + *(B + i*N + j);
}
// Prints the top-left `row` x `col` window of the row-major int matrix A,
// one space-separated row per line.
void DisplayMatrix(int *A, int row, int col)
{
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++)
            printf(" %d", A[r * col + c]);
        printf("\n");
    }
}
// Builds two N x N matrices on the host, adds them on the GPU, and prints the
// top-left 10x10 window of the result.
// Fix: the three malloc'd host buffers were never freed (leak at exit).
int main (void)
{
    int *Host_a, *Host_b, *Host_c;
    Host_a = (int *) malloc ((N*N)*sizeof(int));
    Host_b = (int *) malloc ((N*N)*sizeof(int));
    Host_c = (int *) malloc ((N*N)*sizeof(int));
    int *dev_a , *dev_b, *dev_c ;
    cudaMalloc(&dev_a , (N*N)*sizeof(int));
    cudaMalloc(&dev_b , (N*N)*sizeof(int));
    cudaMalloc(&dev_c , (N*N)*sizeof(int));
    // A row i is filled with 2i+1; B element (i, j) is i+j
    for ( int i = 0; i <N ; i++ )
        for(int j=0;j<N;j++){
            *(Host_a+i*N+j)=i*2+1;
            *(Host_b+i*N+j)=i+j;
        }
    cudaMemcpy (dev_a , Host_a , (N*N)*sizeof(int) , cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b , Host_b , (N*N)*sizeof(int) , cudaMemcpyHostToDevice);
    // 10x10 thread blocks tile the N x N matrix exactly (N == 100)
    dim3 threadsPerBlock(10, 10);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    MatrixAdd_CUDA <<< numBlocks, threadsPerBlock >>> (dev_a , dev_b , dev_c ) ;
    cudaMemcpy(Host_c , dev_c , (N*N)*sizeof(int) , cudaMemcpyDeviceToHost);
    DisplayMatrix(Host_c,10,10);
    cudaFree (dev_a) ;
    cudaFree (dev_b) ;
    cudaFree (dev_c) ;
    free(Host_a);   // was leaked
    free(Host_b);
    free(Host_c);
    return 0 ;
}
13,145 | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// In-place element-wise add: v0[i] += v1[i] for every i < size.
__global__ void vecAdd(int * v0, int * v1, std::size_t size){
    const auto gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    v0[gid] += v1[gid];
}
// Fills two vectors with 0..size-1, adds them on the GPU, prints the result.
// Fixes: the second cudaMemcpy uploaded v0.data() into v1_d (should be
// v1.data(); harmless here only because v0 == v1), and the second cudaMalloc
// result was assigned but never checked.
int main(){
    std::size_t size = 10000;
    std::vector<int> v0(size);
    std::vector<int> v1(size);
    int * v0_d = nullptr;
    int * v1_d = nullptr;
    for(std::size_t i = 0; i < v0.size(); i++){
        v0[i] = v1[i] = i;
    }
    cudaError_t err;
    err = cudaMalloc(&v0_d, v0.size() * sizeof(int));
    if(err != cudaSuccess){
        std::cerr << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    err = cudaMalloc(&v1_d, v1.size() * sizeof(int));
    if(err != cudaSuccess){  // was unchecked in the original
        std::cerr << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    cudaMemcpy(v0_d, v0.data(), v0.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(v1_d, v1.data(), v1.size() * sizeof(int), cudaMemcpyHostToDevice);  // was v0.data()
    dim3 block(1024);
    dim3 grid((size-1)/ block.x +1);  // ceil-div
    vecAdd<<<grid,block>>>(v0_d,v1_d,v0.size());
    cudaDeviceSynchronize();
    err = cudaGetLastError();  // catches launch/exec errors
    if(err != cudaSuccess){
        std::cerr << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    cudaMemcpy(v0.data(),v0_d,v0.size() * sizeof(int),cudaMemcpyDeviceToHost);
    for(std::size_t i = 0; i < v0.size(); i++){
        printf("%d\n",v0[i] );
    }
    cudaFree(v0_d);
    cudaFree(v1_d);
}
|
13,146 | #include <bits/stdc++.h>
using namespace std;
/*
* Here I try to use one-dimensional arrays to realize a Binary Searching Tree, such that a tree can be built on the memory of GPU threads.
* Since we finally want to model map on threads, so the thing stored on each tree node is (key,value> pairs.
* For simplicity, we only consider interger keys and values. (For float key, we need to add a small margin for the operator "==".)
* We test the model by inputing a operation list, including insertion: 'i', searching: 's', getting value: 'g', removal: 'r', checking empty: 'e', checking full: 'f', and getting size: 'z'
 * A disadvantage of using the GPU here is that memory allocated inside a kernel is automatically released once execution leaves the kernel function, so the tree cannot persist across launches without host-managed buffers.
*/
const int MAX_SIZE = 4096;
const int MAX_BLOCK_SIZE = 1024;
const int MAX_NUM_BLOCK = 1024;
const int MAX_DATA = 20000;
// The following three are most simply functions: as their name describes
// True (1) when all MAX_SIZE-1 usable node slots are free, i.e. the tree
// holds no nodes.
__device__ int isEmpty(int rest){
    return rest == MAX_SIZE - 1 ? 1 : 0;
}
// Writes 1 to *full when no free node slots remain (rest == 0), else 0.
__device__ void isFull(int *full, int rest){
    *full = (rest == 0);
}
// Writes the current node count into *size: total usable slots (MAX_SIZE-1)
// minus the free ones.
__device__ void getSize(int *size, int rest){
    *size = MAX_SIZE - 1 - rest;
}
// The following function for searching a key in the item: found: return true; not found: return false.
// Tree layout (shared by all BST functions below): nodes live in the parallel
// arrays keys/values/parent/children; slot 0 acts as a sentinel parent
// (insertItem links the first real node via children[0]). children[p] packs
// both child slots into one int: left = children[p] / MAX_SIZE,
// right = children[p] % MAX_SIZE, and 0 means "no child". `rest` counts free
// slots, so rest == MAX_SIZE - 1 means the tree is empty.
// Writes 1 into *exist when `key` is present, else 0.
__device__ void searchItem(int *exist, int rest, int root, int *keys, int *children, int key){
(*exist) = 0;
// empty tree: nothing to search
if(rest == MAX_SIZE - 1) return;
int p = root;
while(key != keys[p]){
if(key < keys[p]){
// descend into the left child (children[p] / MAX_SIZE); 0 == absent
if(!(children[p]/MAX_SIZE)) return;
p = children[p]/MAX_SIZE;
}
else{
// descend into the right child (children[p] % MAX_SIZE); 0 == absent
if(!(children[p]%MAX_SIZE)) return;
p = children[p]%MAX_SIZE;
}
}
(*exist) = 1;
return;
}
// The following function for getting the value for an input key (Here we do not insert the key if the key does not exist previously)
// Writes the value stored under `key` into *value.
// PRECONDITION (enforced with device-side asserts): the tree is non-empty and
// `key` is present -- a missing key trips an assert while descending.
__device__ void getItem(int *value, int rest, int root, int *keys, int *values, int *children, int key){
assert(rest < MAX_SIZE-1);
int p = root;
while(key != keys[p]){
if(key < keys[p]){
// left child must exist or the key is absent
assert(children[p]/MAX_SIZE);
p = children[p]/MAX_SIZE;
}
else{
// right child must exist or the key is absent
assert(children[p]%MAX_SIZE);
p = children[p]%MAX_SIZE;
}
}
(*value) = values[p];
return;
}
// The following function is used for inserting a key-value pair into the tree.
// Free slots are handed out from the rest_idx free-list (rest indexes the
// next free entry). A new node's children entry is expected to already be 0
// ("no children") -- the arrays must be zero-initialized up front.
__device__ void insertItem(int *rest, int *root, int *rest_idx, int *keys, int *values, int *parent, int *children, int key, int value){
// first insertion: pop a slot, make it the root, hang it off sentinel 0
if((*rest) == MAX_SIZE - 1){
(*rest)--;
(*root) = rest_idx[(*rest)];
keys[(*root)] = key;
values[(*root)] = value;
parent[(*root)] = 0;
children[0] = (*root);
return;
}
int p = (*root);
while(key != keys[p]){
if(key < keys[p]){
if(!(children[p]/MAX_SIZE)){
// no left child: pop a free slot and attach it on the left
// (left link contributes idx * MAX_SIZE to the packed field)
(*rest)--;
int idx = rest_idx[(*rest)];
keys[idx] = key;
values[idx] = value;
parent[idx] = p;
children[p] += idx*MAX_SIZE;
return;
}
p = children[p]/MAX_SIZE;
}
else{
if(!(children[p]%MAX_SIZE)){
// no right child: attach on the right (right link is the low part)
(*rest)--;
int idx = rest_idx[(*rest)];
keys[idx] = key;
values[idx] = value;
parent[idx] = p;
children[p] += idx;
return;
}
p = children[p]%MAX_SIZE;
}
}
// key already present: overwrite its value
values[p] = value;
return;
}
// The following function is used to remove a node from the tree.
// Standard BST deletion over the packed-children layout: locate the node,
// choose a replacement `cur`, relink the parent, and return slot p to the
// free list. No-op when the tree is empty or the key is absent.
__device__ void removeItem(int *rest, int *root, int *rest_idx, int *keys, int *parent, int *children, int key){
if((*rest) == MAX_SIZE - 1) return;
int p = (*root);
// descend to the node holding `key`; bail out if a link is missing
while(key != keys[p]){
if(key < keys[p]){
if(!(children[p]/MAX_SIZE)) return;
p = children[p]/MAX_SIZE;
}
else{
if(!(children[p]%MAX_SIZE)) return;
p = children[p]%MAX_SIZE;
}
}
int par = parent[p], cur = p;
// case 1: no left child -- splice in the right child (possibly 0)
if(!(children[p]/MAX_SIZE)) cur = children[p]%MAX_SIZE;
// case 2: left child has no right child -- promote it and let it adopt
// p's right subtree
else if(!(children[children[p]/MAX_SIZE]%MAX_SIZE)){
cur = children[p]/MAX_SIZE;
children[cur] += children[p]%MAX_SIZE;
parent[children[p]%MAX_SIZE] = cur;
}
// case 3: replace p with its in-order predecessor (rightmost node of the
// left subtree), detaching it from its old parent first
else{
cur = children[children[p]/MAX_SIZE]%MAX_SIZE;
while(children[cur]%MAX_SIZE) cur = children[cur]%MAX_SIZE;
children[parent[cur]] -= cur;
children[cur] = children[p];
parent[children[p]/MAX_SIZE] = cur;
parent[children[p]%MAX_SIZE] = cur;
}
// repoint the parent's (left or right) link from p to cur
if(children[par]/MAX_SIZE == p) children[par] += (cur - p)*MAX_SIZE;
else children[par] += cur - p;
parent[cur] = par;
if(p == (*root)) (*root) = cur;
// recycle slot p
rest_idx[(*rest)] = p;
(*rest)++;
return;
}
// Replays the operation list sequentially across the whole grid: read-only
// ops ('e','f','z','s','g') are assigned round-robin to thread `cnt`, while
// mutating ops ('i' insert, 'r' remove) are funneled to global thread 0.
// NOTE(review): atomicAdd(sync, 0) is NOT a barrier -- it neither blocks
// other threads nor orders their progress, so nothing guarantees readers in
// other blocks observe a completed insert/remove. A cooperative-launch grid
// sync would be required for this to be well-defined across blocks.
// NOTE(review): the 'g' branch does not increment cnt, unlike the other
// read-only branches -- looks like an oversight; confirm intended behavior.
__global__ void procOperations(int N_threads, int *rest, int *root, int *rest_idx, int *keys, int *values, int *parent, int *children, int N_op, char *ops, int *inputs,int *ans, int *sync){
int cnt = 0, g_idx = blockDim.x * blockIdx.x + threadIdx.x;
for(int i=0;i<N_op;++i){
// wrap the round-robin counter instead of taking a modulo each step
if(cnt >= N_threads) cnt -= N_threads;
if(ops[i] == 'e' && g_idx == cnt){
ans[i] = isEmpty((*rest));
cnt++;
}
else if(ops[i] == 'f' && g_idx == cnt){
isFull(&ans[i], (*rest));
cnt++;
}
else if(ops[i] == 'z' && g_idx == cnt){
getSize(&ans[i], (*rest));
cnt++;
}
else if(ops[i] == 's' && g_idx == cnt){
searchItem(&ans[i], (*rest), (*root), keys, children, inputs[i]);
cnt++;
}
else if(ops[i] == 'g' && g_idx == cnt){
getItem(&ans[i], (*rest), (*root), keys, values, children, inputs[i]);
}
else if(ops[i] == 'r'){
atomicAdd(sync, 0);
// only one thread mutates the tree
if(g_idx == 0) removeItem(rest, root, rest_idx, keys, parent, children, inputs[i]);
atomicAdd(sync, 0);
cnt = 0;
}
else{
atomicAdd(sync, 0);
// 'i': key and value are packed as key*MAX_DATA + value
if(g_idx == 0) insertItem(rest, root, rest_idx, keys, values, parent, children, inputs[i]/MAX_DATA, inputs[i]%MAX_DATA);
atomicAdd(sync, 0);
cnt = 0;
}
}
return;
}
// Host-side owner of the vectorized-BST arrays (CPU + GPU copies) that
// drives the procOperations kernel.
// Fix: init() now zeroes keys/values/parent/children -- insertItem treats
// children[idx] == 0 as "no child", but the arrays came from `new int[...]`
// uninitialized and were copied to the device verbatim.
class CudaBST{
int root, rest, *rest_idx, *keys, *values, *parent, *children;
int *dev_root, *dev_rest, *dev_rest_idx, *dev_keys, *dev_values, *dev_parent, *dev_children, *sync;
int block_size, num_blocks, num_threads;
int *dev_inputs, N_ops, *dev_ans;
char *dev_ops;
public:
// Allocates host and device storage; b_size/n_blocks define the launch shape.
CudaBST(int b_size, int n_blocks){
assert(b_size <= MAX_BLOCK_SIZE);
assert(n_blocks <= MAX_NUM_BLOCK);
block_size = b_size;
num_blocks = n_blocks;
num_threads = num_blocks * block_size;
// Assign memories for CPU variables
rest_idx = new int [MAX_SIZE];
keys = new int [MAX_SIZE];
values = new int [MAX_SIZE];
parent = new int [MAX_SIZE];
children = new int [MAX_SIZE];
// Assign memories for GPU variables
cudaMalloc((void **)&dev_root, 1*sizeof(int));
cudaMalloc((void **)&dev_rest, 1*sizeof(int));
cudaMalloc((void **)&sync, 1*sizeof(int));
cudaMalloc((void **)&dev_rest_idx, MAX_SIZE*sizeof(int));
cudaMalloc((void **)&dev_keys, MAX_SIZE*sizeof(int));
cudaMalloc((void **)&dev_values, MAX_SIZE*sizeof(int));
cudaMalloc((void **)&dev_parent, MAX_SIZE*sizeof(int));
cudaMalloc((void **)&dev_children, MAX_SIZE*sizeof(int));
}
// Initiating the BST: empty tree, full free-list, zeroed node arrays.
void init(){
root = 0;
rest = MAX_SIZE - 1;
for(int i=0;i<MAX_SIZE;++i) rest_idx[i] = MAX_SIZE - 1 - i;
// Zero the node arrays: insertItem relies on children[idx] == 0 meaning
// "no child", and these buffers are copied to the device as-is.
for(int i=0;i<MAX_SIZE;++i){
keys[i] = 0;
values[i] = 0;
parent[i] = 0;
children[i] = 0;
}
}
// Transfer operation list onto GPU
void loadOperations(int n_ops, char *ops, int *inputs){
N_ops = n_ops;
cudaMalloc((void **)&dev_ops, N_ops*sizeof(char));
cudaMemcpy(dev_ops, ops, N_ops*sizeof(char), cudaMemcpyHostToDevice);
cudaMalloc((void **)&dev_inputs, N_ops*sizeof(int));
cudaMemcpy(dev_inputs, inputs, N_ops*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&dev_ans, N_ops*sizeof(int));
}
// Processing the operation list: upload state, run the kernel, download
// state and per-operation answers into `ans`.
void runOperations(int *ans){
cudaMemcpy(dev_ans, ans, N_ops*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_rest, &rest, 1*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_root, &root, 1*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_rest_idx, rest_idx, MAX_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_keys, keys, MAX_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_values, values, MAX_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_parent, parent, MAX_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_children, children, MAX_SIZE*sizeof(int), cudaMemcpyHostToDevice);
procOperations<<<num_blocks, block_size>>>(num_threads, dev_rest, dev_root, dev_rest_idx, dev_keys, dev_values, dev_parent, dev_children, N_ops, dev_ops, dev_inputs, dev_ans, sync);
cudaMemcpy(&rest, dev_rest, 1*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&root, dev_root, 1*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(rest_idx, dev_rest_idx, MAX_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(keys, dev_keys, MAX_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(values, dev_values, MAX_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(parent, dev_parent, MAX_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(children, dev_children, MAX_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(ans, dev_ans, N_ops*sizeof(int), cudaMemcpyDeviceToHost);
}
// Finalize the per-run buffers (operation list and answers)
void releaseOps(){
cudaFree(dev_ops);
cudaFree(dev_ans);
cudaFree(dev_inputs);
return;
}
~CudaBST(){
delete [] rest_idx;
delete [] keys;
delete [] values;
delete [] parent;
delete [] children;
cudaFree(dev_rest);
cudaFree(dev_root);
cudaFree(sync);
cudaFree(dev_rest_idx);
cudaFree(dev_keys);
cudaFree(dev_values);
cudaFree(dev_parent);
cudaFree(dev_children);
}
};
// Test harness: generates a random operation list, replays it serially with
// std::map and in parallel with CudaBST, and counts mismatches.
// Fix: the two memset calls zeroed sizeof(pointer) bytes (8) instead of the
// full N_ops*sizeof(int) result arrays, leaving them uninitialized.
class TestCudaBST{
int N_ops, *inputs, *cuda_rst, *serial_rst;
char *ops;
CudaBST cuda_bst;
inline int getData(){ return rand()%MAX_DATA; }
public:
TestCudaBST():cuda_bst(1024, 1024){}
// Builds the op list: n1 inserts, n2 removals, with n_search searches and
// n_at lookups after each mutation phase, bracketed by e/f/z status checks.
void getOpsList(int n1, int n2, int n_search = 1E6, int n_at = 1E6){
srand(0);
vector<int> in,tmp;
unordered_set<int> tmp_keys;
string op;
in.push_back(0);
in.push_back(0);
in.push_back(0);
op += "efz";
// Insert a bunch of data
assert(n1 < MAX_SIZE-1);
for(int i=0;i<n1;++i){
op += "i";
int u = getData();
// key and value are packed into one int as key*MAX_DATA + value
in.push_back(u*MAX_DATA + getData());
tmp_keys.insert(u);
}
in.push_back(0);
in.push_back(0);
in.push_back(0);
op += "efz";
for(int i=0;i<n_search;++i){
op += "s";
in.push_back(rand()%MAX_DATA);
}
// lookups only use keys known to exist (getItem asserts presence)
tmp.assign(tmp_keys.begin(), tmp_keys.end());
for(int i=0;i<n_at;++i){
op += "g";
in.push_back(tmp[rand()%(int)tmp.size()]);
}
// Remove some of data from the tree
assert(n2<=(int)tmp_keys.size());
for(int i=0;i<n2;++i){
op += "r";
int u = *tmp_keys.begin();
tmp_keys.erase(tmp_keys.begin());
in.push_back(u);
}
in.push_back(0);
in.push_back(0);
in.push_back(0);
op += "efz";
for(int i=0;i<n_search;++i){
op += "s";
in.push_back(rand()%MAX_DATA);
}
tmp.assign(tmp_keys.begin(), tmp_keys.end());
for(int i=0;i<n_at;++i){
op += "g";
in.push_back(tmp[rand()%(int)tmp.size()]);
}
// Insert some data again, up to a full tree
while((int)tmp_keys.size() < MAX_SIZE-1){
op += "i";
int u = getData();
in.push_back(u*MAX_DATA + getData());
tmp_keys.insert(u);
}
in.push_back(0);
in.push_back(0);
in.push_back(0);
op += "efz";
for(int i=0;i<n_search;++i){
op += "s";
in.push_back(rand()%MAX_DATA);
}
tmp.assign(tmp_keys.begin(), tmp_keys.end());
for(int i=0;i<n_at;++i){
op += "g";
in.push_back(tmp[rand()%(int)tmp.size()]);
}
assert((int)op.size() == (int)in.size());
N_ops = (int)op.size();
inputs = new int [N_ops];
ops = new char [N_ops];
serial_rst = new int [N_ops];
cuda_rst = new int [N_ops];
// was memset(..., sizeof(serial_rst)) -- the size of a POINTER, which
// zeroed only the first 8 bytes of each result array
memset(serial_rst, 0, N_ops*sizeof(int));
memset(cuda_rst, 0, N_ops*sizeof(int));
for(int i=0;i<N_ops;++i) ops[i] = op[i], inputs[i] = in[i];
cout<<"Randomly Generating "<<N_ops<<" different operations"<<endl<<endl;
}
// Replays the op list against std::map to produce the reference answers.
void checkSerial(){
map<int, int> test_map;
cout<<"=====================================Serial Run (using STL map)====================================="<<endl;
clock_t start_time = clock(), end_time;
for(int i=0;i<N_ops;++i){
if(ops[i] == 'e') serial_rst[i] = test_map.empty();
else if(ops[i] == 'f') serial_rst[i] = ((int)test_map.size() == MAX_SIZE - 1);
else if(ops[i] == 'z') serial_rst[i] = (int)test_map.size();
else if(ops[i] == 's') serial_rst[i] = test_map.count(inputs[i]);
else if(ops[i] == 'g') serial_rst[i] = test_map[inputs[i]];
else if(ops[i] == 'i') test_map[inputs[i]/MAX_DATA] = inputs[i]%MAX_DATA;
else test_map.erase(inputs[i]);
}
end_time = clock();
double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
cout<<setprecision(6);
cout<<" TIME USAGE: \n";
cout<<" "<<dt<<" s "<<endl<<endl;
cout<<"===================================================================================================="<<endl<<endl;
}
// Replays the op list on the GPU via CudaBST.
void checkParallel(){
cuda_bst.init();
cuda_bst.loadOperations(N_ops, ops, inputs);
cout<<"=================================Parallel Run (using vecterized BST)================================="<<endl;
clock_t start_time = clock(), end_time;
cuda_bst.runOperations(cuda_rst);
end_time = clock();
double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
cout<<setprecision(6);
cout<<" TIME USAGE: \n";
cout<<" "<<dt<<" s "<<endl<<endl;
cout<<"===================================================================================================="<<endl<<endl;
cuda_bst.releaseOps();
}
// Counts serial/parallel mismatches; appends the op letter of each mismatch.
int countMistakes(string &which_op){
int cnt = 0;
for(int i=0;i<N_ops;++i) if(serial_rst[i] != cuda_rst[i]){
++cnt;
which_op += ops[i];
}
return cnt;
}
~TestCudaBST(){
delete [] inputs;
delete [] ops;
delete [] serial_rst;
delete [] cuda_rst;
}
};
// Entry point: generate ops, run the serial and parallel replays.
// Fix: removed the unused local `string ans` (declared but never read).
int main(){
    TestCudaBST test;
    // 2048 inserts, 500 removals, 6000 searches and 6000 lookups per phase
    test.getOpsList(2048, 500, 6000, 6000);
    test.checkSerial();
    test.checkParallel();
    return 0;
}
|
13,147 | #include <stdio.h> // printf
// Prints one greeting per thread, tagged with its global thread id
// (blockIdx.x is 0 here since the demo launches a single block).
__global__ void hello_kernel() {
    const auto gid = blockDim.x*blockIdx.x + threadIdx.x;
    printf("Hello from thread %d!\n", gid);
}
// compile with: nvcc hello_world.cu -std=c++11 -O3
// output:
// Hello from thread 0!
// Hello from thread 1!
// Hello from thread 2!
// Hello from thread 3!
int main (int argc, char * argv[]) {
    // select GPU 0
    cudaSetDevice(0);
    // one block of four threads runs the greeting kernel
    hello_kernel<<<1, 4>>>();
    // wait for the device so its printf output is flushed before exit
    cudaDeviceSynchronize();
}
|
13,148 | // NOTE: h_ prefix means "host" (CPU)
// d_ prefix means "device" (GPU)
#include <iostream>
#include <fstream>
#include <cuda.h>
using namespace std;
// GPU code
// GPU code
// Thresholds each soft decision to a hard 0/1: hard[ix] = 1 when
// soft[ix] > 0, else 0.
// NOTE(review): there is no bounds guard and no size parameter -- with the
// ceil-div grid in main(), an odd softCount launches one extra thread that
// reads/writes one element past the arrays. Pad the device allocations to
// grid coverage or add a size parameter plus `if (ix < n)`.
__global__ void soft_to_hard(double* soft, int* hard) {
int ix = (blockDim.x * blockIdx.x) + threadIdx.x;
hard[ix] = (soft[ix] > 0); // XXX: Is this faster than an 'if'?
}
// CPU code
// CPU code: reads soft decisions from the input file, thresholds them on the
// GPU, and prints the hard 0/1 results.
// Fixes: device buffers are padded to full grid coverage because
// soft_to_hard has no bounds guard (odd softCount previously caused an
// out-of-bounds access by the tail thread), and the input file open is
// checked.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        cout << "Wrong number of arguments. Expected 1, got " << (argc-1)
             << endl;
        return 1;
    }
    // Read values from input file
    ifstream file(argv[1]);
    if (!file) {
        cout << "Could not open input file " << argv[1] << endl;
        return 1;
    }
    int softCount;
    file >> softCount;
    double* h_soft = new double[softCount];
    for (int i = 0; i < softCount; ++i) {
        file >> h_soft[i];
    }
    file.close();
    // Set up the computational grid first so allocations can match it
    int threadsPerBlock = 2;
    int blocksPerGrid = (softCount + threadsPerBlock - 1) / threadsPerBlock;
    // pad to full grid coverage: the kernel writes at every launched index
    int padded = blocksPerGrid * threadsPerBlock;
    // Copy values to device memory
    double* d_soft;
    cudaMalloc((void**)&d_soft, padded*sizeof(double));
    cudaMemcpy(d_soft, h_soft, softCount*sizeof(double), cudaMemcpyHostToDevice);
    int* d_hard;
    cudaMalloc((void**)&d_hard, padded*sizeof(int));
    // Launch kernel
    soft_to_hard<<<blocksPerGrid, threadsPerBlock>>>(d_soft, d_hard);
    // Copy results to host memory (only the first softCount are meaningful)
    int* h_hard = new int[softCount];
    cudaMemcpy(h_hard, d_hard, softCount*sizeof(int), cudaMemcpyDeviceToHost);
    // Print results
    for (int i = 0; i < softCount; ++i) {
        cout << h_hard[i] << ' ';
    }
    cout << endl;
    cudaFree(d_soft);
    cudaFree(d_hard);
    delete[] h_soft;
    delete[] h_hard;
    return 0;
}
|
13,149 | #include "includes.h"
// Repacks a tightly-packed 3-component coordinate array (`source`, N floats,
// 3 per atom) into a 4-component-per-atom layout in `target` (the OpenMM
// float4 convention; the 4th component of each atom is left untouched).
// Fix: the guard was `elementNum > N`, an off-by-one that let thread N read
// source[N] and write one slot past the intended range; it is now `>= N`.
__global__ void copyToOpenMM( float *target, float *source, int N ) {
    int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
    int atom = elementNum / 3;        // which atom this component belongs to
    if( elementNum >= N ) {           // was: elementNum > N
        return;
    }
    else {
        // component elementNum % 3 of `atom`, strided by 4 in the target
        target[4 * atom + elementNum % 3] = source[elementNum];
    }
}
13,150 | // 矩阵加法
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
using namespace std;
// Element-wise vector add on the device: C[i] = A[i] + B[i] for i < n.
// Fix: added the tail guard `i < n` -- the kernel already receives n but
// never used it, so any launch over-covering n wrote out of bounds.
__global__ void Plus(float A[], float B[], float C[], int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}
// Builds two length-n vectors (A = 90, B = 10), adds them on the GPU, and
// reports the accumulated absolute error of C against the expected 100.0.
int main() {
float *A, *Ad, *B, *Bd, *C, *Cd;
int n = 1024 * 1024;
int size = n * sizeof(float);
// allocate host memory
A = (float*)malloc(size);
B = (float*)malloc(size);
C = (float*)malloc(size);
for (int i = 0; i < n; i++) {
A[i] = 90.0;
B[i] = 10.0;
}
// allocate device memory
cudaMalloc((void**)&Ad, size);
cudaMalloc((void**)&Bd, size);
cudaMalloc((void**)&Cd, size);
// copy host data to the device (copying C is unnecessary -- the kernel
// overwrites it -- but harmless)
cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
cudaMemcpy(Cd, C, size, cudaMemcpyHostToDevice);
// n is an exact multiple of 512, so this grid covers every element
dim3 blockNum(n / 512);
dim3 threadPerBlock(512);
Plus<<<blockNum, threadPerBlock>>>(Ad, Bd, Cd, n);
// blocking copy synchronizes with the kernel on the default stream
cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
float max_error = 0.0;
for (int i = 0; i < n; i++)
{
max_error += fabs(100.0 - C[i]);
}
cout << "max error is " << max_error << endl;
free(A);
free(B);
free(C);
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
return 0;
}
13,151 | #include <stdio.h>
#include <ctime>
#include <stdlib.h>
#include <sys/time.h>
// Thread block size
#define BLOCK_SIZE 16
#define TILE_SIZE 32
#define ROW 1024
#define COL 1024
// GPU Functions
void MM_Basic(float *a, float *b, float *c, int row, int col, int k);
__global__ void MM_Basic_kernel( float *devA, float *devB, float *devC, int row, int col, int k);
void MM_Improved(float *a, float *b, float *c, int row, int col, int k);
__global__ void MM_Improved_kernel( float *devA, float *devB, float *devC, int row, int col, int k);
/*
* Main function
*/
/*
 * Main function: benchmarks CPU matrix multiply against two GPU versions
 * and verifies the GPU results element-by-element against the CPU result.
 * Optional argv: [rows] or [rows cols] (k is always set equal to cols).
 */
int main(int argC, char** argV) {
//
// Setup
//////////////////
// Time Variables
float time;
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Matrices
float *a, *b;
float *c_cpu, *c_gpu_basic, *c_gpu_improved;
//Setting matrix parameters.
int row = ROW;
int col = COL;
int k = COL;
int sum = 0;
// Process input arguments (if specified)
switch (argC) {
case 2: {
row = atoi(argV[1]);
col = row;
k = col;
break;
}
case 3: {
row = atoi(argV[1]);
col = atoi(argV[2]);
k = col;
break;
}
default: {
//Nothing
}
}
//Setting host memory space.
a = (float *) malloc(row*k*sizeof(float));
b = (float *) malloc(k*col*sizeof(float));
c_cpu = (float *) malloc(row*col*sizeof(float));
c_gpu_basic = (float *) malloc(row*col*sizeof(float));
c_gpu_improved = (float *) malloc(row*col*sizeof(float));
//Initializing [A] and [B] with random values from 1 to 10, and C to 0
// (values are actually 0..9; being whole numbers, float products are exact)
printf ("Initializing Matricies, could take some time...\n");
for(int i = 0 ; i < row ; i++ ){
for(int j = 0 ; j < k ; j++ ){
a[i*k+j] = rand()%10;
}
}
for(int i = 0 ; i < k ; i++ ){
for(int j = 0 ; j < col ; j++ ){
b[i*col+j] = rand()%10;
}
}
for(int i = 0 ; i < k ; i++ ){
for(int j = 0 ; j < col ; j++ ){
c_cpu [i*col+j] = 0;
c_gpu_basic [i*col+j] = 0;
c_gpu_improved [i*col+j] = 0;
}
}
//
// CPU Calculation
//////////////////
// NOTE(review): `sum` is int, so this relies on the inputs being small
// whole numbers; exact for 0..9 entries but not for general floats.
printf("Running sequential job.\n");
cudaEventRecord(start,0);
for(int i = 0 ; i < row ; i++ ){
for(int j = 0 ; j < col ; j++ ){
sum = 0;
for(int w = 0 ; w < k ; w++ ){
sum += a[i*k+w] * b[w*col+j];
}
c_cpu[i*col+j] = sum;
}
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
//
// Basic GPU Calculation
////////////////////////
printf("Running Basic parallel job.\n");
cudaEventRecord(start,0);
MM_Basic(a, b, c_gpu_basic, row, col, k);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tBasic Parallel Job Time: %.2f ms\n", time);
// Compares matrices to make sure answer is correct, initializes c for next kernel.
// (exact float comparison is OK here because all values are whole numbers)
bool error = false;
for(int i = 0 ; i < k ; i++ ){
for(int j = 0 ; j < col ; j++ ){
if (c_cpu[i*col+j] != c_gpu_basic[i*col+j]) {
printf("\tError: Starting at [%d][%d]\n", i, j);
error = true;
}
if (error) break;
}
if (error) break;
}
if (!error) printf("\tNo errors found.\n");
//
// Improved GPU Calculation
////////////////////////
printf("Running Improved parallel job.\n");
cudaEventRecord(start,0);
MM_Improved(a, b, c_gpu_improved, row, col, k);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tImproved Parallel Job Time: %.2f ms\n", time);
// Compares matrices to make sure answer is correct, initializes c for next kernel.
error = false;
for(int i = 0 ; i < k ; i++ ){
for(int j = 0 ; j < col ; j++ ){
if (c_cpu[i*col+j] != c_gpu_improved[i*col+j]) {
printf("\tError: Starting at [%d][%d]\n", i, j);
error = true;
}
if (error) break;
}
if (error) break;
}
if (!error) printf("\tNo errors found.\n");
free (a);
free (b);
free (c_cpu);
free (c_gpu_basic);
free (c_gpu_improved);
}
// Host wrapper for the naive matrix multiply: c = a * b, where a is
// row x k, b is k x col, c is row x col (all row-major). Allocates device
// buffers, copies inputs, launches the kernel, times it, copies c back.
void MM_Basic(float *a, float *b, float *c, int row, int col, int k) {
cudaEvent_t kernelstart, kernelstop;
float time;
cudaEventCreate (&kernelstart);
cudaEventCreate (&kernelstop);
int sizeA = row*k*sizeof(float);
int sizeB = k*col*sizeof(float);
int sizeC = row*col*sizeof(float);
float *devA, *devB, *devC;
cudaMalloc((void**)&devA, sizeA);
cudaMalloc((void**)&devB, sizeB);
cudaMalloc((void**)&devC, sizeC);
cudaMemcpy(devA, a, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, sizeB, cudaMemcpyHostToDevice);
dim3 dimBlock(16, 16, 1);
// Bug fix: size the grid from the actual row/col arguments, not the
// compile-time ROW/COL macros — they differ when sizes come from argv.
dim3 dimGrid((col+dimBlock.x-1)/dimBlock.x, (row+dimBlock.y-1)/dimBlock.y, 1);
cudaEventRecord(kernelstart,0);
MM_Basic_kernel<<<dimGrid, dimBlock>>>(devA, devB, devC, row, col, k);
cudaEventRecord(kernelstop,0);
cudaEventSynchronize(kernelstop);
cudaEventElapsedTime(&time, kernelstart, kernelstop);
printf("\tKernel Job Time: %.2f ms\n", time);
cudaMemcpy(c, devC, sizeC, cudaMemcpyDeviceToHost);
//Freeing device matrices.
cudaFree(devA); cudaFree(devB); cudaFree(devC);
// Release timing events (previously leaked).
cudaEventDestroy(kernelstart); cudaEventDestroy(kernelstop);
}
// Naive matrix multiply: each thread computes one element of devC.
// devA is row x k, devB is k x col, devC is row x col (row-major).
__global__ void MM_Basic_kernel( float *devA, float *devB, float *devC, int row, int col, int k) {
int txID = blockIdx.x * blockDim.x + threadIdx.x; // output column
int tyID = blockIdx.y * blockDim.y + threadIdx.y; // output row
if ((txID < col) && (tyID < row)) {
float Pvalue = 0;
for(int w = 0 ; w < k ; w++) {
// Bug fix: rows of devB and devC have 'col' elements, not 'k'
// (the two only coincide for square matrices).
Pvalue += devA[tyID*k+w] * devB[w*col+txID];
}
devC[tyID*col+txID] = Pvalue;
}
}
// Host wrapper for the shared-memory tiled matrix multiply (c = a * b,
// a: row x k, b: k x col, c: row x col). Mirrors MM_Basic but uses
// TILE_SIZE x TILE_SIZE blocks and the tiled kernel.
void MM_Improved(float *a, float *b, float *c, int row, int col, int k){
cudaEvent_t kernelstart, kernelstop;
float time;
cudaEventCreate (&kernelstart);
cudaEventCreate (&kernelstop);
int sizeA = row*k*sizeof(float);
int sizeB = k*col*sizeof(float);
int sizeC = row*col*sizeof(float);
float *devA, *devB, *devC;
cudaMalloc((void**)&devA, sizeA);
cudaMalloc((void**)&devB, sizeB);
cudaMalloc((void**)&devC, sizeC);
cudaMemcpy(devA, a, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, sizeB, cudaMemcpyHostToDevice);
dim3 dimBlock(32, 32, 1);
// Bug fix: grid sized from the actual row/col arguments instead of the
// compile-time ROW/COL macros.
dim3 dimGrid((col+dimBlock.x-1)/dimBlock.x, (row+dimBlock.y-1)/dimBlock.y, 1);
cudaEventRecord(kernelstart,0);
// Bug fix: launch the improved (tiled) kernel — the original launched
// MM_Basic_kernel here, so the "improved" path was never exercised.
MM_Improved_kernel<<<dimGrid, dimBlock>>>(devA, devB, devC, row, col, k);
cudaEventRecord(kernelstop,0);
cudaEventSynchronize(kernelstop);
cudaEventElapsedTime(&time, kernelstart, kernelstop);
printf("\tKernel Job Time: %.2f ms\n", time);
cudaMemcpy(c, devC, sizeC, cudaMemcpyDeviceToHost);
//Freeing device matrices.
cudaFree(devA); cudaFree(devB); cudaFree(devC);
// Release timing events (previously leaked).
cudaEventDestroy(kernelstart); cudaEventDestroy(kernelstop);
}
// Shared-memory tiled matrix multiply: devC = devA * devB.
// devA is row x k, devB is k x col, devC is row x col (row-major).
// Expects blockDim == (TILE_SIZE, TILE_SIZE).
// Bug fixes vs. the original: tiles are float (the int tiles truncated
// the inputs), the strides use k/col instead of 'row' (which was only
// correct for square matrices), and out-of-range tile loads are
// zero-padded so sizes need not be multiples of TILE_SIZE.
__global__ void MM_Improved_kernel( float *devA, float *devB, float *devC, int row, int col, int k){
__shared__ float shareBlockA[TILE_SIZE][TILE_SIZE];
__shared__ float shareBlockB[TILE_SIZE][TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = blockIdx.y * TILE_SIZE + ty;
int Col = blockIdx.x * TILE_SIZE + tx;
float Cvalue = 0;
// Walk the shared k dimension one tile at a time.
for (int m = 0; m < (k + TILE_SIZE - 1)/TILE_SIZE; ++m) {
int aCol = m*TILE_SIZE + tx;
int bRow = m*TILE_SIZE + ty;
shareBlockA[ty][tx] = (Row < row && aCol < k) ? devA[Row*k + aCol] : 0.0f;
shareBlockB[ty][tx] = (bRow < k && Col < col) ? devB[bRow*col + Col] : 0.0f;
__syncthreads(); // tile fully loaded before anyone reads it
for (int w = 0; w < TILE_SIZE; ++w)
Cvalue += shareBlockA[ty][w] * shareBlockB[w][tx];
__syncthreads(); // all reads done before the tile is overwritten
}
if (Row < row && Col < col)
devC[Row*col + Col] = Cvalue;
}
|
13,152 | ///-------------------------------------------------------------------------------------------------
// file: pfxsum.cu
//
// summary: simple cuda prefix sum
///-------------------------------------------------------------------------------------------------
#define PFXSUM_BLOCK_SIZE 256
// Returns the smallest power of two >= n (for n >= 1).
// Extracts floor(log2(n)) directly from the IEEE-754 float exponent bits
// (biased exponent lives at bit 23, bias 0x7f), then doubles the result
// exactly when n was not already a power of two.
// NOTE(review): float has a 24-bit mantissa, so for very large n the
// conversion may round up and perturb the exponent — fine for the small
// block sizes used in this file, but not fully general.
__device__ int nextpowerof2(int n) {
int k;
if (n > 1)
{
float f = (float) n;
// t = 2^floor(log2(n))
unsigned int const t = 1U << ((*(unsigned int *)&f >> 23) - 0x7f);
// shift left by 1 iff t < n (i.e. n was not an exact power of two)
k = t << (t < n);
}
else k = 1;
return k;
}
// Single-block exclusive (Blelloch) prefix sum: pout[i] = pin[0]+...+pin[i-1].
// Each thread loads two elements; slots past N are zero-padded up to
// nUpper, the smallest power of two >= N (same float-exponent trick as
// nextpowerof2 above).
// NOTE(review): correctness requires a single block with blockDim.x >=
// nUpper/2 and N <= PFXSUM_BLOCK_SIZE — inferred from the shared-array
// size and indexing; confirm against the launch site.
extern "C" __global__ void
pfxsum(
int * pin,
int * pout,
int N
)
{
__shared__ int g_shared[PFXSUM_BLOCK_SIZE];
unsigned int t;
int idx = threadIdx.x;
int d, e;
int offset = 1;
int nbIdx = idx*2;
int nbIdx1 = 2*idx+1;
// Load two elements per thread, zero-padding past N.
g_shared[nbIdx] = nbIdx < N ? pin[nbIdx] : 0;
g_shared[nbIdx1] = nbIdx1 < N ? pin[nbIdx1] : 0;
float f = (float) N;
t = 1U << ((*(unsigned int *)&f >> 23) - 0x7f);
int nUpper = t << (t < N);
// Up-sweep (reduce): build partial sums in place over a binary tree.
for(d = nUpper >> 1; d > 0; d >>= 1) {
__syncthreads();
if(idx < d) {
int ai = offset*(2*idx+1)-1;
int bi = offset*(2*idx+2)-1;
g_shared[bi] += g_shared[ai];
}
offset *= 2;
}
// Clear the root so the scan is exclusive.
if(idx == 0) {
g_shared[nUpper-1] = 0;
}
// Down-sweep: walk back down the tree, swapping and accumulating.
for(e=1; e<nUpper; e*=2) {
offset >>= 1;
__syncthreads();
if(idx < e) {
int ai = offset*(2*idx+1)-1;
int bi = offset*(2*idx+2)-1;
int t = g_shared[ai]; // local t intentionally shadows the outer one
g_shared[ai] = g_shared[bi];
g_shared[bi] += t;
}
}
__syncthreads();
// Write back only the first N results (padding slots are dropped).
if(2*idx<N) pout[2*idx] = g_shared[2*idx];
if(2*idx+1<N) pout[2*idx+1] = g_shared[2*idx+1];
__syncthreads();
}
// Templated single-block exclusive (Blelloch) scan over N elements.
// Unlike pfxsum above, there is no bounds checking or zero padding, so
// N must be a power of two with N == 2*blockDim.x and N <= nBlockSize —
// NOTE(review): inferred from the unguarded loads; confirm at call sites.
// NOTE(review): loop counters d/e are declared as T and the shared array
// is int regardless of T, so the template is not genuinely type-generic.
template<typename T, // type of input (assumed integral)
int nBlockSize> // shared memory block size
__device__ void
tpfxsum(
T * pin,
T * pout,
int N
)
{
__shared__ int g_shared[nBlockSize];
int idx = threadIdx.x;
T d, e;
int offset = 1;
// Each thread loads two adjacent elements (no bounds guard — see note).
g_shared[idx*2] = pin[2*idx];
g_shared[2*idx+1] = pin[2*idx+1];
// Up-sweep (reduce) phase.
for(d = N >> 1; d > 0; d >>= 1) {
__syncthreads();
if(idx < d) {
T ai = offset*(2*idx+1)-1;
T bi = offset*(2*idx+2)-1;
g_shared[bi] += g_shared[ai];
}
offset *= 2;
}
// Clear the root to make the scan exclusive.
if(idx == 0) {
g_shared[N-1] = 0;
}
// Down-sweep phase.
for(e=1; e<N; e*=2) {
offset >>= 1;
__syncthreads();
if(idx < e) {
int ai = offset*(2*idx+1)-1;
int bi = offset*(2*idx+2)-1;
int t = g_shared[ai];
g_shared[ai] = g_shared[bi];
g_shared[bi] += t;
}
}
__syncthreads();
// Each thread writes back its two elements.
pout[2*idx] = g_shared[2*idx];
pout[2*idx+1] = g_shared[2*idx+1];
__syncthreads();
}
// C-linkage entry point: exclusive prefix sum of pin[0..N-1] into pout,
// delegating to the templated single-block scan above.
// NOTE(review): tpfxsum does no bounds checking, so N must satisfy the
// power-of-two / N == 2*blockDim.x preconditions documented on tpfxsum.
extern "C" __global__ void
cpfxsum(
int * pin,
int * pout,
int N
)
{
tpfxsum<int, PFXSUM_BLOCK_SIZE>(pin,
pout,
N);
} |
13,153 | #include "includes.h"
// Sums a 12-element window of `input` starting at offset t, wrapping
// around modulo sz (circular-buffer read).
__device__ float dothings(int t,int sz, float *input){
float windowSum = 0;
int step = 0;
while (step < 12) {
windowSum += input[(step + t) % sz];
++step;
}
return windowSum;
}
// One thread per instance: each thread copies its VEC_SIZE-long slice of
// `input` into thread-local storage, accumulates dothings() over N_step
// shifted circular windows, and writes the scalar total to output[g_id].
// NOTE(review): VEC_SIZE comes from includes.h; local_data is per-thread
// local storage, so a large VEC_SIZE will spill to local memory.
__global__ void process(int N_step, int N_inst, float *input, float *output){
int g_id = blockIdx.x * blockDim.x + threadIdx.x;
// Guard: the grid may be padded beyond the instance count.
if(g_id >= N_inst) return;
float local_data[VEC_SIZE];
float ans = 0.;
for(int i=0;i<VEC_SIZE;++i) local_data[i] = input[VEC_SIZE * g_id + i];
for(int t=0;t<N_step;++t){
ans += dothings(t, VEC_SIZE, local_data);
}
output[g_id] = ans;
return;
} |
13,154 | #include "includes.h"
// Backward pass of a SAM (spatial attention) style element-wise product:
// given upstream gradient in_w_h_c_delta, accumulates
//   out_state_delta += delta * from_output   (gradient w.r.t. the scale)
//   out_from_delta  += scale * delta         (gradient w.r.t. the input)
// One thread per element; `size` is the total element count.
// NOTE(review): channel_size is accepted but unused — the original
// author's comment below questions whether a division by it is missing.
__global__ void backward_sam_kernel(float *in_w_h_c_delta, int size, int channel_size, float *in_scales_c, float *out_from_delta, float *in_from_output, float *out_state_delta)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out_state_delta[index] += in_w_h_c_delta[index] * in_from_output[index]; // l.delta * from (should be divided by channel_size?)
out_from_delta[index] += in_scales_c[index] * in_w_h_c_delta[index]; // input * l.delta
//out_state_delta[index] += in_w_h_c_delta[index];
//out_from_delta[index] = in_w_h_c_delta[index];
}
} |
13,155 | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Overlay_Cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations to sweep: {blockDim.x, blockDim.y} pairs.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes to sweep: {XSIZE, YSIZE} pairs.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps problem sizes x block shapes and times 1000
// launches of Overlay_Cuda for each configuration.
// argv[1] = number of entries of matrices_ to test.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Bug fix: don't dereference argv[1] when it was not supplied.
if (argc < 2) {
cout << "usage: " << argv[0] << " <num_matrix_sizes>" << endl;
return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int x_position = 1;
int y_position = 1;
unsigned char *main = NULL; // local deliberately shadows nothing; it is just an unfortunate name
cudaMalloc(&main, XSIZE*YSIZE);
int main_linesize = XSIZE*YSIZE;
unsigned char *overlay = NULL;
cudaMalloc(&overlay, XSIZE*YSIZE);
int overlay_linesize = XSIZE*YSIZE;
int overlay_w = 1;
int overlay_h = 1;
unsigned char *overlay_alpha = NULL;
cudaMalloc(&overlay_alpha, XSIZE*YSIZE);
int alpha_linesize = XSIZE*YSIZE;
int alpha_adj_x = 2;
int alpha_adj_y = 2;
// Round the image up to a whole number of blocks in each dimension.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // force context creation before timing
Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y);
cudaDeviceSynchronize();
// Warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Overlay_Cuda<<<gridBlock,threadBlock>>>(x_position,y_position,main,main_linesize,overlay,overlay_linesize,overlay_w,overlay_h,overlay_alpha,alpha_linesize,alpha_adj_x,alpha_adj_y);
}
// Bug fix: kernel launches are asynchronous — synchronize before the
// end timestamp, otherwise only CPU launch overhead is measured.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Bug fix: free the per-configuration device buffers (previously
// leaked on all 140 iterations).
cudaFree(main);
cudaFree(overlay);
cudaFree(overlay_alpha);
}
}} |
13,156 | #include <stdio.h>
#include <math.h>
// Pairwise reduction step: b[i] = a[2i] + a[2i+1] for i in [0, numP).
// Assumes `a` has at least 2*numP readable elements.
__global__ void evenReduce(int *a, int *b, int numP)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numP)
return;
b[i] = a[2 * i] + a[2 * i + 1];
}
// Pairwise reduction step for odd-length inputs: like evenReduce, except
// the final output slot carries the lone unpaired element a[2i] forward.
__global__ void oddReduce(int *a, int *b, int numP)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numP)
return;
// The last thread's pair is incomplete — keep the single value.
b[i] = (i == numP - 1) ? a[2 * i] : a[2 * i] + a[2 * i + 1];
}
#define LENGTH 512
#define BLOCK_THREADS 512
// Iteratively halves the array with pairwise-sum kernels until one value
// remains (the total sum), timing the whole reduction with CUDA events.
// NOTE(review): the grid is fixed at a single block of BLOCK_THREADS
// threads, so this only works while ceil(length/2) <= BLOCK_THREADS —
// true for LENGTH = 512 but not for larger inputs.
int main()
{
double length = LENGTH; // double so ceil(length/2) works below
int numP, l;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*create arrays for host and GPU*/
int *a, *b, *k_b, *k_a;
int size = length * sizeof( int );
a = (int *)malloc( size );
b = (int *)malloc( size );
cudaMalloc( (void **) &k_a, size );
cudaMalloc( (void **) &k_b, size );
/*initialize the array with 0..length-1 (expected sum: length*(length-1)/2)*/
for( int i = 0; i < length; i++ )
{
a[i] = i;
b[i] = 0;
}
/*array debug*/
// printf("A:\n");
// for(int i=0; i< length; i++)
// {
// printf("%d ", a[i]);
// }
/* copy inputs to device */
cudaMemcpy(k_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy(k_b, b, size, cudaMemcpyHostToDevice );
dim3 dimGrid( 1, 1 );
dim3 dimBlock(BLOCK_THREADS, 1);
/*Since each thread does 2 additions there are log2(N) iterations.*/
int gates = ceil(log(length) / log(2));
cudaEventRecord(start);
for(int i=0; i < gates; i++) {
/*get the number of threads needed. Ceiling used for odd array lengths*/
numP = ceil(length/2);
l = (int)length;
/*when threads are divisble by 2 use less code...*/
if( l % 2 == 0)
evenReduce<<<dimGrid,dimBlock>>>(k_a, k_b, numP);
else
oddReduce<<<dimGrid,dimBlock>>>(k_a, k_b, numP);
/*last # of threads will equal next array length to compute*/
length = numP;
// printf("\niteration %d reduction is\n", i + 1);
// cudaMemcpy(b, k_b, size, cudaMemcpyDeviceToHost );
// for(int i=0; i< N; i++)
// {
// printf("%d ", b[i]);
// }
/*send array b's data back to a after each iteration*/
cudaMemcpy(k_a, k_b, size, cudaMemcpyDeviceToDevice );
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
/* copy result back to host*/
cudaMemcpy(b, k_b, size, cudaMemcpyDeviceToHost );
printf("\nFinal reduction is %d\n", b[0]);
printf("\nThis took %f milliseconds\n", milliseconds);
/* clean up */
free(a);
free(b);
cudaFree( k_a );
cudaFree( k_b );
return 0;
}
|
13,157 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
__global__ void getmaxcu(unsigned int *, unsigned int *);
unsigned int getmax(unsigned int *, unsigned int);
// Block-level tree reduction for the maximum: each thread loads one value
// into dynamically-sized shared memory (3rd launch parameter must be
// blockDim.x * sizeof(unsigned int)), pairs are max-combined with a
// doubling stride, and thread 0 writes the block's max to
// gpu_max[blockIdx.x].
// NOTE(review): the kernel has no length parameter, so the final partial
// block reads past the end of gpu_numbers when the input size is not a
// multiple of blockDim.x — garbage values can win the reduction.
// NOTE(review): the `tid % (2*offset)` scheme is warp-divergent and slow
// compared to a contiguous-index reduction, though functionally correct
// for power-of-two block sizes.
__global__
void getmaxcu(unsigned int * gpu_numbers, unsigned int * gpu_max){
extern __shared__ unsigned int block_data[];
unsigned int tid = threadIdx.x;
block_data[tid] = gpu_numbers[(blockIdx.x*blockDim.x) + threadIdx.x];
__syncthreads();
// reduce
for(unsigned int offset=1; offset < blockDim.x; offset *= 2) {
int compare_val = 2*offset;
if (tid % (compare_val) == 0) {
if(block_data[tid] < block_data[tid + offset]){
block_data[tid] = block_data[tid + offset];
}
}
__syncthreads();
}
// write block max to list of maxes
if (tid == 0){
gpu_max[blockIdx.x] = block_data[0];
}
__syncthreads();
}
// Fills an array with `size` random values, uploads it, and finds the
// maximum via repeated block-level tree reductions on the GPU.
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
unsigned int * gpu_numbers;
unsigned int * host_max;
unsigned int * gpu_max;
unsigned int * gpu_max2;
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
// print sequential answer
//printf("Correct answer is: %u\n", getmax(numbers, size));
// allocate gpu memory for array of randomly generated numbers
int allocation_size = size * sizeof(unsigned int);
cudaMalloc((void**)&gpu_numbers,allocation_size);
cudaMemcpy(gpu_numbers,numbers,allocation_size,cudaMemcpyHostToDevice);
// get block num, and threads per block
int threads_per_block = 1024;
int num_of_blocks = (int)ceil(size/(double)threads_per_block);
// allocate gpu memory for per-block maxima
cudaMalloc((void**)&gpu_max,num_of_blocks*sizeof(unsigned int));
// find maxes of each block
getmaxcu<<<num_of_blocks,threads_per_block,threads_per_block*sizeof(unsigned int)>>>(gpu_numbers,gpu_max);
// while there is still more than 1 block, continue
// to reduce maxes
while(num_of_blocks>1){
// get new number of blocks
num_of_blocks = (int)ceil(num_of_blocks/(double)threads_per_block);
cudaMalloc((void**)&gpu_max2,num_of_blocks*sizeof(unsigned int));
// rerun kernel
getmaxcu<<<num_of_blocks,threads_per_block,threads_per_block*sizeof(unsigned int)>>>(gpu_max,gpu_max2);
// Bug fix: free the old input buffer and take ownership of the output
// buffer, instead of cudaMalloc-ing over gpu_max — the original leaked
// both the previous gpu_max and gpu_max2 on every iteration and did a
// redundant device-to-device copy.
cudaFree(gpu_max);
gpu_max = gpu_max2;
}
// allocate memory for max on host
host_max = (unsigned int *)malloc(num_of_blocks * sizeof(unsigned int));
// copy max from device to host (blocking copy also syncs with the kernel)
cudaMemcpy(host_max,gpu_max,num_of_blocks * sizeof(unsigned int),cudaMemcpyDeviceToHost);
// display max
printf(" The maximum number in the array is: %u\n", host_max[0]);
// free memory
cudaFree(gpu_numbers);
cudaFree(gpu_max);
free(numbers);
free(host_max);
exit(0);
}
/* Sequential reference: returns the largest value in num[0..size-1].
   Assumes size >= 1, since num[0] is read unconditionally. */
unsigned int getmax(unsigned int num[], unsigned int size)
{
unsigned int best = num[0];
for (unsigned int pos = 1; pos < size; pos++) {
if (num[pos] > best)
best = num[pos];
}
return best;
}
|
13,158 | #include "includes.h"
// Per-pixel RGB -> grayscale using BT.601 luma weights (0.299/0.587/0.114).
// imageInput is interleaved with `Channels` bytes per pixel; RED/GREEN/BLUE
// are channel offsets from includes.h. imageOutput is one byte per pixel.
// 2-D launch: one thread per pixel, guarded against padded grids.
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
int pos = (row*width+col)*Channels;
imageOutput[row*width+col] = imageInput[pos+RED]*0.299 + imageInput[pos+GREEN]*0.587 + imageInput[pos+BLUE]*0.114;
}
} |
13,159 | #include "includes.h"
// Gathers per-proposal detection outputs (bbox, class, optional part and
// template predictions) into a single packed `outputs` tensor.
// Grid: blockIdx.z indexes the batch; one thread per proposal.
// NOTE(review): `threadIdx.x & 0x1f` masks the thread index to 5 bits, so
// this is only a unique per-proposal index when blockDim.x <= 32 — confirm
// against the launch configuration.
__global__ void cudaSToOutput_kernel( unsigned int nbProposals, const unsigned int scoreIdx, const unsigned int nbCls, const unsigned int nbOutputs, const unsigned int maxParts, const unsigned int maxTemplates, bool generateParts, bool generateTemplates, const unsigned int* numPartsPerClass, const unsigned int* numTemplatesPerClass, const int* maxCls, const float* inputs, const int* predictionIndex, const float* partsPrediction, const float* partsVisibilityPrediction, const float* templatesPrediction, float* outputs)
{
const int batchPos = blockIdx.z*nbProposals;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
if(index < nbProposals)
{
const unsigned int inputIdx = index*4*(nbCls - scoreIdx)
+ batchPos*4*(nbCls - scoreIdx);
//const unsigned int outputIdx = (nbOutputs == 4) ?
// index*4 + batchPos*4
// : index*5 + batchPos*5;
unsigned int outputIdx = 0;
unsigned offset = 0;
// Select the stride of one output record depending on which optional
// sections (class, parts, templates) are emitted.
if((nbOutputs == 4))
outputIdx = index*4 + batchPos*4;
else if((nbOutputs == 5))
outputIdx = index*5 + batchPos*5;
else if(generateParts && generateTemplates)
outputIdx = (index + batchPos)*(5 + maxParts*3 + maxTemplates*3);
else if(generateTemplates)
outputIdx = (index + batchPos)*(5 + maxTemplates*3);
else if(generateParts)
outputIdx = (index + batchPos)*(5 + maxParts*3);
// Bounding box (4 floats) always comes first.
outputs[0 + outputIdx] = inputs[0 + inputIdx];
outputs[1 + outputIdx] = inputs[1 + inputIdx];
outputs[2 + outputIdx] = inputs[2 + inputIdx];
outputs[3 + outputIdx] = inputs[3 + inputIdx];
offset = 4;
if(nbOutputs > 4)
{
// Class id (clamped to 0 when no class was predicted).
int cls = maxCls[index + batchPos];
outputs[4 + outputIdx] = cls > -1 ?
(float) cls
: 0.0;
offset += 1;
}
if(generateParts)
{
const int predProp = predictionIndex[(index + batchPos)*2 + 0];
const int predCls = predictionIndex[(index + batchPos)*2 + 1];
// PARTS PROCESSING
if(predCls > -1)
{
for(unsigned int part = 0; part < numPartsPerClass[predCls];
++part)
{
const unsigned int partIdx = batchPos*maxParts*2*nbCls
+ predProp*maxParts*2*nbCls
+ predCls*maxParts*2
+ part*2;
outputs[0 + offset + part*2 + outputIdx] = partsPrediction[0 + partIdx];
outputs[1 + offset + part*2 + outputIdx] = partsPrediction[1 + partIdx];
}
// NOTE(review): this zero-padding loop writes at
// offset + numParts*2 + idx*2 with idx starting at numParts, which
// overshoots the maxParts*2 slot range whenever numParts > 0 —
// looks like the numParts*2 term should not be there; confirm
// against the host-side layout before changing.
for(int idx = numPartsPerClass[predCls]; idx < maxParts; ++idx)
{
outputs[0 + offset + numPartsPerClass[predCls]*2 + idx*2 + outputIdx] = 0.0;
outputs[1 + offset + numPartsPerClass[predCls]*2 + idx*2 + outputIdx] = 0.0;
}
}
offset += maxParts*2;
if(predCls > -1)
{
// PARTS VISIBILITY PROCESSING
for(unsigned int part = 0; part < numPartsPerClass[predCls];
++part)
{
const unsigned int partVisibilityIdx = batchPos*maxParts*nbCls
+ predProp*maxParts*nbCls
+ predCls*maxParts
+ part;
outputs[offset + part + outputIdx] = partsVisibilityPrediction[partVisibilityIdx];
}
for(int idx = numPartsPerClass[predCls]; idx < maxParts; ++idx)
outputs[offset + numPartsPerClass[predCls] + idx + outputIdx] = -1.0;
}
offset += maxParts;
}
if(generateTemplates)
{
const int predProp = predictionIndex[(index + batchPos)*2 + 0];
const int predCls = predictionIndex[(index + batchPos)*2 + 1];
if(predCls > -1)
{
for(unsigned int tpl = 0; tpl < numTemplatesPerClass[predCls]; ++tpl)
{
unsigned int templateIdx = batchPos*maxTemplates*3*nbCls
+ predProp*maxTemplates*3*nbCls
+ predCls*maxTemplates*3
+ tpl*3;
outputs[0 + offset + tpl*3 + outputIdx] = templatesPrediction[0 + templateIdx];
outputs[1 + offset + tpl*3 + outputIdx] = templatesPrediction[1 + templateIdx];
outputs[2 + offset + tpl*3 + outputIdx] = templatesPrediction[2 + templateIdx];
}
// Bug fix: padding loop bound was `maxParts` (copy-paste from the
// parts section); templates must pad up to maxTemplates.
for(int idx = numTemplatesPerClass[predCls]; idx < maxTemplates; ++idx)
{
outputs[0 + offset + numTemplatesPerClass[predCls]*3 + idx*3 + outputIdx] = 0.0;
outputs[1 + offset + numTemplatesPerClass[predCls]*3 + idx*3 + outputIdx] = 0.0;
outputs[2 + offset + numTemplatesPerClass[predCls]*3 + idx*3 + outputIdx] = 0.0;
}
}
}
}
} |
// Device helper: one SAXPY element, a * x + y, in single precision.
extern "C"
__device__
float saxpy_dev(float a, float x, float y)
{
    return y + a * x; // addition is commutative; identical result
}
|
// Character histogram: freq[c - base] counts occurrences of code c in
// buffer[0..file_size-1]. Grid-stride loop, so any launch shape is valid;
// atomicAdd serializes concurrent increments of the same bin.
// NOTE(review): assumes every buffer[i] >= base and that freq is large
// enough for the code range — smaller values index freq out of bounds.
__global__ void count_characters(int *buffer, int *freq, long file_size, int base) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = gridDim.x * blockDim.x;
long i;
for (i=index; i<file_size; i+=total_threads)
atomicAdd(&(freq[buffer[i] - base]), 1);
} |
13,162 | #include "includes.h"
// Element-wise ReLU over an x*y-element buffer: R[i] = max(V[i], 0).
// One thread per element, guarded against grids padded past x*y.
__global__ void reluForward(float* R, float* V, int x, int y){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < x*y)
R[index] = fmaxf(V[index], 0);
} |
13,163 | #include "includes.h"
// For each partition, records its final weight value: element idx is a
// partition boundary when part[idx] != part[idx+1] (or idx is the last
// element), and that element's weight is stored at accumulatedSize[part].
// NOTE(review): the name suggests `weights` already holds an inclusive
// per-partition running sum, making the boundary value the partition
// total — confirm against the caller.
__global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == size - 1)
accumulatedSize[part[idx]] = weights[idx];
if (idx < size - 1)
{
int thisPart = part[idx];
if (thisPart != part[idx + 1])
accumulatedSize[thisPart] = weights[idx];
}
} |
13,164 | #include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#define Z 2
#define Y 5
#define X 5
#define xBound X / 2
#define yBound Y / 2
#define SCALE 8
// Reads a PNG file into a newly malloc'd interleaved byte buffer and
// reports its dimensions and channel count.
// Returns 0 on success; nonzero otherwise (1 = bad signature, 2 = cannot
// open file, 3 = out of memory for pixels, 4 = libpng allocation failure).
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
// Bug fix: check fopen before reading through the pointer.
if (!infile)
return 2; /* cannot open file */
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8)) {
fclose(infile);
return 1; /* bad signature */
}
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr) {
fclose(infile);
return 4; /* out of memory */
}
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
fclose(infile);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
fclose(infile);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
// Bug fix: release libpng state and close the file (previously leaked
// on every path, including success).
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
fclose(infile);
return 0;
}
// Writes an 8-bit RGB PNG of the given dimensions from an interleaved
// byte buffer (width*channels bytes per row). Compression level 1 and
// disabled filtering trade file size for speed.
// NOTE(review): fopen/png_create_* results are not checked — an
// unwritable path dereferences NULL inside libpng.
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
// Build per-row pointers into the flat image buffer.
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
// Two 5x5 derivative masks in constant memory (Z = 2 directions): one per
// gradient direction; every thread reads the same elements, so constant
// memory's broadcast path applies.
__constant__ char mask[Z][Y][X] = { { { -1, -4, -6, -4, -1 },
{ -2, -8, -12, -8, -2 },
{ 0, 0, 0, 0, 0 },
{ 2, 8, 12, 8, 2 },
{ 1, 4, 6, 4, 1 } },
{ { -1, -2, 0, 2, 1 },
{ -4, -8, 0, 8, 4 },
{ -6, -12, 0, 12, 6 },
{ -4, -8, 0, 8, 4 },
{ -1, -2, 0, 2, 1 } } };
// Half-open range test: 1 when lower <= val < upper, otherwise 0.
inline __device__ int bound_check(int val, int lower, int upper) {
return (val >= lower && val < upper) ? 1 : 0;
}
// Sobel-style 5x5 edge filter on an interleaved BGR byte image.
// Blocks stride over rows (one row per block pass), threads stride over
// columns; each pass stages a 5-row window of the R/G/B channels into
// shared tiles (width 262, i.e. blockDim.x plus halo slots) with a
// 2-pixel halo filled by the edge threads, then convolves with the two
// constant-memory masks and writes the scaled gradient magnitude.
__global__ void sobel(unsigned char *s, unsigned char *t, unsigned height, unsigned width, unsigned channels) {
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
float val[Z][3];
__shared__ unsigned char RMat[5][262];
__shared__ unsigned char GMat[5][262];
__shared__ unsigned char BMat[5][262];
// if (tid >= height) return;
// int y = tid;
for (int y = blockIdx.x; y < height; y += gridDim.x) {
for (int x = threadIdx.x; x < width; x += blockDim.x) {
// for (int x = 0; x < width; ++x) {
/* Z axis of mask */
/* Y and X axis of mask */
// Stage the 5-row neighborhood into shared memory.
for (int v = -yBound; v <= yBound; ++v) {
if (bound_check(y + v, 0, height)) {
// FIXME: this code currently only works when threadNum is set to 64.
// With threadNum = 256, once each block enters its second pass
// (blockIdx.x + gridDim.x), the row entries at threadIdx.x = 64 go wrong.
// threadNum is forced to 64 to avoid this, but the bug still needs
// to be tracked down.
RMat[v+yBound][threadIdx.x + 2] = s[channels * (width * (y + v) + x) + 2];
GMat[v+yBound][threadIdx.x + 2] = s[channels * (width * (y + v) + x) + 1];
BMat[v+yBound][threadIdx.x + 2] = s[channels * (width * (y + v) + x) + 0];
// Edge threads also fill the 2-pixel halo on their side.
if (threadIdx.x == 0) {
if (bound_check(x - 2, 0, width)) {
RMat[v+yBound][threadIdx.x] = s[channels * (width * (y + v) + x-2) + 2];
GMat[v+yBound][threadIdx.x] = s[channels * (width * (y + v) + x-2) + 1];
BMat[v+yBound][threadIdx.x] = s[channels * (width * (y + v) + x-2) + 0];
}
if (bound_check(x - 1, 0, width)) {
RMat[v+yBound][threadIdx.x + 1] = s[channels * (width * (y + v) + x-1) + 2];
GMat[v+yBound][threadIdx.x + 1] = s[channels * (width * (y + v) + x-1) + 1];
BMat[v+yBound][threadIdx.x + 1] = s[channels * (width * (y + v) + x-1) + 0];
}
} else if (threadIdx.x == blockDim.x - 1) {
if (bound_check(x + 1, 0, width)) {
RMat[v+yBound][threadIdx.x + 3] = s[channels * (width * (y + v) + x+1) + 2];
GMat[v+yBound][threadIdx.x + 3] = s[channels * (width * (y + v) + x+1) + 1];
BMat[v+yBound][threadIdx.x + 3] = s[channels * (width * (y + v) + x+1) + 0];
}
if (bound_check(x + 2, 0, width)) {
RMat[v+yBound][threadIdx.x + 4] = s[channels * (width * (y + v) + x+2) + 2];
GMat[v+yBound][threadIdx.x + 4] = s[channels * (width * (y + v) + x+2) + 1];
BMat[v+yBound][threadIdx.x + 4] = s[channels * (width * (y + v) + x+2) + 0];
}
}
}
}
__syncthreads();
// Convolve both masks against the staged neighborhood.
for (int i = 0; i < Z; ++i) {
val[i][2] = 0.;
val[i][1] = 0.;
val[i][0] = 0.;
/* Y and X axis of mask */
for (int v = -yBound; v <= yBound; ++v) {
for (int u = -xBound; u <= xBound; ++u) {
if (bound_check(x + u, 0, width) && bound_check(y + v, 0, height)) {
// const unsigned char R = s[channels * (width * (y + v) + (x + u)) + 2];
// const unsigned char G = s[channels * (width * (y + v) + (x + u)) + 1];
// const unsigned char B = s[channels * (width * (y + v) + (x + u)) + 0];
const unsigned char R = RMat[v+yBound][threadIdx.x + 2 + u];
const unsigned char G = GMat[v+yBound][threadIdx.x + 2 + u];
const unsigned char B = BMat[v+yBound][threadIdx.x + 2 + u];
val[i][2] += R * mask[i][u + xBound][v + yBound];
val[i][1] += G * mask[i][u + xBound][v + yBound];
val[i][0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
// Gradient magnitude per channel, scaled and clamped to a byte.
float totalR = 0.;
float totalG = 0.;
float totalB = 0.;
for (int i = 0; i < Z; ++i) {
totalR += val[i][2] * val[i][2];
totalG += val[i][1] * val[i][1];
totalB += val[i][0] * val[i][0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.) ? 255 : totalR;
const unsigned char cG = (totalG > 255.) ? 255 : totalG;
const unsigned char cB = (totalB > 255.) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
__syncthreads();
}
}
}
// Loads argv[1] (input PNG), runs the sobel kernel on the GPU, and writes
// the filtered image to argv[2].
int main(int argc, char **argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char *src = NULL, *dst;
unsigned char *dsrc, *ddst;
/* read the image to src, and get height, width, channels */
if (read_png(argv[1], &src, &height, &width, &channels)) {
std::cerr << "Error in read png" << std::endl;
return -1;
}
dst = (unsigned char *)malloc(height * width * channels * sizeof(unsigned char));
// Pin the already-allocated source buffer for faster transfers.
cudaHostRegister(src, height * width * channels * sizeof(unsigned char), cudaHostRegisterDefault);
// cudaMalloc(...) for device src and device dst
cudaMalloc(&dsrc, height * width * channels * sizeof(unsigned char));
cudaMalloc(&ddst, height * width * channels * sizeof(unsigned char));
// cudaMemcpy(...) copy source image to device (mask matrix if necessary)
cudaMemcpy(dsrc, src, height * width * channels * sizeof(unsigned char), cudaMemcpyHostToDevice);
// decide to use how many blocks and threads
// 64 threads is required by the kernel's FIXME: larger block sizes
// corrupt the shared-tile staging on a block's second row pass.
const int num_threads = 64;
// const int num_blocks = height / num_threads + 1;
const int num_blocks = 2048;
// launch cuda kernel
sobel <<<num_blocks, num_threads>>> (dsrc, ddst, height, width, channels);
// cudaMemcpy(...) copy result image to host (blocking copy also syncs)
cudaMemcpy(dst, ddst, height * width * channels * sizeof(unsigned char), cudaMemcpyDeviceToHost);
write_png(argv[2], dst, height, width, channels);
free(src);
free(dst);
cudaFree(dsrc);
cudaFree(ddst);
return 0;
}
|
13,165 | #include <stdio.h>
#include <stdlib.h>
// Square matrix multiply d_P = d_M * d_N (all Width x Width, row-major).
// One thread per output element; guarded for grids padded past Width.
__global__
void matMultKernel(float *d_M, float *d_N, float *d_P, int Width){
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if (Row >= Width || Col >= Width)
return;
float acc = 0;
for (int idx = 0; idx < Width; ++idx)
acc += d_M[Row*Width + idx] * d_N[idx*Width + Col];
d_P[Row*Width + Col] = acc;
}
// Host wrapper: computes C = A * B for n x n matrices on the GPU,
// handling device allocation, transfers, kernel launch, and cleanup.
void matMult(float* A, float* B, float* C, int n){
int size = n*n*sizeof(float);
float *d_A, *d_B, *d_C;
// Allocate all three device buffers, then upload the two inputs.
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
// 8x16 blocks; grid rounded up to cover all n columns and rows.
dim3 dimBlock(8,16,1);
dim3 dimGrid(ceil(n/8.0), ceil(n/16.0), 1);
matMultKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);
// Blocking copy synchronizes with the kernel before C is read.
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main(){
    // Reads n from stdin, multiplies two n x n matrices of ones on the GPU
    // and prints the product (every entry should equal n).
    int n;
    // Fix: validate the scanf result before using n.
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "expected a positive matrix size on stdin\n");
        return 1;
    }
    float *h_A = (float*) malloc((size_t)n*n*sizeof(float));
    float *h_B = (float*) malloc((size_t)n*n*sizeof(float));
    float *h_C = (float*) malloc((size_t)n*n*sizeof(float));
    if (!h_A || !h_B || !h_C) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < n*n; i++) {
        h_A[i] = 1;
        h_B[i] = 1;
    }
    matMult(h_A, h_B, h_C, n);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            printf("%f ", h_C[i*n+j]);
        printf("\n");
    }
    printf("\n");
    // Fix: release the host buffers (the original leaked all three).
    free(h_A); free(h_B); free(h_C);
    return 0;
}
|
13,166 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
namespace GameOfLifeCUDALibrary {
// Kernel stub: intended to initialize a width x height world on the
// device. Body not implemented yet.
__global__ void InitWorld(int width, int height)
{
}
// Host entry point for world initialization. Not implemented yet;
// presumably it will launch InitWorld — TODO confirm intended design.
void init_world_cuda(int width, int height)
{
}
} |
13,167 |
#include <cstdio>
#include <cstdlib>
#include <cuda.h>
//Init cuda here
// __device__ void VecTest(float * A, float* B, size_t n){
//
// size_t tid = threadIdx.x;
// if (tid < n){
// B[N] = A[N];
// }
//
// __syncthreads();
// return;
// }
// Copies n doubles from items into to_copy.
// Generalized to a grid-stride loop: with the existing <<<1,n>>> launch the
// behavior is unchanged (stride == n, one element per thread), but the kernel
// is now also correct for any multi-block launch and for n > blockDim.x.
__global__ void copy_kernel(double * to_copy, double* items, size_t n) {
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        to_copy[i] = items[i];
    }
}
// Launches copy_kernel with a single block of n threads.
// NOTE(review): this caps n at the device's max threads per block (1024 on
// current GPUs) — larger n silently fails to launch. No error checking is
// done. Verify callers never exceed that, or move to a multi-block launch.
void copy_wrapper(double * to_copy, double* items, size_t n){
    copy_kernel<<<1,n>>>(to_copy, items, n);
}
|
13,168 | #include "includes.h"
// Subpart A:
// Write step 1 as a kernel that operates on threads 0--31.
// Assume that the input flags are 0 for false and 1 for true and are stored
// in a local per-thread register called p (for predicate).
//
// You have access to 31 words of shared memory s[0:31], with s[0]
// corresponding to thread 0 and s[31] corresponding to thread 31.
// You may change the values of s[0:31]. Put the return sum in s[0].
// Your code should execute no more than 5 warp-wide addition operations.
// Tree reduction of per-thread 0/1 predicates across the block.
// Each thread publishes its predicate into shared memory s (one slot per
// thread); the active half then repeatedly folds the upper half into the
// lower half until the block total sits in s[0], which is returned.
// All of s[0:blockDim.x) may be overwritten.
__device__ unsigned int shared_reduce(unsigned int p, volatile unsigned int * s) {
    const int lane = threadIdx.x;
    s[lane] = p;
    __syncthreads();
    // Halving stride each round; barrier is outside the divergent branch so
    // every thread reaches it.
    for (int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (lane < half)
            s[lane] = s[lane] + s[lane + half];
        __syncthreads();
    }
    return s[0];
}
// Block-wide sum of d_in via dynamic shared memory; thread 0 writes the
// total to *d_out_shared. Assumes one element of d_in per thread.
__global__ void reduce(unsigned int * d_out_shared, const unsigned int * d_in)
{
    extern __shared__ unsigned int s[];
    const int tid = threadIdx.x;
    const unsigned int total = shared_reduce(d_in[tid], s);
    if (tid == 0)
        *d_out_shared = total;
}
13,169 | #include "includes.h"
// For every spike in the stimulus, decides whether it fires at the current
// timestep (|t_spike - t_now| < timestep/2). A firing spike updates the
// neuron's last-spike time; in high-fidelity mode the bit for the current
// timestep in the neuron's spike bit-array is set (firing) or cleared (not).
// Grid-stride loop: one thread may process several spikes.
//
// Fix: the original called __syncthreads() inside divergent control flow
// (inside a per-thread `if` and after a per-thread `while`), which is
// undefined behavior. No shared memory is used here, so both barriers were
// unnecessary and have been removed; the duplicated bit-index computation
// was also folded into one path.
__global__ void check_for_generator_spikes_kernel(int *d_neuron_ids_for_stimulus, float *d_spike_times_for_stimulus, float* d_last_spike_time_of_each_neuron, unsigned char* d_bitarray_of_neuron_spikes, int bitarray_length, int bitarray_maximum_axonal_delay_in_timesteps, float current_time_in_seconds, float timestep, size_t number_of_spikes_in_stimulus, bool high_fidelity_spike_flag) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < number_of_spikes_in_stimulus) {
        const bool fires_now = fabs(current_time_in_seconds - d_spike_times_for_stimulus[idx]) < 0.5 * timestep;
        if (fires_now) {
            d_last_spike_time_of_each_neuron[d_neuron_ids_for_stimulus[idx]] = current_time_in_seconds;
        }
        if (high_fidelity_spike_flag) {
            // Start of this neuron's bit store.
            int neuron_id_spike_store_start = d_neuron_ids_for_stimulus[idx] * bitarray_length;
            // Bit offset for the current timestep (circular over the max delay).
            int offset_index = (int)(round((float)(current_time_in_seconds / timestep))) % bitarray_maximum_axonal_delay_in_timesteps;
            int offset_byte = offset_index / 8;
            int offset_bit_pos = offset_index - (8 * offset_byte);
            unsigned char byte = d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte];
            // Set the bit when the spike fires this step, clear it otherwise.
            if (fires_now)
                byte |= (1 << offset_bit_pos);
            else
                byte &= ~(1 << offset_bit_pos);
            d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte] = byte;
        }
        idx += blockDim.x * gridDim.x;
    }
}
13,170 |
// Evaluates the Chebyshev polynomial of the first kind T_o(x) at x, writing
// the result to *y. Orders 0..16 are hard-coded in expanded monomial form
// (kept verbatim: rewriting via the T_n recurrence would change the
// floating-point rounding). Any other order yields *y = 0.0.
__device__ void chebyshev_polynomial(double x, double* y, int o)
{
switch(o) {
case 0:
*y = 1.0;
break;
case 1:
*y = x;
break;
case 2:
*y = 2.0*x*x - 1.0;
break;
case 3:
*y = x*(4.0*x*x - 3.0);
break;
case 4:
*y = 8.0*x*x*x*x - 8.0*x*x + 1.0;
break;
case 5:
*y = x*(16.0*x*x*x*x - 20.0*x*x + 5.0);
break;
case 6:
*y = 32.0*x*x*x*x*x*x - 48.0*x*x*x*x + 18.0*x*x - 1.0;
break;
case 7:
*y = x*(64.0*x*x*x*x*x*x - 112.0*x*x*x*x + 56.0*x*x - 7.0);
break;
case 8:
*y = 128.0*x*x*x*x*x*x*x*x - 256.0*x*x*x*x*x*x + 160.0*x*x*x*x - 32.0*x*x + 1.0;
break;
case 9:
*y = x*(256.0*x*x*x*x*x*x*x*x - 576.0*x*x*x*x*x*x + 432.0*x*x*x*x - 120.0*x*x + 9.0);
break;
case 10:
*y = 512.0*x*x*x*x*x*x*x*x*x*x - 1280.0*x*x*x*x*x*x*x*x + 1120.0*x*x*x*x*x*x - 400.0*x*x*x*x + 50.0*x*x - 1.0;
break;
case 11:
*y = x*(1024.0*x*x*x*x*x*x*x*x*x*x - 2816.0*x*x*x*x*x*x*x*x + 2816.0*x*x*x*x*x*x - 1232.0*x*x*x*x + 220.0*x*x - 11.0);
break;
case 12:
*y = 2048.0*x*x*x*x*x*x*x*x*x*x*x*x - 6144.0*x*x*x*x*x*x*x*x*x*x + 6912.0*x*x*x*x*x*x*x*x - 3584.0*x*x*x*x*x*x + 840.0*x*x*x*x - 72.0*x*x + 1.0;
break;
case 13:
*y = x*(4096.0*x*x*x*x*x*x*x*x*x*x*x*x - 13312.0*x*x*x*x*x*x*x*x*x*x + 16640.0*x*x*x*x*x*x*x*x - 9984.0*x*x*x*x*x*x + 2912.0*x*x*x*x - 364.0*x*x + 13.0);
break;
case 14:
*y = 8192.0*x*x*x*x*x*x*x*x*x*x*x*x*x*x - 28672.0*x*x*x*x*x*x*x*x*x*x*x*x + 39424.0*x*x*x*x*x*x*x*x*x*x - 26880.0*x*x*x*x*x*x*x*x + 9408.0*x*x*x*x*x*x - 1568.0*x*x*x*x + 98.0*x*x - 1.0;
break;
case 15:
*y = x*(16384.0*x*x*x*x*x*x*x*x*x*x*x*x*x*x - 61440.0*x*x*x*x*x*x*x*x*x*x*x*x + 92160.0*x*x*x*x*x*x*x*x*x*x - 70400.0*x*x*x*x*x*x*x*x + 28800.0*x*x*x*x*x*x - 6048.0*x*x*x*x + 560.0*x*x - 15.0);
break;
case 16:
*y = 32768.0*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x - 131072.0*x*x*x*x*x*x*x*x*x*x*x*x*x*x + 212992.0*x*x*x*x*x*x*x*x*x*x*x*x - 180224.0*x*x*x*x*x*x*x*x*x*x + 84480.0*x*x*x*x*x*x*x*x - 21504.0*x*x*x*x*x*x + 2688.0*x*x*x*x - 128.0*x*x + 1.0;
break;
default:
// Orders above 16 are not implemented; caller receives 0.0.
*y = 0.0;
break;
}
}
|
13,171 | /*#include <GL/glut.h>
#include <stdio.h>
#include <cuda.h>
#define DIM 500
__global__ void kernel( unsigned char *ptr, int ticks )
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
float fact=16.0f;
if(d<fact)
fact=d;
unsigned char color = (unsigned char)((255.0f * cos(d/2.0f - ticks/8.0f))/(d/fact));
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 1] = color*0.88f;
ptr[offset*4 + 2] = color*0.92f;
ptr[offset*4 + 3] = 255;
}
void display_cb()
{
glClear(GL_COLOR_BUFFER_BIT);
glColor3f(1,1,0);
unsigned char *gpu_bitmap;
unsigned char *cpu_bitmap=(unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4);
cudaMalloc( (void**)&gpu_bitmap, DIM*DIM*4 );
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
int ticks = 0;
while(ticks<50)
{
kernel<<<blocks,threads>>>( gpu_bitmap, ticks );
ticks++;
cudaMemcpy( cpu_bitmap, gpu_bitmap,DIM*DIM*4 , cudaMemcpyDeviceToHost );
//visualize cpu_bitmap
glBegin(GL_POINTS);
for(int x=0; x<DIM; x++)
{
for(int y=0; y<DIM; y++)
{
int offset = x + y * DIM;
glColor3f((cpu_bitmap [offset*4 + 0]/255.0f),
(cpu_bitmap [offset*4 + 1]/255.0f),
(cpu_bitmap [offset*4 + 1]/255.0f));
glVertex2f(x,y);
}
}
glEnd();
glutSwapBuffers();
}
cudaFree( gpu_bitmap );
glutPostRedisplay();
}
void reshape_cb (int w, int h)
{
if (w==0||h==0) return;
glViewport(0,0,w,h);
glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
gluOrtho2D(0,w,0,h);
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();
}
void initialize()
{
glutInitDisplayMode (GLUT_RGBA|GLUT_DOUBLE);
glutInitWindowSize (DIM,DIM);
glutInitWindowPosition (100,100);
glutCreateWindow ("Ventana OpenGL");
glutDisplayFunc (display_cb);
glutReshapeFunc (reshape_cb);
glClearColor(0.f,0.f,0.f,1.f);
}
int main (int argc, char **argv)
{
glutInit (&argc, argv);
initialize();
glutMainLoop();
return 0;
}
*/
|
13,172 | //
// Created by igor on 10.04.2021.
//
#include "Sphere.cuh"
// Sphere: a sphere primitive described by its center, radius and color.
// Fix: the first parameter had been corrupted by HTML-entity mangling
// ("&center" rendered as "¢er"), which does not compile; restored the
// const-reference parameter.
Sphere::Sphere(const Vector3 &center, double radius, const ColorF &color) : radius(radius), center(center),
                                                                            color(color) {
}
|
13,173 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// 2 - 2d block of threads --> 4 values in each dimension of x and y , grid = 2
// Prints each thread's globally-unique linear id for a 2D grid of 2D blocks,
// together with the input element that id maps to. Output order between
// threads is unspecified.
__global__ void unique_gid_calculation2d(int * input)
{
    // Row-major linear id inside this block.
    int tid = blockDim.x * threadIdx.y + threadIdx.x;
    // Threads contributed by blocks to the left in the same grid row.
    int threads_per_block = blockDim.x * blockDim.y;
    int block_offset = blockIdx.x * threads_per_block;
    // Threads contributed by all grid rows above this one.
    int row_offset = threads_per_block * gridDim.x * blockIdx.y;
    int gid = tid + block_offset + row_offset;
    printf("blockIdx.x : %d, blockIdx.y : %d, threadIdx.x : %d, gid : %d - data : %d \n", blockIdx.x, blockIdx.y, tid, gid, input[gid]);
}
int main()
{
    // Uploads 16 ints and launches a 2x2 grid of 2x2 blocks so each of the
    // 16 threads prints its unique global id and the element it maps to.
    const int array_size = 16;
    const int array_byte_size = sizeof(int) * array_size;
    int h_data[] = {23, 9, 4, 53, 64, 12, 1, 33, 22, 11, 9, 12, 13, 89, 90, 77};
    for (int i = 0; i < array_size; i++) {
        printf("%d ", h_data[i]);
    }
    printf ("\n \n");
    int * d_data;
    cudaMalloc((void **)&d_data, array_byte_size);
    cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
    dim3 block(2,2);
    dim3 grid(2,2);
    unique_gid_calculation2d<<<grid, block>>>(d_data);
    // Wait for all device printf output before tearing down.
    cudaDeviceSynchronize();
    cudaFree(d_data);   // fix: the original leaked the device buffer
    cudaDeviceReset();
    return 0;           // fix: explicit exit status
}
|
13,174 | #include"tfidf.cuh"
#include<cmath>
//TODO: parallelize
// Applies an IDF weighting in place to a dense document-term matrix.
// matrix is row-major [rows x cols]: rows = documents, cols = terms.
// Each column is scaled by log10(rows / (docsContainingTerm + 1)); the +1
// smoothing avoids division by zero for terms that appear nowhere.
// TODO: parallelize (kept serial, as in the original).
void tfidf(int rows, int cols, float * matrix) {
    for (int term = 0; term < cols; term++) {
        // Count documents with a positive weight for this term.
        int docsWithTerm = 0;
        for (int doc = 0; doc < rows; doc++) {
            if (matrix[doc * cols + term] > 0) {
                docsWithTerm++;
            }
        }
        float idf = log10(rows / float(docsWithTerm + 1));
        // Scale the whole column by its IDF factor.
        for (int doc = 0; doc < rows; doc++) {
            matrix[doc * cols + term] *= idf;
        }
    }
}
13,175 | extern "C"
{
// Backward pass of a scaled tanh: given upstream gradient gradc and forward
// output fc (apparently fc = sf * tanh(x), since fc/sf is squared below),
// accumulates sf * gradc * (1 - (fc/sf)^2) into gradn. One thread per element.
__global__ void Dstanh(const int lengthX, const double sf, const double *gradc, const double *fc, double *gradn)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= lengthX)
        return;
    const double t = fc[idx] / sf;   // recover tanh(x) from the forward output
    gradn[idx] += sf * gradc[idx] * (1.0 - t * t);
}
} |
13,176 | #include <iostream>
__global__
// Element-wise square: B[i] = A[i]^2 for every i < numElements.
void square(const float *A, float *B, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    B[idx] = A[idx] * A[idx];
}
int main(void)
{
    // Squares numElements random floats on the GPU, then verifies each
    // result against the host computation.
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    // Fix: the banner claimed "Vector addition" but this program squares.
    std::cout << "[Vector squaring of " << numElements << " elements]\n";
    float *h_A = new float[numElements];
    float *h_B = new float[numElements];
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
    }
    float *d_A, *d_B;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 1024;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid
              << " blocks of " << threadsPerBlock << " threads\n";
    square<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, numElements);
    // Fix: surface launch errors instead of silently verifying garbage.
    if (cudaGetLastError() != cudaSuccess)
    {
        std::cerr << "Kernel launch failed\n";
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();   // wait for all threads to finish
    cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] * h_A[i] - h_B[i]) > 1e-5)
        {
            std::cerr << "Result verification failed at element " << i << "!\n";
            exit(EXIT_FAILURE);
        }
    }
    std::cout << "Test PASSED\n";
    cudaFree(d_A);
    cudaFree(d_B);
    delete [] h_A;
    delete [] h_B;
    std::cout << "Done\n";
    return 0;
}
|
13,177 | #include <iostream>
#include <math.h>
#include <random>
#include <fstream>
// Device helper: returns x squared.
__device__ float square(float x) {
    float result = x * x;
    return result;
}
struct Tree_Info {
    int n;
    double corner[3];  // minimum corner of the bounding cube
    double size;       // edge length of the bounding cube (assume cube)
};
// Computes the bounding box of the n particle positions in r (xyz triples),
// in serial on the host, filling tree_info->corner (per-axis minimum) and
// tree_info->size (largest per-axis extent, i.e. the cube edge).
// h, coldens and tree_children are unused here; kept for interface stability.
// Fix: the original (self-labelled "DOES NOT WORK") subtracted coordinates
// from the running corner and summed per-axis excesses, producing neither the
// min corner nor a cube edge. Replaced with a straightforward min/max sweep.
void buildtree(int n, float *r, float *h, float *coldens, int *tree_children, Tree_Info *tree_info) {
    double lo[3], hi[3];
    for (int kk = 0; kk < 3; kk++) lo[kk] = hi[kk] = r[kk];
    for (int ii = 1; ii < n; ii++) {
        float *r_i = &(r[ii*3]);
        for (int kk = 0; kk < 3; kk++) {
            if (r_i[kk] < lo[kk]) lo[kk] = r_i[kk];
            if (r_i[kk] > hi[kk]) hi[kk] = r_i[kk];
        }
    }
    tree_info->size = 0.;
    for (int kk = 0; kk < 3; kk++) {
        tree_info->corner[kk] = lo[kk];
        if (hi[kk] - lo[kk] > tree_info->size) tree_info->size = hi[kk] - lo[kk];
    }
    std::cout << "Corner, size: " << tree_info->corner[0] << " " << tree_info->corner[1] << " " << tree_info->corner[2] << " " << tree_info->size << std::endl;
}
// CUDA Kernel function function to calc number of collisions per particle
__global__ void calcray(int n, float *r, float *h, float *coldens, Tree_Info *tree_info) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
int i,j,k;
float crossp;
float norm2,h2,dot;
float *r_i,*r_j;
float dr[3];
i = index;
coldens[i] = 0.;
r_i = &(r[i*3]);
norm2 = 0.;
for (k=0 ; k<3 ; k++) norm2+=square(r_i[k]);
for (j=0 ; j<n ; j++) {
r_j = &(r[j*3]);
// check if particle is in-between origin and target
dot = 0.;
for (k=0 ; k<3 ; k++) dot+=r_i[k]*r_j[k];
if ( dot>0. && dot<norm2 ) {
// check if ray intersects particle
for (k=0 ; k<3 ; k++) dr[k] = r_i[k] - r_j[k];
crossp = square( r_j[1]*dr[2]-r_j[2]*dr[1]);
crossp+= square(-r_j[0]*dr[2]+r_j[2]*dr[0]);
crossp+= square( r_j[0]*dr[1]-r_j[1]*dr[0]);
h2 = square(h[j]);
if ( crossp<=h2*norm2 ) {
coldens[index]+=1.;
}
}
}
}
int main(void) {
    // Generates N random particles, counts ray/sphere intersections on the
    // GPU and prints the first result.
    // int N = 690286; // for comparison with tree code
    int N = 10000; // for quick tests
    float *r, *h, *coldens;
    float *d_coldens;
    int *tree_children;
    struct Tree_Info *tree_info;
    int tree_memory_factor = 8;
    cudaMallocManaged(&r, N*sizeof(float)*3);
    cudaMallocManaged(&h, N*sizeof(float));
    cudaMalloc(&d_coldens, N*sizeof(float));
    cudaMallocManaged(&tree_children, N*sizeof(int)*tree_memory_factor);
    // Fix: the original asked for sizeof(tree_info) — the size of a pointer —
    // so most of the struct was unbacked memory.
    cudaMallocManaged(&tree_info, sizeof(Tree_Info));
    coldens = new float[N];
    std::default_random_engine generator;
    std::uniform_real_distribution<float> locDistribution(-1.0,1.0);
    // std::uniform_real_distribution<float> hDistribution(0.005,0.02);
    std::uniform_real_distribution<float> hDistribution(0.05,0.1);
    // initialize positions and smoothing lengths on the host
    for (int i = 0; i < N; i++) {
        r[i*3]   = locDistribution(generator);
        r[i*3+1] = locDistribution(generator);
        r[i*3+2] = locDistribution(generator);
        h[i]     = hDistribution(generator);
    }
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    buildtree(N, r, h, d_coldens, tree_children, tree_info);
    calcray<<<numBlocks, blockSize>>>(N, r, h, d_coldens, tree_info);
    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(coldens, d_coldens, N*sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "coldens0 " << coldens[0] << std::endl;
    // Free memory.
    // Fix: coldens came from new[] — the original passed it to cudaFree.
    delete [] coldens;
    cudaFree(r);
    cudaFree(h);
    cudaFree(d_coldens);
    cudaFree(tree_children);
    cudaFree(tree_info);
    return 0;
}
|
13,178 | #include "includes.h"
// Load-balancing search (single block): for each of the aCount work-item
// indices, finds which segment of b_global (ascending segment-start offsets,
// bCount of them) contains it, writing one segment index per work-item to
// indices_global. NT threads each handle VT items (NT, VT come from
// includes.h — not visible here); assumes aCount + bCount <= NT * VT so both
// arrays fit in data_shared.
// NOTE(review): the structure matches the MGPU merge-path load-balancing
// search — confirm against that source before relying on the limits above.
__global__ void KernelLBSSimple(int aCount, const int* b_global, int bCount, int* indices_global) {
__shared__ int data_shared[NT * VT];
int tid = threadIdx.x;
// Load bCount elements from B into data_shared.
int x[VT];
#pragma unroll
for(int i = 0; i < VT; ++i) {
int index = NT * i + tid;
if(index < bCount) x[i] = b_global[index];
}
#pragma unroll
for(int i = 0; i < VT; ++i)
data_shared[NT * i + tid] = x[i];
__syncthreads();
// Each thread searches for its Merge Path partition.
// diag is this thread's cross-diagonal; binary search splits it into an
// A part (work-item indices) and a B part (segment offsets).
int diag = VT * tid;
int begin = max(0, diag - bCount);
int end = min(diag, aCount);
while(begin < end) {
int mid = (begin + end)>> 1;
int aKey = mid;
int bKey = data_shared[diag - 1 - mid];
bool pred = aKey < bKey;
if(pred) begin = mid + 1;
else end = mid;
}
int mp = begin;
// Sequentially search, comparing indices a to elements data_shared[b].
// Store indices for A in the right-side of the shared memory array.
// This lets us complete the search in just a single pass, rather than
// the search and compact passes of the generalized vectorized sorted
// search function.
int a = mp;
int b = diag - a;
#pragma unroll
for(int i = 0; i < VT; ++i) {
bool p;
if(b >= bCount) p = true;
else if(a >= aCount) p = false;
else p = a < data_shared[b];
if(p)
// If a < data_shared[b], advance A and store the index b - 1.
data_shared[bCount + a++] = b - 1;
else
// Just advance b.
++b;
}
__syncthreads();
// Store all indices to global memory.
for(int i = tid; i < aCount; i += NT)
indices_global[i] = data_shared[bCount + i];
} |
13,179 | // reverseArray.cu
// Chenfeng Hao
// HW 7
// Using a second array
#include <iostream>
#include <cstdlib>
#include <chrono>
using namespace std;
#define ARRAY_SIZE 20
#define BLOCK_SIZE 4
// Reverses arr_in into arr_out: arr_out[i] = arr_in[ARRAY_SIZE - 1 - i].
// Assumes the launch covers exactly ARRAY_SIZE threads
// (gridDim.x * BLOCK_SIZE == ARRAY_SIZE).
__global__ void cu_reverseArray(int arr_in[], int arr_out[]) {
    const int i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    arr_out[i] = arr_in[(ARRAY_SIZE - 1) - i];
}
int main(int argc, char *argv[]) {
    // Fills an array with 0..ARRAY_SIZE-1, reverses it on the GPU and times
    // the kernel with CUDA events.
    int arr_in[ARRAY_SIZE];
    int arr_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        arr_in[i] = i;
    }
    // print the initial array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        cout << arr_in[i] << ' ';
    }
    cout << endl << endl;
    int *arr_in_d;
    int *arr_out_d;
    cudaError_t result;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate space on the device.
    // Fix: check each allocation — the original overwrote the first status
    // with the second before testing it.
    result = cudaMalloc((void **) &arr_in_d, sizeof(int) * ARRAY_SIZE);
    if (result != cudaSuccess) {
        cerr << "cudaMalloc (thread) failed." << endl;
        exit(1);
    }
    result = cudaMalloc((void **) &arr_out_d, sizeof(int) * ARRAY_SIZE);
    if (result != cudaSuccess) {
        cerr << "cudaMalloc (thread) failed." << endl;
        exit(1);
    }
    // Copy the input to the device.
    // Fix: the original also uploaded the uninitialized output array, which
    // the kernel fully overwrites anyway.
    result = cudaMemcpy(arr_in_d, arr_in, sizeof(int) * ARRAY_SIZE, cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        cerr << "cudaMemcpy host->dev failed." << endl;
        exit(1);
    }
    // set execution configuration
    dim3 dimblock(BLOCK_SIZE);
    dim3 dimgrid(ARRAY_SIZE / BLOCK_SIZE);
    // launch and time the kernel
    cudaEventRecord(start);
    cu_reverseArray <<<dimgrid, dimblock>>>(arr_in_d, arr_out_d);
    cudaEventRecord(stop);
    // transfer results back (blocking copy synchronizes with the kernel)
    result = cudaMemcpy(arr_out, arr_out_d, sizeof(int) * ARRAY_SIZE, cudaMemcpyDeviceToHost);
    if (result != cudaSuccess) {
        cerr << "cudaMemcpy host <- dev (thread) failed." << endl;
        exit(1);
    }
    // release the memory on the GPU
    result = cudaFree(arr_out_d);
    if (result != cudaSuccess) {
        cerr << "cudaFree (thread) failed." << endl;
        exit(1);
    }
    result = cudaFree(arr_in_d);
    if (result != cudaSuccess) {
        cerr << "cudaFree (thread) failed." << endl;
        exit(1);
    }
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // print the reversed array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        cout << arr_out[i] << ' ';
    }
    cout << endl << endl;
    cout << "Reversing an array of size " << ARRAY_SIZE << " took " << milliseconds * 1000 << " microseconds." << endl;
    // Fix: destroy the timing events (the original leaked them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
13,180 | //This program calculate the distances of the points to a constant point
#include <iostream>
#include "cuda.h"
using namespace std;
#define N 512
// Simple 3-component point in single precision.
struct Point3D
{
float x;
float y;
float z;
};
// Euclidean distance between two points in 3-space.
__device__ float distance3D(Point3D p1, Point3D p2)
{
    const float dx = p1.x - p2.x;
    const float dy = p1.y - p2.y;
    const float dz = p1.z - p2.z;
    return sqrtf(dx*dx + dy*dy + dz*dz);
}
// One thread per point: writes the distance from *dpfocus to its point into
// dDistArray. Assumes the 3D grid of 3D blocks covers exactly the array.
__global__ void distKernel(float *dDistArray, Point3D *dpfocus, Point3D *dpArray)
{
    // Flatten the 3D grid/block coordinates into a single linear thread id.
    const int blocks_before = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int local_id = blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x;
    const int tid = threads_per_block * blocks_before + local_id;
    dDistArray[tid] = distance3D(*dpfocus, dpArray[tid]);
}
int main(void)
{
    // Computes the distance from a fixed focus point to each of N points on
    // the GPU, then prints everything. 2x2x2 grid of 2x4x8 blocks = 512 = N.
    dim3 grid(2,2,2), block(2,4,8);
    // float array to store distances
    float *distArray = NULL;
    float *dDistArray = NULL;
    distArray = (float*)malloc(N * sizeof(float));
    cudaMalloc((void**)&dDistArray, N * sizeof(float));
    // points array to store points
    Point3D *pArray = NULL;
    Point3D *dpArray = NULL;
    pArray = (Point3D*)malloc(N * sizeof(Point3D));
    for (int i = 0; i < N; i++) {
        pArray[i].x = i;
        pArray[i].y = i+1;
        pArray[i].z = i+2;
    }
    cudaMalloc((void**)&dpArray, N * sizeof(Point3D));
    cudaMemcpy(dpArray, pArray, N * sizeof(Point3D), cudaMemcpyHostToDevice);
    // point to store the focus
    Point3D *pfocus = NULL;
    Point3D *dpfocus = NULL;
    pfocus = (Point3D*)malloc(sizeof(Point3D));
    pfocus->x = 1;
    pfocus->y = 1;
    pfocus->z = 1;
    cudaMalloc((void**)&dpfocus, sizeof(Point3D));
    cudaMemcpy(dpfocus, pfocus, sizeof(Point3D), cudaMemcpyHostToDevice);
    // print the inputs
    cout << "Focus point is: "
         << "{" << pfocus->x << "," << pfocus->y << "," << pfocus->z << "}" << endl;
    cout << "Points are:" << endl;
    for (int i = 0; i < N; i++)
        cout << "{" << pArray[i].x << "," << pArray[i].y << "," << pArray[i].z << "}" << endl;
    // launch kernel
    distKernel<<<grid,block>>>(dDistArray, dpfocus, dpArray);
    cudaDeviceSynchronize();
    // copy distances back
    cudaMemcpy(distArray, dDistArray, N * sizeof(float), cudaMemcpyDeviceToHost);
    cout << "Distances are:" << endl;
    for (int i = 0; i < N; i++)
        cout << "distArray[" << i << "] = " << distArray[i] << endl;
    // Fix: release all host and device allocations (the original leaked
    // every one of them) and return an explicit status.
    free(distArray);
    free(pArray);
    free(pfocus);
    cudaFree(dDistArray);
    cudaFree(dpArray);
    cudaFree(dpfocus);
    return 0;
}
|
13,181 | //nvcc -o lab5_2_1 lab5_2_1.cu
/*Author:
Pedro Silva
*/
/*2. Implemente um programa em CUDA que calcule a soma de todos os elementos de um vetor de
tamanho N. Teste para vários valores de N.*/
/*2.1. Implemente uma versão simples (sem recorrer a optimizações).*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Sums all N elements of d_buffer into d_buffer[0] (in place).
// Fix: the original attempted an in-place tree reduction but (a) called
// __syncthreads() inside divergent control flow (the while condition differs
// per thread) and (b) required a barrier across *blocks*, which does not
// exist — both corrupted the result. Rewritten so each thread atomically
// folds its one element into d_buffer[0]: simple (this exercise asks for the
// unoptimized version) and correct for any launch covering N threads.
__global__ void vectorsum2_1(int * d_buffer, int N){
    // Global thread id; one element per thread.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Element 0 is the accumulator itself, so thread 0 contributes nothing.
    if (idx > 0 && idx < N) {
        atomicAdd(&d_buffer[0], d_buffer[idx]);
    }
}
int main(){
    // Sums vectors of ones of growing size N on the GPU and reports the
    // result (expected: N) and a rough host-side timing per iteration.
    printf("Exercicio 2, Lab 5 de CHAD. Soma de todos os elementos de um h_buffer de tamanho N.\nN comeca a 8 (2^3)e duplica até 4096 (2^10).\n");
    int *d_buffer, *result, *h_buffer;
    int error;
    struct timespec start, end;
    double startTime, endTime;
    for (int N = 256; N <= 4096; N = N*2) {
        printf("--------------------------------------------------------------------------\n");
        printf("Soma de um vector com %i elementos.\n", N);
        clock_gettime(CLOCK_MONOTONIC, &start);
        // Allocate the device buffer.
        if (cudaMalloc(&d_buffer, sizeof(int) * N) != cudaSuccess) {
            fprintf(stderr, "Error allocating memory on device.\n");
            return(-1);
        }
        // Host input: N ones, so the expected total is N.
        h_buffer = (int*) malloc(N * sizeof(int));
        for (int i = 0; i < N; i++)
            h_buffer[i] = 1;
        // Host buffer for the result (only one int is ever read).
        result = (int*) malloc(N * sizeof(int));
        // Copy the vector to the device.
        if ((error = cudaMemcpy(d_buffer, h_buffer, N * sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess)
            fprintf(stderr, "Erro a transferir vector para GPU, de dimensao %i. Error = %i.\n", N, error);
        // Fixed blocks of 32 threads; enough blocks to cover N.
        dim3 BlockSize(32, 1, 1);
        dim3 GridSize(N/32 + 1, 1, 1);
        vectorsum2_1<<<GridSize, BlockSize>>>(d_buffer, N);
        // The total ends up in the first element of d_buffer.
        cudaMemcpy(result, d_buffer, sizeof(int), cudaMemcpyDeviceToHost);
        printf("Resultado da soma de um vector de %i elementos: %i.\n", N, *result);
        // Fix: the original freed d_buffer twice per iteration; the second
        // cudaFree always failed on an already-freed pointer.
        if (cudaFree(d_buffer) != cudaSuccess)
            printf("Erro a libertar memoria no device.\n");
        clock_gettime(CLOCK_MONOTONIC, &end);
        startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6);
        endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6);
        printf("Tempo de execução do GPU kernel: %fms.\n", endTime - startTime);
        free(h_buffer);
        free(result);
    }
    return 0;
}
13,182 | #include <assert.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int main(int argc, char *argv[])
{
    // Times a naive CPU N x N integer matrix multiply using CUDA events.
    // Usage: prog N
    if (argc != 2)
        exit(2);
    int N = atoi(argv[1]);
    // Fix: the original used stack VLAs (int first[N][N]) which overflow the
    // stack for modest N; use heap allocations instead.
    int *first    = (int *)malloc((size_t)N * N * sizeof(int));
    int *second   = (int *)malloc((size_t)N * N * sizeof(int));
    int *multiply = (int *)malloc((size_t)N * N * sizeof(int));
    if (!first || !second || !multiply) {
        fprintf(stderr, "allocation failed\n");
        exit(1);
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            first[i*N + j] = rand()%10;
            second[i*N + j] = rand()%10;
            multiply[i*N + j] = 1;
        }
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Naive triple loop: multiply = first * second.
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            int sum = 0;
            for (int k = 0; k < N; k++)
                sum += first[i*N + k] * second[k*N + j];
            multiply[i*N + j] = sum;
        }
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    fprintf(stdout, "%f", milliseconds);
    // Fix: release events and heap buffers (the original leaked the events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(first); free(second); free(multiply);
    return 0;
}
|
13,183 | #include <stdio.h>
// Prints the CUDA error string with its source location and aborts the
// process; no-op on cudaSuccess. Used via the HANDLE_ERROR macro.
static void HandleError(cudaError_t err,const char * file,int line){
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (HandleError(err,__FILE__,__LINE__))
// Queries and prints basic capability info for device 0 (GPU count, max
// threads per block, max grid dimensions).
// Returns device 0's maximum number of threads per block.
int getThreadNum(){
cudaDeviceProp prop;
int count;
// Number of CUDA-capable GPUs present.
HANDLE_ERROR(cudaGetDeviceCount(&count));
printf("gpu num %d\n",count);
HANDLE_ERROR(cudaGetDeviceProperties(&prop,0));
printf("max thread num:%d\n",prop.maxThreadsPerBlock);
printf("max grid dimensions:%d %d %d\n",prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
// Naive 2D convolution: each thread computes one pixel of a width x height
// single-channel image convolved with a square kernelSize x kernelSize
// kernel, with zero padding at the borders.
// Fix: the original did `result[id] += ...` into a buffer that was never
// zeroed, so output contained whatever garbage cudaMalloc returned.
// Accumulate in a register and store once instead.
__global__ void conv(float *img,float *kernel,float *result,int width,int height,int kernelSize){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= width*height) {
        return;
    }
    int row = id/width, col = id%width;
    float sum = 0.0f;
    for (int i = 0; i < kernelSize; i++) {
        for (int j = 0; j < kernelSize; j++) {
            int curRow = row - kernelSize/2 + i;
            int curCol = col - kernelSize/2 + j;
            float imgValue = 0;   // zero padding outside the image
            if (curRow >= 0 && curCol >= 0 && curRow < height && curCol < width) {
                imgValue = img[curRow*width + curCol];
            }
            sum += kernel[i*kernelSize + j] * imgValue;
        }
    }
    result[id] = sum;
}
int main(){
    // Builds a 10x10 test image and a 3x3 kernel, convolves them on the GPU
    // and prints image, kernel and result.
    int width = 10;
    int height = 10;
    float *img = new float[width*height];
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            img[j + i*width] = (i + j) % 256;
        }
    }
    // 3x3 kernel with columns -1, 0, 1 (a simple horizontal gradient).
    int kernelSize = 3;
    float *kernel = new float[kernelSize*kernelSize];
    for (int i = 0; i < kernelSize*kernelSize; i++) {
        kernel[i] = i % kernelSize - 1;
    }
    float *imgGpu, *kernelGpu, *resultGpu;
    HANDLE_ERROR(cudaMalloc((void**)&imgGpu, width*height*sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void**)&kernelGpu, kernelSize*kernelSize*sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void**)&resultGpu, width*height*sizeof(float)));
    HANDLE_ERROR(cudaMemcpy(imgGpu, img, width*height*sizeof(float), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(kernelGpu, kernel, kernelSize*kernelSize*sizeof(float), cudaMemcpyHostToDevice));
    // Fix: conv accumulates with +=, so the output must start zeroed —
    // cudaMalloc does not clear memory.
    HANDLE_ERROR(cudaMemset(resultGpu, 0, width*height*sizeof(float)));
    int threadNum = getThreadNum();
    // Integer ceiling division instead of the float round-up trick.
    int blockNum = (width*height + threadNum - 1) / threadNum;
    conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height, kernelSize);
    float *result = new float[width*height];
    HANDLE_ERROR(cudaMemcpy(result, resultGpu, width*height*sizeof(float), cudaMemcpyDeviceToHost));
    // Visualization
    printf("img:\n");
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            printf("%2.0f ", img[j + i*width]);
        }
        puts("");
    }
    printf("kernel:\n");
    for (int i = 0; i < kernelSize; i++) {
        for (int j = 0; j < kernelSize; j++) {
            printf("%2.0lf ", kernel[i*kernelSize + j]);
        }
        puts("");
    }
    printf("result:\n");
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            printf("%2.0f ", result[j + i*width]);
        }
        puts("");
    }
    // Fix: release all host and device buffers (the original leaked them).
    delete [] img;
    delete [] kernel;
    delete [] result;
    HANDLE_ERROR(cudaFree(imgGpu));
    HANDLE_ERROR(cudaFree(kernelGpu));
    HANDLE_ERROR(cudaFree(resultGpu));
    return 0;
}
13,184 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
#include <cooperative_groups.h>
#define FULL_MASK 0xffffffff
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// Maps the extern unsized dynamic shared-memory array onto an arbitrary
// element type T via operator T*(). Declaring `extern __shared__ T ...[]`
// directly in several template instantiations would produce duplicate-symbol
// linker errors; funneling every type through the single int-typed array
// avoids that.
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
// Specialization for double: casting the int-typed shared array to double*
// triggers unaligned-access compile errors, so doubles get their own
// properly-aligned shared array.
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
// True iff x is a power of two.
// Fixes: `inline` so this header can be included from multiple translation
// units without ODR violations, and x == 0 is explicitly rejected (the bare
// bit trick wrongly accepted it).
inline bool isPow2(unsigned int x)
{
    return x != 0 && ((x&(x-1))==0);
}
/* Block-level sum reduction of -log(1 + exp(x)) (negative softplus) over the
   input, using interleaved addressing with modulo arithmetic (deliberately
   slow; kept as the baseline of this kernel-comparison set). Writes one
   partial sum per block to g_odata; requires blockDim.x * sizeof(T) dynamic
   shared memory.
   Fix: computing -log(1+exp(v)) directly overflows exp() for large v,
   yielding -inf; the equivalent -(max(v,0) + log1p(exp(-|v|))) is stable. */
template <class T>
__global__ void
reduceRegr0(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
    {
        T v = g_idata[i];
        // -softplus(v), written in its numerically stable form.
        sdata[tid] = -(max(v, (T)0) + log1p(exp(-fabs(v))));
    }
    else
    {
        sdata[tid] = 0;
    }
    cg::sync(cta);
    // do reduction in shared mem (modulo arithmetic is slow!)
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        if ((tid % (2*s)) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/* Block-level sum reduction of -log(1 + exp(x)) (negative softplus) using
   contiguous threads (index = 2*s*tid): avoids variant 0's divergence but
   its interleaved addressing causes many shared-memory bank conflicts.
   Writes one partial sum per block to g_odata.
   Fix: -log(1+exp(v)) overflows exp() for large v (yielding -inf); the
   equivalent -(max(v,0) + log1p(exp(-|v|))) is numerically stable. */
template <class T>
__global__ void
reduceRegr1(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
    {
        T v = g_idata[i];
        // -softplus(v), written in its numerically stable form.
        sdata[tid] = -(max(v, (T)0) + log1p(exp(-fabs(v))));
    }
    else
    {
        sdata[tid] = 0;
    }
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/* Block-level sum reduction of -log(1 + exp(x)) (negative softplus) using
   sequential addressing — no divergence or bank conflicts. Writes one
   partial sum per block to g_odata.
   Fix: -log(1+exp(v)) overflows exp() for large v (yielding -inf); the
   equivalent -(max(v,0) + log1p(exp(-|v|))) is numerically stable. */
template <class T>
__global__ void
reduceRegr2(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
    {
        T v = g_idata[i];
        // -softplus(v), written in its numerically stable form.
        sdata[tid] = -(max(v, (T)0) + log1p(exp(-fabs(v))));
    }
    else
    {
        sdata[tid] = 0;
    }
    cg::sync(cta);
    // do reduction in shared mem: halving stride each round
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
/* Plain block-wise sum reduction of g_idata; one partial sum per block
   is written to g_odata[blockIdx.x]. Interleaved addressing with a
   modulo test — the slow baseline variant from the CUDA SDK sample.
   Assumes power-of-2 blockDim.x and blockDim.x*sizeof(T) bytes of
   dynamic shared memory. */
template <class T>
__global__ void
reduce0(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem (zero-pad threads past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? g_idata[i] : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        // modulo arithmetic is slow!
        if ((tid % (2*s)) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/* Plain block-wise sum reduction, contiguous-thread variant
   (index = 2*s*tid): no modulo, but strided shared-memory access
   causes bank conflicts. Assumes power-of-2 blockDim.x. */
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem (zero-pad threads past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? g_idata[i] : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/* Plain block-wise sum reduction, sequential-addressing variant:
   no divergence within active warps, no bank conflicts.
   Assumes power-of-2 blockDim.x. */
template <class T>
__global__ void
reduce2(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem (zero-pad threads past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? g_idata[i] : 0;
    cg::sync(cta);
    // do reduction in shared mem (stride halves each step)
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* Block-wise sum reduction of log(g_idata[i]) — i.e. the log of the
   product of the inputs. Interleaved-addressing (modulo) variant.
   Assumes power-of-2 blockDim.x and strictly positive inputs
   (log of zero/negative values yields -inf/NaN). */
template <class T>
__global__ void
reduceLog0(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem: log-transform one element per thread (zero-pad past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? log(g_idata[i]) : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        // modulo arithmetic is slow!
        if ((tid % (2*s)) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/* Block-wise sum of log(x), contiguous-thread variant (index = 2*s*tid):
   avoids the modulo but suffers shared-memory bank conflicts.
   Assumes power-of-2 blockDim.x and strictly positive inputs. */
template <class T>
__global__ void
reduceLog1(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem: log-transform one element per thread (zero-pad past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? log(g_idata[i]) : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/* Block-wise sum of log(x), sequential-addressing variant: contiguous
   active threads, conflict-free shared memory accesses.
   Assumes power-of-2 blockDim.x and strictly positive inputs. */
template <class T>
__global__ void
reduceLog2(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem: log-transform one element per thread (zero-pad past n)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? log(g_idata[i]) : 0;
    cg::sync(cta);
    // do reduction in shared mem (stride halves each step)
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
// Host-side dispatcher (double precision): launches one block-level
// reduction kernel over `size` elements, producing `blocks` partial
// sums in d_odata (caller finishes the reduction).
//   type        : 0 = sum of -log(1+exp(x)), 1 = plain sum, 2 = sum of log(x)
//   whichKernel : 0/1/2 selects the addressing variant (see kernels above)
// NOTE(review): no cudaGetLastError() after the launches — a bad launch
// configuration would only surface at the next synchronizing call.
void reduceSum_d(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata,
int type) // 0=transform & addition, 1=addition, 2=log_addition
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    // when there is only one warp per block, we need to allocate two warps
    // worth of shared memory so that we don't index shared memory out of bounds
    int smemSize = (threads <= 32) ? 2 * threads * sizeof(double) : threads * sizeof(double);
    switch(type)
    {
    case 0:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduceRegr0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduceRegr1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduceRegr2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    case 1:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduce0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduce1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduce2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    case 2:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduceLog0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduceLog1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduceLog2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    }
}
// Host-side dispatcher (single precision): identical to reduceSum_d but
// for float buffers.
//   type        : 0 = sum of -log(1+exp(x)), 1 = plain sum, 2 = sum of log(x)
//                 (type 2 equals the log of the product — the original
//                 "2=mult" comment referred to that)
//   whichKernel : 0/1/2 selects the addressing variant (see kernels above)
void reduceSum_f(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata,
int type) // 0=transform & addition, 1=addition, 2=log_addition (sum of logs, i.e. log of product)
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    // when there is only one warp per block, we need to allocate two warps
    // worth of shared memory so that we don't index shared memory out of bounds
    int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
    switch(type)
    {
    case 0:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduceRegr0<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduceRegr1<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduceRegr2<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    case 1:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduce0<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduce1<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduce2<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    case 2:
        // choose which of the optimized versions of reduction to launch
        switch (whichKernel)
        {
        case 0:
            reduceLog0<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 1:
            reduceLog1<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        case 2:
            reduceLog2<float><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
            break;
        }
        break;
    }
}
#endif // _REDUCE_KERNEL_H_
|
13,185 | //headers
#include <stdio.h>
#include <cuda.h> //standard cuda header file
//global variables
int inputLength = 5;
float *hostInput1 = NULL;
float *hostInput2 = NULL;
float *hostOutput = NULL;
float *deviceInput1 = NULL;
float *deviceInput2 = NULL;
float *deviceOutput = NULL;
//global kernel function definition
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i < len.
// One thread per element; threads past the end of the vectors return
// immediately (the grid is rounded up to a whole number of blocks).
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    out[i] = in1[i] + in2[i];
}
// Demo driver: adds two hard-coded 5-element vectors on the GPU and
// prints the element-wise sums. All buffers are module-level globals so
// that cleanup() can release whatever was allocated before a failure.
int main(int argc, char *argv[])
{
    //function declaration
    void cleanup(void);
    //code
    //allocate host memory
    hostInput1 = (float *)malloc(inputLength * sizeof(float));
    if(hostInput1 == NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExiting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    hostInput2 = (float *)malloc(inputLength * sizeof(float));
    if(hostInput2 == NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExiting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    hostOutput = (float *)malloc(inputLength * sizeof(float));
    if(hostOutput == NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExiting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    //fill above input host vectors with arbitary but hard-coded data
    hostInput1[0] = 101.0f;
    hostInput1[1] = 102.0f;
    hostInput1[2] = 103.0f;
    hostInput1[3] = 104.0f;
    hostInput1[4] = 105.0f;
    hostInput2[0] = 201.0f;
    hostInput2[1] = 202.0f;
    hostInput2[2] = 203.0f;
    hostInput2[3] = 204.0f;
    hostInput2[4] = 205.0f;
    //allocate the device memory
    int size = inputLength * sizeof(float);
    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void **)&deviceInput1, size);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&deviceInput2, size);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&deviceOutput, size);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //copy host memory contents to device memory
    err = cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //cuda kernel configuration
    // 1-D grid of 256-thread blocks, rounded up to cover inputLength.
    dim3 DimGrid = dim3(ceil(inputLength / 256.0), 1, 1);
    dim3 DimBlock = dim3(256, 1, 1);
    vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    //copy device memory to host memory
    // NOTE(review): there is no cudaGetLastError() after the launch; a
    // launch failure would surface through this blocking cudaMemcpy.
    err = cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //result
    int i;
    for(i = 0; i < inputLength; i++)
    {
        printf("%f + %f = %f\n", hostInput1[i], hostInput2[i], hostOutput[i]);
    }
    //total cleanup
    cleanup();
    return (0);
}
// Releases every global device and host buffer allocated by main().
// Safe to call multiple times and after partial allocation failures:
// each pointer is NULL-checked and reset to NULL after being freed.
void cleanup(void)
{
    // Device buffers first, in reverse order of allocation.
    if (deviceOutput) { cudaFree(deviceOutput); deviceOutput = NULL; }
    if (deviceInput2) { cudaFree(deviceInput2); deviceInput2 = NULL; }
    if (deviceInput1) { cudaFree(deviceInput1); deviceInput1 = NULL; }
    // Then the host buffers, also in reverse order of allocation.
    if (hostOutput) { free(hostOutput); hostOutput = NULL; }
    if (hostInput2) { free(hostInput2); hostInput2 = NULL; }
    if (hostInput1) { free(hostInput1); hostInput1 = NULL; }
}
|
13,186 | #include <cuda.h>
#include <stdio.h>
// Prints an identification line (block id, thread id, flat global id)
// for every thread in the launch. Device-side printf; output order is
// unspecified and only flushed at the next synchronizing call.
__global__ void kernel(void)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello World - Block : %d - Thread : %d - Global Thread ID : %d\n" ,
    blockIdx.x, threadIdx.x,idx);
}
// Reads a launch configuration from stdin and launches the hello kernel.
// NOTE(review): the scanf return value is unchecked — malformed input
// leaves num_blocks/num_threads uninitialized.
int main()
{
    int num_threads, num_blocks;
    printf("Enter number of blocks and threads per block !\n");
    scanf("%d%d",&num_blocks,&num_threads);
    kernel<<<num_blocks,num_threads>>>();
    // Required: device printf output is only flushed on synchronization.
    cudaDeviceSynchronize();
    printf("Hello World !!\n");
} |
13,187 | #include "includes.h"
// Block-level integer sum reduction with 8-way unrolled global loads
// and a completely unrolled in-block tree. Each block consumes
// 8 * blockDim.x consecutive input elements (in place, clobbering
// g_idata) and writes one partial sum to g_odata[blockIdx.x].
// Assumes power-of-2 blockDim.x <= 1024 and n a multiple of
// 8 * blockDim.x for the unrolled preload to cover every element.
//
// Fix vs. previous revision: the 128->64 stage wrote idata[idx]
// (a GLOBAL index) instead of idata[tid], corrupting memory outside
// the block's window and producing wrong sums for every block > 0.
__global__ void reduceCompleteUnrollWarps8(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
    // Base of this block's 8*blockDim.x-element window of the input.
    int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
    // Unrolling 8: fold eight strided input elements into one, in global memory.
    if(idx + 7 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
        g_idata[idx] += g_idata[idx + 4 * blockDim.x];
        g_idata[idx] += g_idata[idx + 5 * blockDim.x];
        g_idata[idx] += g_idata[idx + 6 * blockDim.x];
        g_idata[idx] += g_idata[idx + 7 * blockDim.x];
    }
    __syncthreads();
    // Fully unrolled in-place tree reduction over the block's window.
    if(blockDim.x >= 1024 && tid < 512)
        idata[tid] += idata[tid + 512];
    __syncthreads();
    if(blockDim.x >= 512 && tid < 256)
        idata[tid] += idata[tid + 256];
    __syncthreads();
    if(blockDim.x >= 256 && tid < 128)
        idata[tid] += idata[tid + 128];
    __syncthreads();
    if(blockDim.x >= 128 && tid < 64)
        idata[tid] += idata[tid + 64];   // was idata[idx]: global-index typo
    __syncthreads();
    // Final warp: volatile accesses rely on warp-synchronous execution
    // (pre-Volta idiom; newer architectures need __syncwarp() between steps).
    if(tid < 32){
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid + 8];
        vmem[tid] += vmem[tid + 4];
        vmem[tid] += vmem[tid + 2];
        vmem[tid] += vmem[tid + 1];
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
} |
13,188 | #include "includes.h"
#define DIMENSIONS 2
#define GPU_DEVICE_ZERO 0
__global__ void pointToThreadMove(int pointsCounter, int threadsInsideBlock, double dt, double *pointsInGpu, double *speedArrayInGpu)
{
    /**
    Advances one point per thread by its velocity over time step dt
    (explicit Euler). Each block owns a contiguous slab of
    DIMENSIONS * threadsInsideBlock doubles; a thread updates the
    DIMENSIONS coordinates of its own point.
    **/
    // Number of live points in the (possibly partial) last block.
    const int blockDimLeft = pointsCounter % blockDim.x;
    // Tail threads of the last block have no point to move.
    if (blockIdx.x == gridDim.x - 1 && threadIdx.x >= blockDimLeft)
        return;
    const int base = blockIdx.x * DIMENSIONS * threadsInsideBlock
                   + threadIdx.x * DIMENSIONS;
    for (int d = 0; d < DIMENSIONS; ++d)
        pointsInGpu[base + d] += speedArrayInGpu[base + d] * dt;
} |
13,189 | #include "includes.h"
#define GravConst 6.674e-11
#define EPS 1e-6
#define POINTS_SIZE 10
//struct Point {
// float x, y, z;
// float vx = 0, vy = 0, vz = 0;
// float ax = 0, ay = 0, az = 0;
// float m;
//};
// Device helper: returns x squared.
__device__ inline float sqr(float x) { return x * x; }
// One Euler step of a tiled N-body gravity simulation. Each thread owns
// one body; a body is 10 floats (x,y,z,m,vx,vy,vz,ax,ay,az). Bodies are
// swept in tiles of 128 staged through shared memory, so the kernel
// assumes blockDim.x == 128 and gridDim.x (= steps) >= ceil(size/128).
//
// Fixes vs. the previous revision:
//  * the shared staging loop wrote every field to
//    cached_points[tix * POINTS_SIZE] (missing "+ i"), so the cache held
//    one overwritten element per body instead of the full record;
//  * __syncthreads() was executed inside divergent branches
//    (if (idx < size) and the inner force loop) — undefined behaviour;
//    barriers now run uniformly for the whole block, once per tile;
//  * the force accumulators were reset and the results written once per
//    tile, so only the LAST tile contributed to the output; forces now
//    accumulate over all tiles and resPoints is written once at the end.
__global__ void calc(float* points, float* resPoints, int dt, int size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tix = threadIdx.x;
    int steps = gridDim.x;            // number of 128-body tiles to sweep
    __shared__ float cached_points[128 * POINTS_SIZE];
    float fx = 0, fy = 0, fz = 0;
    float x = 0, y = 0, z = 0, m = 0;
    float vx = 0, vy = 0, vz = 0, ax = 0, ay = 0, az = 0;
    if (idx < size) {
        x = points[idx * POINTS_SIZE + 0];
        y = points[idx * POINTS_SIZE + 1];
        z = points[idx * POINTS_SIZE + 2];
        m = points[idx * POINTS_SIZE + 3];
        vx = points[idx * POINTS_SIZE + 4];
        vy = points[idx * POINTS_SIZE + 5];
        vz = points[idx * POINTS_SIZE + 6];
        ax = points[idx * POINTS_SIZE + 7];
        ay = points[idx * POINTS_SIZE + 8];
        az = points[idx * POINTS_SIZE + 9];
    }
    for (int iteration = 0; iteration < steps; iteration++) {
        // Stage the next tile of up to 128 bodies into shared memory.
        if (iteration * 128 + tix < size)
            for (int i = 0; i < POINTS_SIZE; i++)
                cached_points[tix * POINTS_SIZE + i] =
                    points[(iteration * 128 + tix) * POINTS_SIZE + i];
        __syncthreads();   // tile fully staged before anyone reads it
        if (idx < size) {
            for (int i = 0; i < 128; i++) {
                if (iteration * 128 + i < size && iteration * 128 + i != idx) {
                    float dx = cached_points[i * POINTS_SIZE] - x;
                    float dy = cached_points[i * POINTS_SIZE + 1] - y;
                    float dz = cached_points[i * POINTS_SIZE + 2] - z;
                    float dist = sqrt(dx * dx + dy * dy + dz * dz);
                    // Softened gravity avoids the singularity at dist == 0.
                    float F = (GravConst * m * cached_points[i * POINTS_SIZE + 3]) / (dist * dist + 0.001f * 0.001f);
                    fx += F * dx / dist;
                    fy += F * dy / dist;
                    fz += F * dz / dist;
                }
            }
        }
        __syncthreads();   // all reads done before the next tile overwrites the cache
    }
    if (idx < size) {
        // Integrate position/velocity and store acceleration = F / m.
        resPoints[idx * POINTS_SIZE] = x + vx * dt + (ax * sqr(dt)) / 2;
        resPoints[idx * POINTS_SIZE + 1] = y + vy * dt + (ay * sqr(dt)) / 2;
        resPoints[idx * POINTS_SIZE + 2] = z + vz * dt + (az * sqr(dt)) / 2;
        resPoints[idx * POINTS_SIZE + 3] = m;
        resPoints[idx * POINTS_SIZE + 4] = vx + ax * dt;
        resPoints[idx * POINTS_SIZE + 5] = vy + ay * dt;
        resPoints[idx * POINTS_SIZE + 6] = vz + az * dt;
        resPoints[idx * POINTS_SIZE + 7] = fx / m;
        resPoints[idx * POINTS_SIZE + 8] = fy / m;
        resPoints[idx * POINTS_SIZE + 9] = fz / m;
    }
}
/*if (idx < size) {
float* forcesArr = forcesCalc(points, idx, size);
resPoints[idx] = points[idx] + points[idx + 4] * dt + (points[idx + 7] * sqr(dt)) / 2;
resPoints[idx + 1] = points[idx + 1] + points[idx + 5] * dt + (points[idx + 8] * sqr(dt)) / 2;
resPoints[idx + 2] = points[idx + 2] + points[idx + 6] * dt + (points[idx + 9] * sqr(dt)) / 2;
resPoints[idx + 3] = points[idx + 3];
resPoints[idx + 4] = points[idx + 4] + points[idx + 7] * dt;
resPoints[idx + 5] = points[idx + 5] + points[idx + 8] * dt;
resPoints[idx + 6] = points[idx + 6] + points[idx + 9] * dt;
resPoints[idx + 7] = forcesArr[0] / points[idx + 3];
resPoints[idx + 8] = forcesArr[1] / points[idx + 3];
resPoints[idx + 9] = forcesArr[2] / points[idx + 3];
}*/
} |
13,190 | #include "includes.h"
// One assignment + accumulation step of 2-D k-means with k = 2.
// Each thread handles one interleaved (x, y) point: it computes the
// squared Euclidean distance to both centroids, picks the nearer one,
// and atomically adds its coordinates into that centroid's running sum
// and count. The host presumably divides newcentroids by counter
// afterwards to finish the update — confirm against the caller.
// NOTE(review): `n` appears to be the length of `input` in shorts
// (i.e. 2 * number of points), since `i` is an element offset.
__global__ void kmeans (short int *input, short int*centroids, int*newcentroids, int *counter, const int n)
{
    int Dim = 2;
    // Offset of this thread's point (Dim shorts per point).
    int i = (blockIdx.x * blockDim.x + threadIdx.x)*Dim;
    if ( i < n ) {
        // map: per-axis differences to both centroids
        int point_d0 = input[i+0];
        int point_d1 = input[i+1];
        int k0_d0 = point_d0 - centroids[0];
        int k0_d1 = point_d1 - centroids[1];
        int k1_d0 = point_d0 - centroids[2];
        int k1_d1 = point_d1 - centroids[3];
        k0_d0 *= k0_d0;
        k0_d1 *= k0_d1;
        k1_d0 *= k1_d0;
        k1_d1 *= k1_d1;
        // reduce sum: squared distances
        k0_d0 = k0_d0 + k0_d1;
        k1_d0 = k1_d0 + k1_d1;
        // reduce min: index of the nearer centroid
        int k = (k0_d0 < k1_d0 ) ? 0 : 1;
        // add current point to new centroids sum
        atomicAdd(&(newcentroids[Dim*k]), point_d0);
        atomicAdd(&(newcentroids[Dim*k+1]),point_d1);
        atomicAdd(&(counter[k]),1);
    } // if
} |
// Non-inlined device helper returning a + b. The noinline attribute
// keeps the call visible in the generated code (e.g. to measure call
// overhead in the benchmark kernel below).
__device__
int __attribute__ ((noinline)) add(int a, int b) {
    int sum = a;
    sum += b;
    return sum;
}
extern "C"
__global__
// Benchmark kernel: repeatedly rewrites p[idx] = l[idx] + r[idx] via the
// non-inlined add() helper, in two separately counted phases (iter1 and
// iter2). Out-of-range threads do no work in either phase.
void vecAdd(int *l, int *r, int *p, size_t N, size_t iter1, size_t iter2) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    for (size_t i = 0; i < iter1; ++i)
        p[idx] = add(l[idx], r[idx]);
    for (size_t i = 0; i < iter2; ++i)
        p[idx] = add(l[idx], r[idx]);
}
|
13,192 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <cuda_runtime_api.h>
#include <iostream>
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel: one thread per cell of a Game-of-Life-style iteration on
// a W x H torus (wrap-around edges). Input grid `a`, output grid `c`,
// both stored as a[i*H + j] with column i in [0,W) and row j in [0,H).
// Variant rule: a dead cell also revives with exactly 6 live neighbours.
__global__ void deviceIteration(int *c, int *a, int H, int W)
{
    // Flat index of the cell this thread is responsible for.
    const int k = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads past the end of the grid do nothing.
    if (k < W*H) {
        // Recover the 2-D position in the matrix.
        const int i = k / H;
        const int j = k % H;
        int sum = 0;
        // Wrapped (toroidal) neighbour coordinates.
        const int left = (i + W - 1) % W;
        const int right = (i + 1) % W;
        const int down = (j + 1) % H;
        const int up = (j + H - 1) % H;
        // Count live neighbours.
        // right neighbour (original comment said "left")
        if (a[right * H + j])
        sum++;
        // left neighbour (original comment said "right")
        if (a[left * H + j])
        sum++;
        // up
        if (a[i * H + up])
        sum++;
        // down
        if (a[i * H + down])
        sum++;
        // upright
        if (a[right * H + up])
        sum++;
        // downright
        if (a[right * H + down])
        sum++;
        // upleft
        if (a[left * H + up])
        sum++;
        // downleft
        if (a[left * H + down])
        sum++;
        int value = a[k];
        int result = 0;
        // Apply the survival/birth rule (birth on 3 or 6 neighbours).
        if ((value == 1 && (sum == 2 || sum == 3)) || (value == 0 && (sum == 3 || sum == 6))) {
            result = 1;
        }
        // Store the new state in the output grid.
        c[k] = result;
    }
}
// CUDA kernel. Each thread takes care of one element of c
// Branch-free variant of deviceIteration: identical rule and layout,
// but neighbour counting and the survive/birth decision are written as
// arithmetic/boolean expressions instead of if statements (assumes cell
// values are strictly 0 or 1).
__global__ void deviceIterationNotIf(int *c, int *a, int H, int W)
{
    // Flat index of the cell this thread is responsible for.
    const int k = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads past the end of the grid do nothing.
    if (k < W*H) {
        // Recover the 2-D position in the matrix.
        const int i = k / H;
        const int j = k % H;
        int sum = 0;
        // Wrapped (toroidal) neighbour coordinates.
        const int left = (i + W - 1) % W;
        const int right = (i + 1) % W;
        const int down = (j + 1) % H;
        const int up = (j + H - 1) % H;
        // Sum the eight neighbour values directly (0/1 cells).
        // right neighbour (original comment said "left")
        sum += a[right * H + j];
        // left neighbour (original comment said "right")
        sum += a[left * H + j];
        // up
        sum += a[i * H + up];
        // down
        sum += a[i * H + down];
        // upright
        sum += a[right * H + up];
        // downright
        sum += a[right * H + down];
        // upleft
        sum += a[left * H + up];
        // downleft
        sum += a[left * H + down];
        int value = a[k];
        // Survive on 2-3 neighbours; birth on 3 or 6 (bool converts to 0/1).
        c[k] = (value == 1 && (sum == 2 || sum == 3)) || (value == 0 && (sum == 3 || sum == 6));
    }
}
// Runs one grid iteration on the GPU: copies `a` to the device, executes
// deviceIterationNotIf, and copies the result back into `a` in place.
// NOTE(review): device allocation and transfer happen on every call —
// fine for a demo, wasteful if called in a loop.
void run(int* a, int W, int H){
    //Host output vector
    int *h_c;
    // Device input vector
    int *d_a;
    //Device output vector
    int *d_c;
    // Size, in bytes, of each vector
    size_t bytes = W*H*sizeof(int);
    // Allocate memory for each vector on host
    h_c = (int*)malloc(bytes);
    // Allocate memory for each vector on GPU
    cudaMalloc((void **) &d_a, bytes);
    cudaMalloc((void **) &d_c, bytes);
    // Copy host vectors to device
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    int blockSize, gridSize, n;
    // Total number of cells in the grid.
    n = H*W;
    // Block size (8 threads per block; 32 left commented out).
    //blockSize = 32;
    blockSize = 8;
    // Number of thread blocks in grid (rounded up to cover n)
    gridSize = (int)ceil((float)n/blockSize);
    // Execute the kernel
    deviceIterationNotIf<<< gridSize, blockSize >>>(d_c, d_a, H, W);
    // Copy array back to host (blocking copy also synchronizes the kernel)
    cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
    // Copy the new generation back into the caller's grid in place.
    int i,j;
    for(i=0; i<W; i++) {
        for(j=0; j<H; j++) {
            a[i*H + j] = h_c[i*H + j];
        }
    }
    // Release device memory
    cudaFree(d_a);
    cudaFree(d_c);
    // Release host memory
    free(h_c);
}
|
13,193 | #include "includes.h"
//#define DEBUG
//#define HANDLE_ERROR(x) if((x) != 0) cout << "Error!" << endl;
using namespace std;
// Sparse sub-block descriptor (COO-style):
//   nnz_global_i_idx / nnz_global_o_idx : global input/output indices
//   nnz                                 : number of stored non-zeros
//   nnz_local_r_idx / nnz_local_c_idx   : row/column indices local to the block
//   nnz_values                          : the non-zero values themselves
// NOTE(review): field semantics inferred from names only — confirm
// against the code that populates these arrays.
struct SubBlock{
    int * nnz_global_i_idx;
    int * nnz_global_o_idx;
    int nnz;
    int * nnz_local_r_idx;
    int * nnz_local_c_idx;
    float * nnz_values;
};
//void printSubBlocksInfo(SubBlock * sbs, int nsbs, int mem_b_size);
// Empty kernel — does nothing (placeholder / device-context warm-up).
__global__ void cudaDummy(){
} |
13,194 | // past distance driven
#include <math.h>
#define ABS(x) ((x) > 0 ? (x) : - (x))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define PI 3.141592653589793
// Cone-beam backprojection kernel (distance-driven, work in progress):
// one thread per voxel on a 16x16x4 thread-block tiling of the volume.
// NOTE(review): the early `img[id] = -1.0f;return;` short-circuits the
// kernel — everything after it is dead code, apparently left in for
// debugging. Remove that statement to restore the real computation.
__global__ void kernel_backprojection(float *img, float *proj, float angle, float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz){
    // Voxel coordinates from the fixed 16x16x4 block shape.
    int ix = 16 * blockIdx.x + threadIdx.x;
    int iy = 16 * blockIdx.y + threadIdx.y;
    int iz = 4 * blockIdx.z + threadIdx.z;
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    int id = ix + iy * nx + iz * nx * ny;
    img[id] = -1.0f;return;  // DEBUG short-circuit: rest of kernel is dead code
    float cphi, sphi, x1, y1, z1, xc, yc, zc, xc0, yc0, a, b;// x20, y20, x2, y2, z2, x2n, y2n, z2n, x2m, y2m, p2x, p2y, p2z, p2xn, p2yn, p2zn, ptmp;
    // float ds, dt, temp, dst, det;
    // float xc, yc, zc, xcn, ycn, zcn, xcm, ycm, xc0, yc0;
    // float as, ae, bs, be, atmp, btmp, dsp, dtp, L;
    int ia, ib;
    // Source on the -x axis after rotating the geometry by angle + PI.
    angle += PI;
    cphi = (float)__cosf(angle);
    sphi = (float)__sinf(angle);
    x1 = -SO;
    y1 = 0.0f;
    z1 = 0.0f;
    // Voxel center in volume-centered coordinates.
    xc = ix + 0.5f - nx / 2;
    yc = iy + 0.5f - ny / 2;
    zc = iz + 0.5f - nz / 2;
    // Rotate the voxel into the source frame.
    xc0 = xc * cphi + yc * sphi;
    yc0 = -xc * sphi + yc * cphi;
    // Detector coordinates (a, b) of the voxel's projection.
    a = (xc0 - x1) / SD * yc0 / da - ai;
    b = (xc0 - x1) / SD * zc / db - bi;
    //if (a < 0 || a >= na)
    // return;
    //if (b < 0 || b >= nb)
    // return;
    ia = (int)floor(a);
    ib = (int)floor(b);
    img[id] -= ia + ib * na;//proj[ib + (na - 1 - ia) * nb];
} |
13,195 | // Multiply two matrices A * B = C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define WA 3 // Matrix A width
#define HA 3 // Matrix A height
#define WB 3 // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
// Allocates a matrix with random float entries.
// Fills data[0..size) with pseudo-random floats in [0, 1], drawn from
// rand() (the caller seeds the generator with srand()).
void randomInit(float* data, int size)
{
    for (int i = 0; i < size; ++i) {
        const float r = (float)rand();
        data[i] = r / (float)RAND_MAX;
    }
}
// Naive O(n^3) square matrix multiplication on the CPU: C = A * B.
// All three matrices are size_C x size_C, stored row-major; size_C is
// the matrix DIMENSION (side length), not the element count.
void multiply(float* h_A, float* h_B, float* C, int size_C)
{
    for (int row = 0; row < size_C; ++row) {
        const int rowBase = row * size_C;
        for (int col = 0; col < size_C; ++col) {
            // Dot product of A's row with B's column.
            float acc = 0.f;
            for (int n = 0; n < size_C; ++n)
                acc += h_A[rowBase + n] * h_B[n * size_C + col];
            C[rowBase + col] = acc;
        }
    }
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Fills two WA x HA / WB x HB matrices with random values, multiplies
// them on the CPU, and prints A, B and C = A * B.
int main(int argc, char** argv)
{
    // set seed for rand()
    srand(2006);
    // 1. allocate host memory for matrices A and B
    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);
    // 2. initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // 3. print out A and B
    printf("\n\nMatrix A\n");
    for(int i = 0; i < size_A; i++)
    {
        printf("%f ", h_A[i]);
        if(((i + 1) % WA) == 0)
            printf("\n");
    }
    printf("\n\nMatrix B\n");
    for(int i = 0; i < size_B; i++)
    {
        printf("%f ", h_B[i]);
        if(((i + 1) % WB) == 0)
            printf("\n");
    }
    // 4. allocate host memory for the result C
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);
    // 5. perform the calculation
    // BUG FIX: multiply() takes the matrix DIMENSION, not the element
    // count. Passing size_C (= WC*HC = 9) made it treat the 3x3 inputs
    // as 9x9 matrices and read/write far outside the 9-element buffers.
    multiply(h_A, h_B, h_C, WC);
    // 6. print out the results
    printf("\n\nMatrix C (Results)\n");
    for(int i = 0; i < size_C; i++)
    {
        printf("%f ", h_C[i]);
        if(((i + 1) % WC) == 0)
            printf("\n");
    }
    printf("\n");
    // 7. clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
}
|
13,196 | #include <iostream>
#include "../include/glist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
// Exhaustive device-side smoke test of gpu_stl::list<float>: exercises
// empty/size, push_back, insert, iteration, front/back read+write,
// find/erase, reverse, pop_front/pop_back, iterator copy and clear,
// streaming observed values into `output` (10086 marks section breaks).
// NOTE(review): all 100 launched threads write the same output slots —
// racy by design, acceptable only because every thread writes the same
// values.
__global__ void test(float *output){
    gpu_stl::list<float> list;
    int idx = 0;
    // Fresh list: empty() == 1, size() == 0.
    output[idx++] = (float)list.empty();
    output[idx++] = (float)list.size();
    for(int i=0;i<10;++i){
        list.push_back(i*1.7);
        output[idx++] = (float)list.empty();
        output[idx++] = (float)list.size();
    }
    // Interleave 55s before the last element and 77s two past each insert.
    for(int i=0;i<6;++i) {
        auto p = list.insert(--list.end(), 55);
        ++p;
        ++p;
        list.insert(p, 77);
    }
    for(auto p=list.begin(); p!=list.end();++p) output[idx++] = *p;
    // front()/begin() must agree, and writing through begin() updates front().
    output[idx++] = list.front();
    output[idx++] = *list.begin();
    *list.begin() = 3.1415926;
    output[idx++] = list.front();
    output[idx++] = *list.begin();
    output[idx++] = 10086;
    // back()/--end() must agree, and writing through --end() updates back().
    output[idx++] = list.back();
    output[idx++] = *(--list.end());
    *(--list.end()) = 3.124235;
    output[idx++] = list.back();
    output[idx++] = *(--list.end());
    // Erase every occurrence of 77 via find().
    gpu_stl::list<float>::iterator p;
    while((p=list.find(77))!=list.end()){
        list.erase(p);
    }
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();++p) output[idx++] = *p;
    list.reverse();
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();++p) output[idx++] = *p;
    list.pop_front();
    list.pop_back();
    output[idx++] = 10086;
    for(auto p=list.begin(); p!=list.end();++p) output[idx++] = *p;
    // Iterator copy construction.
    gpu_stl::list<float>::iterator p1(--list.end());
    output[idx++] = 10086;
    output[idx++] = *p1;
    list.clear();
    output[idx++] = 10086;
    output[idx++] = list.empty();
    output[idx++] = 10086;
    output[idx++] = list.size();
    output[idx++] = max(123141, 2335436);
    printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
// Allocates a 120-float device buffer, runs the list smoke test on a
// 10x10 launch, and prints the results. NOTE(review): no explicit sync
// or error check after the launch; reading dev_out through thrust in
// the range-for presumably synchronizes — confirm.
int main(){
    def_dvec(float) dev_out(120, 0);
    test<<<10, 10>>>(to_ptr(dev_out));
    for(auto k:dev_out) cout<<k<<' ';
    cout<<endl;
    return 0;
}
|
13,197 | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An example global device symbol.
__device__ int deviceArray[4];
// Host-side array of matching shape (e.g. for copy-to/from-symbol tests).
int hostArray[4];
namespace gpu_runtime {
namespace testing {
// An example of a global device symbol in a namespace.
__device__ int nestedDeviceArray[4];
} // namespace testing
} // namespace gpu_runtime
// Add two integer arrays and store the result in the first array.
//
// Assumes blockDim.y = blockDim.z = gridDim.x = gridDim.y = gridDim.z = 1,
// i.e. a single 1-D block sized exactly to the arrays (no bounds check).
__global__ void addKernel(int* a, int* b) { a[threadIdx.x] += b[threadIdx.x]; }
// Reports which threads and blocks ran this kernel: records each
// thread's (threadIdx, blockIdx) coordinates into six parallel output
// arrays, indexed by a linearized global thread id (thread x fastest,
// then thread y, z, then block x, y, z).
__global__ void reportThreadsKernel(int* threadIdsX, int* threadIdsY,
                                    int* threadIdsZ, int* blockIdsX,
                                    int* blockIdsY, int* blockIdsZ) {
  // Accumulate the flat index dimension by dimension, growing the
  // stride as each extent is consumed.
  int tid = threadIdx.x;
  int stride = blockDim.x;
  tid += stride * threadIdx.y;
  stride *= blockDim.y;
  tid += stride * threadIdx.z;
  stride *= blockDim.z;
  tid += stride * blockIdx.x;
  stride *= gridDim.x;
  tid += stride * blockIdx.y;
  stride *= gridDim.y;
  tid += stride * blockIdx.z;
  threadIdsX[tid] = threadIdx.x;
  threadIdsY[tid] = threadIdx.y;
  threadIdsZ[tid] = threadIdx.z;
  blockIdsX[tid] = blockIdx.x;
  blockIdsY[tid] = blockIdx.y;
  blockIdsZ[tid] = blockIdx.z;
}
|
13,198 | /*
* Source.cpp
*
* Created on: 11 янв. 2016 г.
* Author: aleksandr
*/
#include "Source.h"
|
13,199 | #include "includes.h"
// Linear search over the ascending coordinate array x[0..nx): returns
// the index of the grid cell containing xadv, i.e. the index of the
// last coordinate below xadv (clamped to 0 when xadv precedes the grid,
// and to nx-1 when it exceeds every coordinate).
__device__ unsigned int locationAlgo(double *x, double xadv, unsigned int nx)
{
    unsigned int location = 0;
    // BUG FIX: the bounds test must run BEFORE the array read. The
    // previous operand order (x[location] < xadv && location < nx)
    // dereferenced x[nx] — one element past the end — whenever xadv was
    // larger than every grid coordinate.
    while (location < nx && x[location] < xadv)
        location++;
    if(location == 0)
        return location;
    else
        return location-1;
}
// Locates, for every advected point (xadv, yadv), the grid cell
// (cellx, celly) it landed in, classifying each point in `tracker`:
//   1 = inside the domain in both axes (both cell indices computed)
//   2 = x inside, y out of bounds (y clamped to first/last cell)
//   3 = y inside, x out of bounds (x clamped to first/last cell)
//   4 = out of bounds in both axes (no cell indices written)
// The domain is the open box (xlim1, xlim2) x (ylim1, ylim2).
// NOTE(review): there is no guard against index_x >= nx or
// index_y >= ny — the launch presumably tiles the nx*ny grid exactly
// with TileSize-square blocks; confirm against the host code.
__global__ void find_advection_point_location_cuda(double *x, double *y, double *xadv, double *yadv, unsigned int nx, unsigned int ny, unsigned int *cellx, unsigned int *celly, unsigned int *tracker, double xlim1, double xlim2, double ylim1, double ylim2, unsigned int TileSize)
{
    unsigned int bx = blockIdx.x;
    unsigned int by = blockIdx.y;
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    // 2-D position of this thread's point within the nx-wide grid.
    unsigned int index_x = bx * TileSize + tx;
    unsigned int index_y = by * TileSize + ty;
    unsigned int indexToWrite = index_y * nx + index_x;
    bool xoutofbounds = false;
    bool youtofbounds = false;
    if(!((xadv[indexToWrite] > xlim1) && (xadv[indexToWrite] < xlim2)))
        xoutofbounds = true;
    if(!((yadv[indexToWrite] > ylim1) && (yadv[indexToWrite] < ylim2)))
        youtofbounds = true;
    if(!xoutofbounds && !youtofbounds)
    {
        tracker[indexToWrite] = 1;
        cellx[indexToWrite] = locationAlgo(x,xadv[indexToWrite],nx);
        celly[indexToWrite] = locationAlgo(y,yadv[indexToWrite],ny);
    }
    else
    if(!xoutofbounds && youtofbounds)
    {
        tracker[indexToWrite] = 2;
        cellx[indexToWrite] = locationAlgo(x,xadv[indexToWrite],nx);
        // Clamp y to the boundary cell on the side it escaped from.
        if(yadv[indexToWrite] <= ylim1)
            celly[indexToWrite] = 0;
        else
        if(yadv[indexToWrite] >= ylim2)
            celly[indexToWrite] = ny-2;
    }
    else
    if(xoutofbounds && !youtofbounds)
    {
        tracker[indexToWrite] = 3;
        celly[indexToWrite] = locationAlgo(y,yadv[indexToWrite],ny);
        // Clamp x to the boundary cell on the side it escaped from.
        if(xadv[indexToWrite] <= xlim1)
            cellx[indexToWrite] = 0;
        else
        if(xadv[indexToWrite] >= xlim2)
            cellx[indexToWrite] = nx-2;
    }
    else
    if(xoutofbounds && youtofbounds)
        tracker[indexToWrite] = 4;
} |
extern "C"
// Smoke-test stub of a forward-convolution kernel: all convolution
// parameters are accepted but ignored. Each thread writes 1 into its
// shared-memory slot and mirrors share[63 - tid] back to the two output
// pointers (so only the value written by the last scheduled thread
// survives in *param_O / *param_test).
// NOTE(review): share[63 - tid] is read without a barrier after the
// writes. That is only race-free if blockDim.x <= 32 (single warp,
// pre-Volta semantics); with 64 threads a __syncthreads() is needed
// between the store and the loads — confirm the intended launch shape.
__global__ void sconv_fprop_K64_N64 (
    float* param_test,
    float *param_O,
    const float *param_I,
    const float *param_F,
    float param_alpha,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_KRST,
    int param_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ) {
    __shared__ float share[64 * 8 * 4 + 8];
    int tid = threadIdx.x;
    share[tid] = 1;
    *param_O = share[63-tid];
    *param_test = share[63-tid];
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.