serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,101 | #include <stdio.h>
// Busy-wait kernel: spins for roughly kernel_time milliseconds of device time.
// clockRate is the device clock frequency in kHz (ticks per millisecond).
__global__ void clock_block(int kernel_time, int clockRate)
{
//This method will sleep for clockRate*kernel_time many clock ticks
// which is equivalent to sleeping for kernel_time milliseconds
int finish_clock;
int start_time;
for(int temp=0; temp<kernel_time; temp++){
start_time = clock();
// Target tick count one millisecond ahead of the sample just taken.
finish_clock = start_time + clockRate;
// clock() is a wrapping counter; if the target overflowed, the plain
// comparison below would exit immediately, so track the wrap explicitly.
bool wrapped = finish_clock < start_time;
// Spin until the counter passes the target.  'wrapped' stays set until
// clock() itself wraps (clock()>0 becomes false), after which the normal
// comparison takes over.  NOTE(review): this wrap handling is intricate --
// confirm against the device clock() semantics before reusing.
while( clock() < finish_clock || wrapped) wrapped = clock()>0 && wrapped;
}
}
// Reads the requested sleep duration (in milliseconds) from 'filename'.
// Returns a heap-allocated int* (cast to void*) consumed by sleep() and
// sleep_finish(); returns NULL if the file cannot be opened.  The 'stream'
// argument is unused but kept for interface compatibility.
// Fixes: fopen() and fscanf() results were previously unchecked, so a
// missing file crashed and an empty file left *kernel_time uninitialized.
void *sleep_setup(cudaStream_t stream, char *filename)
{
//open file
FILE * ftp;
ftp = fopen(filename,"r");
if (ftp == NULL) {
printf("sleep_setup: could not open %s\n", filename);
return NULL;
}
printf("starting setup with %s\n", filename);
// read the kernel_time from the file
int *kernel_time = (int *) malloc(sizeof(int));
*kernel_time = 0; // default duration if the file holds no readable integer
if (fscanf(ftp, "%d", kernel_time) != 1) {
printf("sleep_setup: no integer found in %s\n", filename);
}
fclose(ftp);
printf("done with setup of %s\n", filename);
return (void *) kernel_time;
}
// Launches a clock_block busy-wait kernel on the given stream.
// setupResult must be the pointer returned by sleep_setup (heap int holding
// the requested sleep duration in milliseconds).
void sleep(cudaStream_t stream, void *setupResult)
{
//get the kernel time
int *kernel_time = (int *) setupResult;
//get the clock rate
int cuda_device = 0;
cudaDeviceProp deviceProp;
cudaGetDevice(&cuda_device);
cudaGetDeviceProperties(&deviceProp, cuda_device);
// clockRate is reported in kHz, i.e. clock ticks per millisecond.
int clockRate = deviceProp.clockRate;
//launch the kernel in the stream
// NOTE(review): the third launch parameter (1) is dynamic shared memory in
// bytes; 0 was probably intended -- harmless here, but worth confirming.
clock_block<<<1,1,1,stream>>>(*kernel_time, clockRate);
}
// Writes a completion message (including the slept duration) to 'filename'
// and releases the allocation made by sleep_setup.  's' is unused but kept
// for interface compatibility.
// Fix: fopen() was previously unchecked, so an unwritable path crashed on
// the fprintf; the setup allocation is now freed on either path.
void sleep_finish(cudaStream_t s, char *filename, void *setupResult)
{
// opens the output file
int *kernel_time = (int *) setupResult;
FILE *out=fopen(filename, "w");
if (out != NULL) {
//write a nice little messege in it, including kernel_time
fprintf(out, "Finished sleep of duration: %d", *kernel_time);
//clean up
fclose(out);
} else {
printf("sleep_finish: could not open %s\n", filename);
}
free(kernel_time);
}
|
20,102 | #include "test_package.cuh"
#include <iostream>
#include <iomanip>
// Entry point: prints a pi approximation computed by estimate()
// (declared in test_package.cuh) with 3 significant digits.
int main(void)
{
std::cout << std::setprecision(3);
std::cout << "pi is approximately " << estimate() << std::endl;
return 0;
}
|
20,103 | #include <ctime>
#include <cstdlib>
#include <cstdio>
// CPU reference multiply: accumulates a*b into c for n x n row-major
// matrices.  The caller must zero-initialize c (the kernel variants in this
// file also accumulate, so all results stay comparable).
__host__ void matrixMult(int* const a,int* const b,int* const c,const int n){
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            int acc = c[row * n + col];
            for (int k = 0; k < n; ++k)
                acc += a[row * n + k] * b[k * n + col];
            c[row * n + col] = acc;
        }
    }
}
// Prints an n x n row-major matrix, one row per line, followed by a blank
// line as a separator.
__host__ void printMatrix(const int* const a,const int n){
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            printf("%d ", a[row*n + col]);
        }
        printf("\n");
    }
    printf("\n");
}
// Fills an n x n matrix with pseudo-random digits in [0, 9].  Uses rand(),
// so the caller controls reproducibility via srand().
__host__ void init(int* const a,const int n){
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx)
        a[idx] = rand() % 10;
}
// Returns true iff two n x n matrices are element-wise identical.
__host__ bool validate(const int* const a,const int* const b,const int n){
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        if (a[idx] != b[idx])
            return false;
    }
    return true;
}
// Aborts the process when a CUDA API call failed, printing the error text.
// Fixes: report to stderr instead of stdout, and exit with a failure status
// (the original exit(0) signalled success to the shell on error).
__host__ void cuAssert(cudaError_t error){
    if(error!=cudaSuccess){
        fprintf(stderr,"%s\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
}
// Prints label 's' followed by the wall time elapsed between the two
// CLOCK_MONOTONIC samples, in seconds with 5 decimals.
__host__ void printTime(char* const s,struct timespec tstart,struct timespec tend){
    double begin = (double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec;
    double end   = (double)tend.tv_sec   + 1.0e-9*tend.tv_nsec;
    printf("%s %.5f seconds\n", s, end - begin);
}
// Zeroes one element of an n x n device matrix per thread.
// Launch: 2-D grid/block covering at least n x n threads (see memsetGPU).
__global__ void memsetGPUKernel(int* arr,int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x; // column
int idy=blockDim.y*blockIdx.y+threadIdx.y; // row
// Guard: the grid may overhang the matrix edge.
if(idx<n && idy<n)
arr[idy*n+idx]=0;
return;
}
// Clears an n x n device matrix to zero using 16x16 thread blocks.
__host__ void memsetGPU(int* arr,int n){
int k=(n+15)/16; // ceil(n/16) blocks per dimension
dim3 grid(k,k);
dim3 block(16,16);
memsetGPUKernel<<<grid,block>>>(arr,n);
cuAssert(cudaGetLastError());
}
// Matrix multiply, one output element per thread (C += A*B, n x n, row-major).
// This is the deliberately slow baseline: idx (the fast-varying lane index)
// selects the ROW, so loads of 'a' and the accesses to 'c' stride by n across
// a warp (non-coalesced), and 'c' is re-read/re-written from global memory on
// every k iteration.
__global__ void naive(int* a,int* b,int* c,int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx<n && idy<n){
for(int k=0;k<n;k++)
c[idx*n+idy]+=a[idx*n+k]*b[k*n+idy];
}
}
// Matrix multiply, one output element per thread (C += A*B).
// Improvements over 'naive': idx (the fast-varying lane index) selects the
// COLUMN, so loads of 'b' and the store to 'c' are coalesced, and the dot
// product accumulates in a register with a single global update at the end.
__global__ void common(int* a,int* b,int* c,int n){
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int idy=blockDim.y*blockIdx.y+threadIdx.y;
if(idx<n && idy<n){
int sum=0;
for(int k=0;k<n;k++)
sum+=a[idy*n+k]*b[k*n+idx];
c[idy*n+idx]+=sum;
}
}
// Benchmarks the CPU reference against several GPU matrix-multiply variants
// on an n x n matrix and validates every GPU result against the CPU one.
// Fix: d0 and cpu were leaked (never freed).
__host__ int main(int argc,char* argv[]){
srand((unsigned)time(NULL));
struct timespec tstart={0,0}, tend={0,0};
int block_size;
const int n=1000;
printf("Matrix size %d*%d\n",n,n);
size_t bytes=sizeof(int)*n*n;
int* a = (int*)malloc(bytes);
int* b = (int*)malloc(bytes);
int* c0 = (int*)malloc(bytes);
int* c1 = (int*)malloc(bytes);
int* c2 = (int*)malloc(bytes);
int* d0 = (int*)malloc(bytes);
int* cpu = (int*)malloc(bytes);
init(a,n);
init(b,n);
// All result buffers must start at zero: every multiply variant accumulates.
memset(c0,0,bytes);
memset(c1,0,bytes);
memset(c2,0,bytes);
memset(d0,0,bytes);
memset(cpu,0,bytes);
int* da=NULL;//device a
int* db=NULL;
int* dc=NULL;
cuAssert(cudaMalloc((void**)&da,bytes));
cuAssert(cudaMalloc((void**)&db,bytes));
cuAssert(cudaMalloc((void**)&dc,bytes));
cuAssert(cudaMemcpy(da,a,bytes,cudaMemcpyHostToDevice));
cuAssert(cudaMemcpy(db,b,bytes,cudaMemcpyHostToDevice));
memsetGPU(dc,n);
//naive - non coalesed, one thread per block (worst case)
dim3 grid(n,n,1);
dim3 block(1,1,1);
clock_gettime(CLOCK_MONOTONIC, &tstart);
naive<<<grid,block>>>(da,db,dc,n);
cuAssert(cudaDeviceSynchronize());
clock_gettime(CLOCK_MONOTONIC, &tend);
printTime("GPU 01x01/v1",tstart,tend);
cuAssert(cudaGetLastError());
cuAssert(cudaMemcpy(c0,dc,bytes,cudaMemcpyDeviceToHost));
memsetGPU(dc,n);
//change block size to 32x32 66,6% usage
block_size=32;
block.x=block_size;
block.y=block_size;
grid.x=(n+block_size-1)/block_size;
grid.y=(n+block_size-1)/block_size;
clock_gettime(CLOCK_MONOTONIC, &tstart);
naive<<<grid,block>>>(da,db,dc,n);
cuAssert(cudaDeviceSynchronize());
clock_gettime(CLOCK_MONOTONIC, &tend);
printTime("GPU 32x32/v1",tstart,tend);
cuAssert(cudaMemcpy(c1,dc,bytes,cudaMemcpyDeviceToHost));
memsetGPU(dc,n);
//change block size to 16x16 100% usage
block_size=16;
block.x=block_size;
block.y=block_size;
grid.x=(n+block_size-1)/block_size;
grid.y=(n+block_size-1)/block_size;
clock_gettime(CLOCK_MONOTONIC, &tstart);
naive<<<grid,block>>>(da,db,dc,n);
cuAssert(cudaDeviceSynchronize());
clock_gettime(CLOCK_MONOTONIC, &tend);
printTime("GPU 16x16/v1",tstart,tend);
cuAssert(cudaMemcpy(c2,dc,bytes,cudaMemcpyDeviceToHost));
memsetGPU(dc,n);
//common algoritm block size to 16x16 100% usage
block_size=16;
block.x=block_size;
block.y=block_size;
grid.x=(n+block_size-1)/block_size;
grid.y=(n+block_size-1)/block_size;
clock_gettime(CLOCK_MONOTONIC, &tstart);
common<<<grid,block>>>(da,db,dc,n);
cuAssert(cudaDeviceSynchronize());
clock_gettime(CLOCK_MONOTONIC, &tend);
printTime("GPU 16x16/v2",tstart,tend);
cuAssert(cudaMemcpy(d0,dc,bytes,cudaMemcpyDeviceToHost));
memsetGPU(dc,n);
cuAssert(cudaFree(da));
cuAssert(cudaFree(db));
cuAssert(cudaFree(dc));
// CPU reference timing.
clock_gettime(CLOCK_MONOTONIC, &tstart);
matrixMult(a,b,cpu,n);
clock_gettime(CLOCK_MONOTONIC, &tend);
printTime("CPU ",tstart,tend);
//printMatrix(cpu,n);
//printMatrix(c0,n);
//printMatrix(c1,n);
printf("validate 01x01/v1 / cpu = %s\n", validate(c0,cpu,n) ? "true" : "false");
printf("validate 32x32/v1 / cpu = %s\n", validate(c1,cpu,n) ? "true" : "false");
printf("validate 16x16/v1 / cpu = %s\n", validate(c2,cpu,n) ? "true" : "false");
printf("validate 16x16/v2 / cpu = %s\n", validate(d0,cpu,n) ? "true" : "false");
free(a);
free(b);
free(c0);
free(c1);
free(c2);
free(d0);  // fix: previously leaked
free(cpu); // fix: previously leaked
return 0;
}
|
20,104 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
struct timeval st, et;
__device__ __host__ void swap(int*, int, int);
void rng(int*, int);
int getMax(int*, int);
void buildDummy(int*, int, int, int);
__global__ void compareAndSwap(int*, int, int, int);
void impBitonicSortPar(int*, int, int);
void impBitonicSortSer(int*, int);
int getPowTwo(int);
void writeToFile(int*, int, char*);
bool isValid(int*, int*, int);
// Driver: sorts n random ints with imperative bitonic sort on the GPU and
// on the CPU, reports both timings plus speedup and efficiency, and
// cross-checks the two results.
// Fix: the efficiency printf used "%.3f%\n" -- a literal '%' must be
// escaped as "%%" in a printf format string.
int main(int argc, char **argv) {
int n, dummy_n, t = 512;
if (argc < 2) {
printf("Usage: %s <n> <p>\nwhere <n> is problem size, <p> is number of thread (optional)\n", argv[0]);
exit(1);
}
if (argc == 3){
t = atoi(argv[2]);
}
n = atoi(argv[1]);
// Bitonic sort needs a power-of-two length; pad the tail with the max value
// so the padding sorts to the end.
dummy_n = getPowTwo(n);
int *arr, *arr_ser, *d_arr;
arr = (int*) malloc(dummy_n*sizeof(int));
arr_ser = (int*) malloc(dummy_n*sizeof(int));
rng(arr,n);
int max_x = getMax(arr,n);
buildDummy(arr,n,dummy_n,max_x);
memcpy(arr_ser, arr, dummy_n*sizeof(int));
cudaMalloc((void **)&d_arr, dummy_n*sizeof(int));
cudaMemcpy(d_arr, arr, dummy_n*sizeof(int), cudaMemcpyHostToDevice);
// write random numbers to input file
writeToFile(arr,n,"./data/input");
// execute paralel
gettimeofday(&st,NULL);
impBitonicSortPar(d_arr,dummy_n,t);
gettimeofday(&et,NULL);
int elapsed_paralel = ((et.tv_sec - st.tv_sec) * 1000000) + (et.tv_usec - st.tv_usec);
printf("Execution paralel time: %d micro sec\n",elapsed_paralel);
// execute serial
gettimeofday(&st,NULL);
impBitonicSortSer(arr_ser,dummy_n);
gettimeofday(&et,NULL);
int elapsed_serial = ((et.tv_sec - st.tv_sec) * 1000000) + (et.tv_usec - st.tv_usec);
printf("Execution serial time: %d micro sec\n",elapsed_serial);
// calculate speedup
float speedup = (float)elapsed_serial/elapsed_paralel;
printf("Speedup : %.3f\n",speedup);
// calculate efficiency
float eff = 100*speedup/t;
printf("Efficiency : %.3f%%\n",eff);
cudaMemcpy(arr, d_arr, dummy_n*sizeof(int), cudaMemcpyDeviceToHost);
// check test
bool valid = isValid(arr_ser,arr,dummy_n);
if(valid){
printf("Test Passed\n");
} else {
printf("Test Failed\n");
}
writeToFile(arr,n,"./data/output");
free(arr);
free(arr_ser);
cudaFree(d_arr);
return 0;
}
// Writes n ints, one per line, to the file at 'path'.
// Fix: guard against fopen() failure (e.g. missing ./data directory)
// instead of dereferencing a NULL FILE*.
void writeToFile(int* arr, int n, char* path){
FILE* f = fopen(path,"w");
if (f == NULL) {
printf("writeToFile: cannot open %s\n", path);
return;
}
for(int i=0; i<n; i++) {
fprintf(f, "%d\n", arr[i]);
}
fclose(f);
}
// Fills arr[0..n-1] with pseudo-random ints from a fixed seed so every run
// produces the same sequence.
void rng(int* arr, int n) {
    srand(13515097);
    for (int i = 0; i < n; ++i) {
        arr[i] = (int)rand();
    }
}
// Returns the largest of the first n elements (requires n >= 1).
int getMax(int* arr, int n){
    int best = arr[0];
    for (int i = 1; i < n; ++i) {
        if (arr[i] > best)
            best = arr[i];
    }
    return best;
}
// Pads arr[N..dummy_n-1] with max_x so the padded tail sorts to the end of
// the power-of-two-sized array.
void buildDummy(int* arr,int N,int dummy_n, int max_x){
    for (int i = N; i < dummy_n; ++i)
        arr[i] = max_x;
}
// Exchanges a[i] and a[j]; callable from both host and device code.
__device__ __host__ void swap(int* a, int i, int j) {
    int tmp = a[i];
    a[i] = a[j];
    a[j] = tmp;
}
// One bitonic compare-exchange step: thread i pairs with partner i^j and
// orders the pair according to bit k of i (ascending when (i&k)==0).
// NOTE(review): there is no i<n guard, so the launch must supply at most n
// threads ('n' itself is unused here); impBitonicSortPar launches
// ceil(n/t)*t threads, which exceeds n when t does not divide n -- confirm
// t is chosen so t <= n and t divides n.
__global__ void compareAndSwap(int* a, int n, int k, int j){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int ij=i^j;
// Only the lower index of each pair performs the exchange.
if ((ij)>i) {
// monotonic increasing
if ((i&k)==0 && a[i] > a[ij]) swap(a,i,ij);
// monotonic decreasing
if ((i&k)!=0 && a[i] < a[ij]) swap(a,i,ij);
}
}
/*
Imperative paralel bitonic sort
*/
// Sorts the device array a (length n, must be a power of two) ascending,
// using t threads per block and one compareAndSwap launch per (k, j) phase.
void impBitonicSortPar(int* a, int n, int t) {
int j,k;
int blocks = (n+t-1)/t; // ceil(n/t)
int threads = t;
dim3 grid_dim(blocks,1);
dim3 block_dim(threads,1);
for (k=2; k<=n; k=2*k) {
for (j=k>>1; j>0; j=j>>1) {
compareAndSwap<<<grid_dim,block_dim>>>(a, n, k, j);
// Each phase depends on the previous one completing.
cudaDeviceSynchronize();
}
}
}
// Serial reference implementation of imperative bitonic sort: sorts
// a[0..n-1] ascending.  n must be a power of two.
void impBitonicSortSer(int* a, int n){
    for (int k = 2; k <= n; k *= 2) {
        for (int j = k / 2; j > 0; j /= 2) {
            for (int i = 0; i < n; i++) {
                int partner = i ^ j;
                if (partner > i) {
                    // Bit k of i decides the direction of this subsequence.
                    bool ascending = ((i & k) == 0);
                    bool outOfOrder = ascending ? (a[i] > a[partner])
                                                : (a[i] < a[partner]);
                    if (outOfOrder) {
                        int tmp = a[i];
                        a[i] = a[partner];
                        a[partner] = tmp;
                    }
                }
            }
        }
    }
}
// Smallest power of two >= n (returns 1 for n <= 1).  The p > 0 term stops
// the loop if the shift overflows int.
int getPowTwo(int n){
    int p = 1;
    while (p < n && p > 0) {
        p <<= 1;
    }
    return p;
}
// Returns true iff the two arrays match element-wise over n entries.
bool isValid(int* arr1, int* arr2, int n){
    for (int i = 0; i < n; ++i) {
        if (arr1[i] != arr2[i])
            return false;
    }
    return true;
}
|
20,105 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16 // threads per block
// CPU reference: rcpu = m1 (m x n) * m2 (n x k), all row-major.
void multiply_using_cpu(int *m1, int *m2, int *rcpu, int m, int n, int k) {
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < k; ++col) {
            int acc = 0;
            for (int idx = 0; idx < n; ++idx)
                acc += m1[row * n + idx] * m2[idx * k + col];
            rcpu[row * k + col] = acc;
        }
    }
}
// GPU matrix multiply: one thread computes one element of the m x k result.
// Launch: 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering (k, m).
__global__ void multiply_using_gpu(int *m1, int *m2, int *rgpu, int m, int n, int k)
{
// Each thread directly computes the row & column assigned to it in the
// resultant matrix - no loops over row/col are needed.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Boundary protection - the grid may contain more threads than elements
// when k or m is not a multiple of BLOCK_SIZE.
if(col < k && row < m)
{
int sum = 0;
for(int i = 0; i < n; i++)
// accumulate the dot product for this output cell in a register
sum += m1[row * n + i] * m2[i * k + col];
// write the result
rgpu[row * k + col] = sum;
}
}
// Fills an m x n row-major matrix with rand() values (Row Major Ordering:
// rows stacked lengthways in a flat array).  Caller controls the seed.
void initialize_matrix(int *matrix, int m, int n) {
    const int total = m * n;
    for (int idx = 0; idx < total; ++idx)
        matrix[idx] = rand();
}
// Returns 1 when the two m x k result matrices are identical, 0 otherwise.
// The 'n' parameter is unused; it is kept for interface compatibility.
// Fix: return on the first mismatch instead of scanning the whole matrix
// after the answer is already known.
int check_for_discrepancies(int *r1, int *r2, int m, int n, int k) {
    for (int i = 0; i < m; i++)
        for (int j = 0; j < k; j++)
            if (r1[i*k + j] != r2[i*k + j])
                return 0;
    return 1;
}
// Runs one CPU-vs-GPU matrix-multiply benchmark for (m x n) * (n x k),
// prints both timings, validates the results, and returns the speedup
// cpu_time / gpu_time.  GPU timing includes allocation and transfers.
// Fix: cudaThreadSynchronize() is deprecated -- use cudaDeviceSynchronize(),
// placed before the D2H copy so the kernel is complete when results return.
float test_case(int m, int n, int k) {
printf("Multiplying a %d x %d matrix with %d x %d matrix \n", m, n, n, k);
printf("---------------------\n\n");
// allocate memory space for matrices & results on the host
// (pinned host memory -- cudaMallocHost -- enables fast transfers)
int *m1, *m2, *rcpu, *rgpu;
cudaMallocHost((void **) &m1, sizeof(int)*m*n);
cudaMallocHost((void **) &m2, sizeof(int)*n*k);
cudaMallocHost((void **) &rcpu, sizeof(int)*m*k);
cudaMallocHost((void **) &rgpu, sizeof(int)*m*k);
initialize_matrix(m1, m, n);
initialize_matrix(m2, n, k);
float gpu_time_ms, cpu_time_ms;
// some events to count the execution time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start the CPU version
cudaEventRecord(start, 0);
multiply_using_cpu(m1, m2, rcpu, m, n, k);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cpu_time_ms, start, stop);
printf("CPU: %f ms.\n", cpu_time_ms);
// start to count execution time of GPU version
cudaEventRecord(start, 0);
// Allocate memory space on the device
int *dm1, *dm2, *dr;
cudaMalloc((void **) &dm1, sizeof(int)*m*n);
cudaMalloc((void **) &dm2, sizeof(int)*n*k);
cudaMalloc((void **) &dr, sizeof(int)*m*k);
// copy matrix A and B from host to device memory
cudaMemcpy(dm1, m1, sizeof(int)*m*n, cudaMemcpyHostToDevice);
cudaMemcpy(dm2, m2, sizeof(int)*n*k, cudaMemcpyHostToDevice);
// 2-D launch: one thread per output element, BLOCK_SIZE x BLOCK_SIZE blocks
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid(grid_cols, grid_rows);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
// The kernel is executed by an array of CUDA threads, all running the same code.
multiply_using_gpu<<<grid,threads>>>(dm1, dm2, dr, m, n, k);
// Wait for the kernel before timing/copying (replaces deprecated
// cudaThreadSynchronize()).
cudaDeviceSynchronize();
// Copy back the results from the device to host
cudaMemcpy(rgpu, dr, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time_ms, start, stop);
printf("GPU: %f ms.\n", gpu_time_ms);
// Freeup device memory
cudaFree(dm1);
cudaFree(dm2);
cudaFree(dr);
printf("\n");
// Check for discrepencies & compute & show speedUp
if(check_for_discrepancies(rcpu, rgpu, m, n, k))
printf("Speedup = %f \n\n\n", cpu_time_ms / gpu_time_ms);
else
printf("Results from CPU & GPU are not matching. \n\n\n");
// Freeup host memory
cudaFreeHost(m1);
cudaFreeHost(m2);
cudaFreeHost(rcpu);
cudaFreeHost(rgpu);
return cpu_time_ms / gpu_time_ms; // return speedup
}
// Runs the benchmark over progressively larger matrix shapes, then prints
// a summary of the measured speedups.
int main()
{
    const int shapes[5][3] = {
        { 128,   64,  128},
        { 256,  128,  256},
        { 512,  256,  512},
        {1024,  512, 1024},
        {2048, 1024, 2048},
    };
    float speedup[5];
    for (int i = 0; i < 5; ++i)
        speedup[i] = test_case(shapes[i][0], shapes[i][1], shapes[i][2]);
    printf("Summary of SpeedUps\n");
    printf("---------------------\n");
    printf("128 x 64 Matrix = %f\n", speedup[0]);
    printf("256 x 128 Matrix = %f\n", speedup[1]);
    printf("512 x 256 Matrix = %f\n", speedup[2]);
    printf("1024 x 512 Matrix = %f\n", speedup[3]);
    printf("2048 x 1024 Matrix = %f\n", speedup[4]);
    return 0;
}
|
20,106 | #include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
#define TOTAL_DATA 256
#define BLOCK_PER_THREAT 64
#define BLOCK_SIZE 32
#define RADIUS 3
// 1-D stencil of radius RADIUS over BLOCK_SIZE-wide tiles staged in shared
// memory: out[i] = sum of in[i-RADIUS .. i+RADIUS].
// Fix: the halo loads previously read before in[0] (first block) and past
// the last element (last block); out-of-range halo cells now contribute 0.
__global__ void stensil_1d (int *in, int *out){
__shared__ int temp[BLOCK_SIZE + 2*RADIUS];
int gIdx = threadIdx.x + (blockIdx.x*blockDim.x); // Global Index
int lIdx = threadIdx.x + RADIUS; // Local Index
int total = gridDim.x * blockDim.x; // number of elements covered by the launch
// Read input elements into shared memory
temp[lIdx] = in[gIdx];
if(threadIdx.x < RADIUS) {
// Left halo: zero-pad before the start of the array.
temp[lIdx - RADIUS] = (gIdx >= RADIUS) ? in[gIdx - RADIUS] : 0;
// Right halo: zero-pad past the end of the array.
temp[lIdx + BLOCK_SIZE] = (gIdx + BLOCK_SIZE < total) ? in[gIdx + BLOCK_SIZE] : 0;
}
// Make sure all the threads are syncronized
__syncthreads();
// Apply the stencil
int result = 0;
for(int offset = -RADIUS; offset <= RADIUS; offset++) {
result += temp[lIdx + offset];
}
// Store the output
out[gIdx] = result;
}
// main
// Host driver: fills an array with random digits, runs the stencil kernel,
// and prints each output next to the inputs that produced it.
// Fixes: (1) the host-to-device cudaMemcpy had destination and source
// swapped, so the kernel read uninitialized device memory; (2) the result
// printout indexed a[i+j] past the end of the array and showed the window
// a[i..i+2R] instead of the centered window a[i-R..i+R] the kernel sums.
int main(void)
{
int *a, *b;
int *d_a, *d_b;
int size = TOTAL_DATA * sizeof(int);
int i;
// Allocate memory for the Host
a = (int *) malloc (size);
b = (int *) malloc (size);
// Allocate memory for the Device
cudaMalloc ((void **) &d_a, size);
cudaMalloc ((void **) &d_b, size);
// Initialize data (0 - 9)
for(i=0; i<TOTAL_DATA;i++) {
a[i] = rand() % 10;
}
// Copy the data to the device (fix: destination comes first)
cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice);
// Lets execute it
stensil_1d<<<TOTAL_DATA/BLOCK_SIZE, BLOCK_SIZE>>> (d_a, d_b);
cudaMemcpy (b, d_b, size, cudaMemcpyDeviceToHost);
// Print the outcome: each output with its centered stencil window, clamped
// to the array bounds
int j;
for(i=0;i<TOTAL_DATA;i++) {
printf("[%3d]\t", i);
for(j=-RADIUS;j<=RADIUS;j++) {
if (i+j >= 0 && i+j < TOTAL_DATA) printf("%d,", a[i+j]);
}
printf("\t--> %d\n", b[i]);
}
cudaFree (d_a);
cudaFree (d_b);
free (a);
free (b);
cudaGetDeviceCount (&j);
printf("Total Device = %d\n", j);
return 0;
}
|
20,107 |
extern "C" void cuda_hello( char * host_result,clock_t * time_used);
// Copies num bytes of the constant string "Hello CUDA!" (11 chars + NUL, so
// num is expected to be 12) into 'result' and reports the elapsed device
// clock ticks in *time.  Intended for a single-thread launch.
__global__ static void HelloCUDA(char* result, int num, clock_t* time)
{
int i = 0;
char p_HelloCUDA[] = "Hello CUDA!";
clock_t start = clock();
for(i = 0; i < num; i++) {
result[i] = p_HelloCUDA[i];
}
*time = clock() - start;
}
// Host wrapper: runs HelloCUDA on the device, then copies the 12-byte
// greeting into host_result (must hold >= 12 bytes) and the tick count
// into *time_used.
// NOTE(review): no CUDA call here is error-checked; failures would pass
// silently -- confirm whether callers inspect cudaGetLastError().
void cuda_hello( char* host_result,clock_t * time_used)
{
char *device_result = 0;
clock_t *time = 0;
cudaMalloc((void**) &device_result, sizeof(char) * 12);
cudaMalloc((void**) &time, sizeof(clock_t));
HelloCUDA<<<1, 1, 0>>>(device_result, 12 , time);
// The blocking memcpys below also serialize against the kernel launch.
cudaMemcpy(host_result, device_result, sizeof(char) * 12, cudaMemcpyDeviceToHost);
cudaMemcpy(time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost);
cudaFree(device_result);
cudaFree(time);
}
20,108 | #include "includes.h"
// Flattens 3-D grid and 3-D block coordinates into one global thread id:
// blocks are linearized x-fastest (x, then y, then z), and within a block
// threads are linearized x-fastest as well.
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
// Block-level sum reduction: each block sums blockDim.x elements of in1 in
// dynamic shared memory and writes the block total to output[bid].
// Launch requirements: blockDim.x a power of two, and the third launch
// parameter must provide blockDim.x * sizeof(double) bytes of shared memory.
// ('pass' is unused; kept for interface compatibility.)
// Fixes: (1) the shared buffer was never loaded from in1, so the reduction
// consumed uninitialized shared memory; (2) the guard 'tid < blockDim.x>>1'
// should shrink with the stride ('tid < i'), otherwise upper threads add
// stale partial sums and corrupt the result.
__global__ void pSum(double* in1, double* output, int pass){
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x;
unsigned int gid = getGid3d3d();
extern __shared__ double sdata2[];
// Stage this thread's element before reducing.
sdata2[tid] = in1[gid];
__syncthreads();
for(int i = blockDim.x>>1; i > 0; i>>=1){
if(tid < i){ // only the active lower half accumulates this round
sdata2[tid] += sdata2[tid + i];
}
__syncthreads();
}
if(tid==0){
output[bid] = sdata2[0];
}
}
20,109 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// NOTE(review): despite the name, this is not a SAXPY.  It appears to be a
// synthetic kernel for exercising a memory-trace / simulator tool: the
// 'num_*_streams' and 'addr*' parameters are never used, and 'dummy'
// scales the shared-memory write strides.  With dummy != 0, indices such
// as id + 8*i*dummy can exceed A[1000] (undefined behavior); presumably
// the tracing tool expects or tolerates this -- confirm before reuse.
__global__ void saxpy(unsigned num_rd_streams, unsigned num_wr_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, int dummy)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
// Eight strided shared-memory stores per iteration.
for (int i = 0; i < 1000 - 8; i += 8) {
A[id + 8*i*dummy] = i + 8;
A[id + 1*i*dummy] = i + 1;
A[id + 2*i*dummy] = i + 2;
A[id + 3*i*dummy] = i + 3;
A[id + 4*i*dummy] = i + 4;
A[id + 5*i*dummy] = i + 5;
A[id + 6*i*dummy] = i + 6;
A[id + 7*i*dummy] = i + 7;
}
}
// NOTE(review): this driver appears intentionally malformed, likely written
// for a GPU simulator / address-tracing tool rather than real hardware:
// d_x is a hard-coded raw address (100) that was never cudaMalloc'ed, the
// two copies use mismatched sizes (N vs N*8 floats), and argv[1] is read
// without an argc check.  Do not expect it to run correctly on a real GPU.
int main(int argc, char *argv[])
{
int N = 1000;
float *d_x = (float *)100; // raw "device address", never allocated
float *h_x;
h_x = (float *)malloc(N*8*sizeof(float));
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<1, 8>>>(0, 8, 100, 4100, 8100, 12100, 16100, 20100, 24100, 28100, atoi(argv[1]));
cudaMemcpy(h_x, d_x, N*8*sizeof(float), cudaMemcpyDeviceToHost);
for (unsigned i = 0 ; i < 8000 ; i++) {
printf("%f\n", *(h_x + i));
}
}
|
20,110 | #include <stdio.h>
#include <cuda.h>
#include <stdio.h>
// Enumerates every CUDA device and prints its name, memory clock, bus
// width, theoretical peak bandwidth, and total memory.
int main() {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        // DDR transfers twice per clock; bus width is in bits, so /8 for bytes.
        double peakBandwidthGBs =
            2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6;
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n", peakBandwidthGBs);
        printf(" Memory size (MB): %ld\n\n", prop.totalGlobalMem/1024/1024);
    }
}
|
20,111 | //#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <cuda.h>
//using namespace std;
//#include <ctime>
//#include "cuda_runtime.h"
//#include "curand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//__device__ int _correct_cnt;
//
//__device__ float _res;
//__device__ float _arr[10][10];
//float arr[10][10];
//
//__global__ void test() {
// int ib = blockIdx.x;
// int ix = threadIdx.x;
// int iy = threadIdx.y;
// //for (int l = 0;l < 10;l++)
// // for (int m = 0;m < 10;m++)
// // _res += _arr[l][m];
// __shared__ float data[1024];
// int tid = threadIdx.y*blockDim.x+threadIdx.x;
// data[tid] = 0;
// if(ix<10&&iy<10)
// data[tid] = _arr[ix][iy];
// __syncthreads();
// for (int s = 1024 / 2; s > 0; s >>= 1) {
// if (tid < s)
// data[tid] += data[tid + s];
// __syncthreads();
// }
// if (tid == 0) {
// _res= data[0];
// }
//}
//int main() {
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// arr[i][j] = rand() % 5;
// }
// }
// cudaMemcpyToSymbol(_arr, &arr, 10 * 10 * sizeof(float));
// float sum = 0;
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// sum += arr[i][j];
// }
// }
// cout << "CPU sum: " <<sum << endl;
// test << <1, dim3(32,32)>> > ();
// float res=0;
// cudaMemcpyFromSymbol(&res, _res, sizeof(float));
// cout << "GPU sum: " << res << endl;
// return 0;
//} |
20,112 | #include <stdio.h>
#include <chrono>
#include <iostream>
// Component-wise float3 vector addition.
__host__ __device__ float3 operator+(const float3 &a, const float3 &b)
{
    float3 r;
    r.x = a.x + b.x;
    r.y = a.y + b.y;
    r.z = a.z + b.z;
    return r;
}
// Component-wise float3 vector subtraction.
__host__ __device__ float3 operator-(const float3 &a, const float3 &b)
{
    float3 r;
    r.x = a.x - b.x;
    r.y = a.y - b.y;
    r.z = a.z - b.z;
    return r;
}
// Advances one particle by a single step: position += velocity, then the
// velocity grows by a uniform acceleration proportional to the iteration
// index 'it'.
__host__ __device__ void update(float3 &p, float3 &v, const int it)
{
p = p + v;
v = v + make_float3(0.1f*it, 0.1f*it, 0.1f*it);
}
// Runs 'iterations' update steps for one particle per thread.  'offset'
// shifts the global index so each stream's launch addresses its own chunk
// of the particle/velocity arrays.
// NOTE(review): there is no bounds guard, so the launch must cover exactly
// the chunk (streamSize divisible by blockSize) -- confirm at the call site.
__global__ void simKernel(float3 *particles, float3 *velocities, int iterations, int offset)
{
const int id = offset + blockIdx.x*blockDim.x + threadIdx.x;
for(size_t it = 0; it<iterations;++it)
{
update(particles[id], velocities[id], it);
}
}
// Reads particle count, iteration count, block size and stream count from
// stdin, then runs a chunked (one chunk per stream) particle simulation on
// the GPU, overlapping transfers and kernels across streams, and reports
// the elapsed wall time.
// Fixes: (1) streamBytes was computed with sizeof(float) instead of
// sizeof(float3), so only one third of each chunk was transferred;
// (2) pinned buffers were freed while async copies could still be in
// flight -- synchronize first; (3) the stream array was leaked.
int main()
{
int numParticles, numIterations, blockSize, nStream;
std::cin >> numParticles >> numIterations >>blockSize >>nStream;
int byteSize = numParticles * sizeof(float3);
// NOTE(review): assumes numParticles is divisible by nStream; any
// remainder is silently left unsimulated.
const int streamSize = numParticles/nStream;
const int streamBytes = streamSize * sizeof(float3);
// cudaEvent_t startEvent, stopEvent, dummyEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventCreate(&dummyEvent);
cudaStream_t *stream = new cudaStream_t[nStream];
for(size_t i=0; i<nStream; ++i)
{
cudaStreamCreate(&stream[i]);
}
float3 *gpu_particles;
float3 *gpu_velocities;
// Pinned host memory enables truly asynchronous cudaMemcpyAsync.
cudaMallocHost(&gpu_particles, byteSize);
cudaMallocHost(&gpu_velocities, byteSize);
for(size_t i=0; i<numParticles; ++i)
{
gpu_particles[i] = make_float3(.1f,.1f,.1f);
gpu_velocities[i] = make_float3(.01f,.01f,.01f);
}
//GPU SIMULATION:
auto start_gpu = std::chrono::high_resolution_clock::now();
float3 *dgpu_particles = 0;
float3 *dgpu_velocities = 0;
cudaMalloc(&dgpu_particles, byteSize);
cudaMalloc(&dgpu_velocities, byteSize);
for(int i=0; i<numIterations; ++i)
{
for(int j=0;j<nStream;++j)
{
int offset = j*streamSize;
cudaMemcpyAsync(dgpu_particles+offset, gpu_particles+offset,
streamBytes, cudaMemcpyHostToDevice, stream[j]);
cudaMemcpyAsync(dgpu_velocities+offset, gpu_velocities+offset,
streamBytes, cudaMemcpyHostToDevice, stream[j]);
simKernel<<<(streamSize+blockSize-1)/blockSize, blockSize,0,stream[j]>>>(dgpu_particles,
dgpu_velocities, 1, offset);
cudaMemcpyAsync(gpu_particles+offset, dgpu_particles+offset,
streamBytes, cudaMemcpyDeviceToHost, stream[j]);
cudaMemcpyAsync(gpu_velocities+offset, dgpu_velocities+offset,
streamBytes, cudaMemcpyDeviceToHost, stream[j]);
}
}
// Wait for all streams before freeing buffers and stopping the clock.
cudaDeviceSynchronize();
cudaFree(dgpu_particles);
cudaFree(dgpu_velocities);
cudaFreeHost(gpu_particles);
cudaFreeHost(gpu_velocities);
auto end_gpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> time_gpu = end_gpu-start_gpu;
std::cout<<"GPU simulation time:"<<time_gpu.count()<<std::endl;
for(size_t i=0; i<nStream; ++i)
{
cudaStreamDestroy(stream[i]);
}
delete[] stream; // fix: previously leaked
return 0;
}
20,113 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: every thread prints its block index, block dimensions, and
// grid dimensions (device printf output is serialized and unordered).
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d, gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n", blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Launches a grid of 2x2x2 blocks covering a 4x4x4 problem so every thread
// prints its block/grid coordinates, then waits for output and resets.
int main()
{
    const int nx = 4, ny = 4, nz = 4;
    dim3 block(2, 2, 2);
    dim3 grid(nx / block.x, ny / block.y, nz / block.z);
    print_details<<<grid, block>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
20,114 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define threadBlocks 1
#define threads 256
// Hello-world kernel: each thread prints its threadIdx.x (output order is
// not deterministic).
__global__ void printKernel()
{
printf("Hello World! My threadId is %d \n", threadIdx.x);
}
// Launches the hello-world kernel and reports any launch/execution error
// surfaced by the post-launch synchronize.
int main()
{
    cudaDeviceSynchronize();
    printKernel <<< threadBlocks, threads >>>();
    cudaError_t status = cudaDeviceSynchronize();
    if (status == cudaSuccess) {
        printf("done");
    } else {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(status));
    }
    return 0;
}
20,115 | #include "includes.h"
/*
column
A[][] = ---------------------threadIdx.y
|
|
|
|
row |
|
|
|
|
threadIdx.x
*/
#define TILE_WIDTH 16
#define TILE_WIDTH 16
#define ar 311
#define ac_br 312
#define bc 115
using namespace std;
// Tiled matrix multiply C = A*B using TILE_WIDTH x TILE_WIDTH shared-memory
// tiles.  Convention (see file-header diagram): threadIdx.x indexes ROWS and
// threadIdx.y indexes COLUMNS.  Dimensions that are not multiples of
// TILE_WIDTH are handled by zero-padding the tile edges.
__global__ void mat_mul_shared(int *d_A, int *d_B, int *d_C, int rowA, int colA, int rowB, int colB, int rowC, int colC)
{
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int row = tx + bx*TILE_WIDTH; // 0 to rowA/rowC
int col = ty + by*TILE_WIDTH; // 0 to colB/colC
__shared__ int s_A[TILE_WIDTH][TILE_WIDTH], s_B[TILE_WIDTH][TILE_WIDTH];
int cvalue = 0;
// Walk the tiles along the shared dimension (colA == rowB).
for(int i = 0; i < (colA+TILE_WIDTH-1)/TILE_WIDTH; i++)
{
// Stage one tile of A and one tile of B, zero-filling out-of-range cells.
if(row < rowA && i*TILE_WIDTH+ty < colA)
s_A[tx][ty] = d_A[row*colA + i*TILE_WIDTH+ty];
else
s_A[tx][ty] = 0;
if(i*TILE_WIDTH+tx < rowB && col < colB)
s_B[tx][ty] = d_B[(i*TILE_WIDTH+tx)*colB + col];
else
s_B[tx][ty] = 0;
__syncthreads(); // tiles fully loaded before any thread reads them
for(int k = 0; k < TILE_WIDTH; k++)
cvalue += s_A[tx][k]*s_B[k][ty];
__syncthreads(); // finish reads before the next iteration overwrites tiles
}
if(row < rowC && col < colC)
d_C[row*colC + col] = cvalue;
}// End of mat_mul_shared function
20,116 | /**
* @file compare.cu
 * @brief Element-wise scalar addition for a CUDA array (translated from Japanese; the original brief said "comparison implementation", which does not match this file's code)
* @author HIKARU KONDO
* @date 2021/08/31
*/
#include "array_scalar_add.cuh"
#define BLOCKDIM 256
// Element-wise resArray[i] = array[i] + scalar for i < size.
// One thread per element; threads beyond 'size' return immediately.
template<typename T>
__global__ void arrayAddScalar(T *array, T *resArray, T scalar, int size) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size) { return; }
resArray[idx] = array[idx] + scalar;
}
// Host wrapper: adds 'scalar' to each of 'size' device floats, writing the
// result to resArray.  Launch is asynchronous on the default stream.
void floatArrayScalarAdd(float *array, float *resArray, float scalar, int size) {
    const int threadsPerBlock = BLOCKDIM;
    const int numBlocks = (size + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    arrayAddScalar<<< numBlocks, threadsPerBlock >>> (array, resArray, scalar, size);
}
// Host wrapper: adds 'scalar' to each of 'size' device doubles, writing the
// result to resArray.  Launch is asynchronous on the default stream.
void doubleArrayScalarAdd(double *array, double *resArray, double scalar, int size) {
    const int threadsPerBlock = BLOCKDIM;
    const int numBlocks = (size + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    arrayAddScalar<<< numBlocks, threadsPerBlock >>> (array, resArray, scalar, size);
}
|
20,117 | #include "cufft.h"
#ifndef pi
#define pi 4.0f*atanf(1.0f)
#endif
#ifndef threads_num
#define threads_num 256
#endif
/* functions for dst and dct transforms */
static __global__ void DataExtSetRedft00_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRedft00_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetRedft10_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRedft10_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetRedft01_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRedft01_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetRodft00_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRodft00_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetIRodft00_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetIRodft00_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetRodft10_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRodft10_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void DataExtSetRodft01_1d(float *d_in, cufftComplex *d_out, int n);
static __global__ void DataSubGetRodft01_1d(cufftComplex *d_in, float *d_out, int n);
static __global__ void PhaseShiftForw_1d(cufftComplex *d_in, cufftComplex *d_out, int n);
static __global__ void PhaseShiftBack_1d(cufftComplex *d_in, cufftComplex *d_out, int n);
/* various dst and dct transforms */
void cufft_redft00_1d(float *d_in, float *d_out, int n);
void cufft_iredft00_1d(float *d_in, float *d_out, int n);
void cufft_redft10_1d(float *d_in, float *d_out, int n);
void cufft_redft01_1d(float *d_in, float *d_out, int n);
void cufft_rodft00_1d(float *d_in, float *d_out, int n);
void cufft_irodft00_1d(float *d_in, float *d_out, int n);
void cufft_rodft10_1d(float *d_in, float *d_out, int n);
void cufft_rodft01_1d(float *d_in, float *d_out, int n);
/* functions for first-order derivatives computed by dst and dct */
static __global__ void dst1_idct2_1d_pre(float *d_in, float *d_out, float dx, int n);
static __global__ void dst1_idct2_1d_post(float *d_in, float *d_out, int n);
static __global__ void dct1_idst2_1d_pre(float *d_in, float *d_out, float dx, int n);
static __global__ void dct1_idst2_1d_post(float *d_in, float *d_out, int n);
static __global__ void dst2_idct1_1d_pre(float *d_in, float *d_out, float dx, int n);
static __global__ void dst2_idct1_1d_post(float *d_in, float *d_out, int n);
static __global__ void dct2_idst1_1d_pre(float *d_in, float *d_out, float dx, int n);
static __global__ void dct2_idst1_1d_post(float *d_in, float *d_out, int n);
// Spectral first derivative: DCT-II (REDFT10) of d_in, multiply each
// coefficient by its wavenumber, inverse DST (IRODFT00), then rescale.
// d_in holds nn samples; n = nn-1 transform points.  All pointers are
// device memory; temporaries are allocated and freed per call.
void cufft_dct2_idst1_1d(float *d_in, float *d_out, float dx, int nn)
{
int n = nn-1;
float *d_in_dst;
cudaMalloc((void **)&d_in_dst, n*sizeof(float));
/* Cosine transform (REDFT10 = DCT-II) */
cufft_redft10_1d(d_in, d_in_dst, n);
float *d_in_tmp;
cudaMalloc((void **)&d_in_tmp, (n-1)*sizeof(float));
/* Multiplied by wavenumber */
dct2_idst1_1d_pre<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in_dst, d_in_tmp, dx, n);
float *d_out_tmp;
cudaMalloc((void **)&d_out_tmp, (n-1)*sizeof(float));
/* Inverse sine transform (IRODFT00) */
cufft_irodft00_1d(d_in_tmp, d_out_tmp, n-1);
/* Get the result */
dct2_idst1_1d_post<<<(n+threads_num-1)/threads_num, threads_num>>>(d_out_tmp, d_out, n);
cudaFree(d_in_dst);
cudaFree(d_in_tmp);
cudaFree(d_out_tmp);
}
// Converts DCT coefficients into the DST coefficients of the derivative:
// d_out[ix] (wavenumber ix+1) = -(ix+1)*pi/(n*dx) * d_in[ix+1].  The DC
// term d_in[0] (whose derivative is zero) is dropped; d_out has n-1 entries,
// all of which are written by the guarded store below.
// Fix: removed the unconditional 'd_out[0] = 0.0f' -- every thread executed
// it, racing against thread 0's computed store to d_out[0] and potentially
// zeroing the wavenumber-1 coefficient nondeterministically.
static __global__ void dct2_idst1_1d_pre(float *d_in, float *d_out, float dx, int n)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < n-1)
d_out[ix] = -(float)(ix+1)*pi/((float)n*dx)*d_in[ix+1];
}
// Copies the inverse-transform result into d_out[1..n-1] with 1/(2n)
// normalization and zeroes the right boundary d_out[n] (every thread stores
// the same 0.0f there, which is redundant but benign).
// NOTE(review): d_out[0] is never written here -- confirm the caller
// initializes it (for a sine-series derivative the left boundary should
// presumably also be 0).
static __global__ void dct2_idst1_1d_post(float *d_in, float *d_out, int n)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
if (ix < n-1)
d_out[ix+1] = d_in[ix]/(2.0f*(float)(n));
d_out[n] = 0.0f;
}
// Spectral first derivative: DST-II (RODFT10) of d_in, multiply each
// coefficient by its wavenumber, inverse DCT (REDFT00), then rescale.
// d_in holds nn samples; n = nn-1 transform points.  All pointers are
// device memory; temporaries are allocated and freed per call.
void cufft_dst2_idct1_1d(float *d_in, float *d_out, float dx, int nn)
{
int n = nn-1;
float *d_in_dst;
cudaMalloc((void **)&d_in_dst, n*sizeof(float));
/* Sine transform (RODFT10 = DST-II) */
cufft_rodft10_1d(d_in, d_in_dst, n);
float *d_in_tmp;
cudaMalloc((void **)&d_in_tmp, (n+1)*sizeof(float));
/* Multiplied by wavenumber */
dst2_idct1_1d_pre<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in_dst, d_in_tmp, dx, n);
float *d_out_tmp;
cudaMalloc((void **)&d_out_tmp, (n+1)*sizeof(float));
/* Inverse cosine transform */
cufft_redft00_1d(d_in_tmp, d_out_tmp, n+1);
/* Get the result */
dst2_idct1_1d_post<<<(n+threads_num-1)/threads_num, threads_num>>>(d_out_tmp, d_out, n);
cudaFree(d_in_dst);
cudaFree(d_in_tmp);
cudaFree(d_out_tmp);
}
static __global__ void dst2_idct1_1d_pre(float *d_in, float *d_out, float dx, int n)
{
    /* Turn the n DST-II coefficients into n+1 cosine coefficients of the
       derivative: d_out[k] = k*pi/(n*dx) * d_in[k-1] for k = 1..n, with
       the k = 0 (mean) mode zeroed. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    /* Unconditional: every thread (including out-of-range ones) writes the
       same 0.0f -- a benign same-value race, and the only store to index 0. */
    d_out[0] = 0.0f;
    if (ix < n)
        d_out[ix+1] = (float)(ix+1)*pi/((float)n*dx)*d_in[ix];
}
static __global__ void dst2_idct1_1d_post(float *d_in, float *d_out, int n)
{
    /* Normalize all n+1 inverse-transform samples by 2n, the combined
       scaling of the unnormalized forward/backward transform pair. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx > n) return;
    d_out[idx] = d_in[idx] / (2.0f * (float)n);
}
void cufft_dct1_idst2_1d(float *d_in, float *d_out, float dx, int n)
{
    /* Spectral first derivative: DCT-I (REDFT00) forward, multiply by the
       wavenumber, DST-III (RODFT01) back to physical space.
       NOTE: clobbers the caller's last input sample (d_in[n-1] is zeroed
       before the forward transform) -- a pre-existing side effect kept here.
       FIX: the memset previously cleared sizeof(int) bytes; the buffer
       holds floats, so sizeof(float) is the correct (and self-documenting)
       element size. */
    float *d_in_dst;
    cudaMalloc((void **)&d_in_dst, n*sizeof(float));
    /* zero the boundary sample, then run the forward cosine transform */
    cudaMemset((void *)&d_in[n-1], 0, sizeof(float));
    cufft_redft00_1d(d_in, d_in_dst, n);
    float *d_in_tmp;
    cudaMalloc((void **)&d_in_tmp, (n-1)*sizeof(float));
    /* multiply each mode by its wavenumber */
    dct1_idst2_1d_pre<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in_dst, d_in_tmp, dx, n);
    float *d_out_tmp;
    cudaMalloc((void **)&d_out_tmp, (n-1)*sizeof(float));
    /* inverse sine transform */
    cufft_rodft01_1d(d_in_tmp, d_out_tmp, n-1);
    /* normalize into the caller's buffer */
    dct1_idst2_1d_post<<<(n+threads_num-1)/threads_num, threads_num>>>(d_out_tmp, d_out, n);
    cudaFree(d_in_dst);
    cudaFree(d_in_tmp);
    cudaFree(d_out_tmp);
}
static __global__ void dct1_idst2_1d_pre(float *d_in, float *d_out, float dx, int n)
{
    /* Multiply the DCT-I modes by their wavenumber k = (ix+1)*pi/((n-1)*dx),
       dropping the mean mode, to build the n-1 sine coefficients of the
       derivative.  One thread per output element.
       FIX: the original also executed an unconditional `d_out[0] = 0.0f`
       in every thread, racing against thread 0's guarded store below
       (which already covers d_out[0..n-2]); the racy write was removed. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n-1)
        d_out[ix] = -(float)(ix+1)*pi/(float(n-1)*dx)*d_in[ix+1];
}
static __global__ void dct1_idst2_1d_post(float *d_in, float *d_out, int n)
{
    /* Copy the n-1 inverse-DST samples into d_out[0..n-2], normalized by
       2(n-1), and zero the last sample. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n-1)
        d_out[ix] = d_in[ix]/(2.0f*(float)(n-1));
    /* Every thread writes the same 0.0f -- a benign same-value race. */
    d_out[n-1] = 0.0f;
}
void cufft_dst1_idct2_1d(float *d_in, float *d_out, float dx, int n)
{
    /* Spectral first derivative: DST-I (RODFT00) forward, multiply by the
       wavenumber, DCT-III (REDFT01) back to physical space. */
    const int blocks = (n + threads_num - 1) / threads_num;
    float *d_sin, *d_scaled, *d_cos;
    cudaMalloc((void **)&d_sin,    n * sizeof(float));
    cudaMalloc((void **)&d_scaled, (n + 1) * sizeof(float));
    cudaMalloc((void **)&d_cos,    (n + 1) * sizeof(float));
    /* forward sine transform */
    cufft_rodft00_1d(d_in, d_sin, n);
    /* scale each mode by its wavenumber */
    dst1_idct2_1d_pre<<<blocks, threads_num>>>(d_sin, d_scaled, dx, n);
    /* inverse cosine transform */
    cufft_redft01_1d(d_scaled, d_cos, n + 1);
    /* normalize into the caller's buffer */
    dst1_idct2_1d_post<<<blocks, threads_num>>>(d_cos, d_out, n);
    cudaFree(d_sin);
    cudaFree(d_scaled);
    cudaFree(d_cos);
}
static __global__ void dst1_idct2_1d_pre(float *d_in, float *d_out, float dx, int n)
{
    /* Turn the n DST-I coefficients into n+1 cosine coefficients of the
       derivative: d_out[k] = k*pi/((n+1)*dx) * d_in[k-1] for k = 1..n,
       with the mean (k = 0) mode zeroed. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
        d_out[ix+1] = (float)(ix+1)*pi/(float(n+1)*dx)*d_in[ix];
    /* Every thread writes the same 0.0f -- a benign same-value race, and
       the only store to index 0. */
    d_out[0] = 0.0f;
}
static __global__ void dst1_idct2_1d_post(float *d_in, float *d_out, int n)
{
    /* Copy inverse-DCT samples 1..n-1 into d_out[0..n-2], normalized by
       2(n+1), and zero the last sample.  (d_in[0] is intentionally
       skipped; the source index is shifted by one.) */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n-1)
        d_out[ix] = d_in[ix+1]/(2.0f*(float)(n+1));
    /* Every thread writes the same 0.0f -- a benign same-value race. */
    d_out[n-1] = 0.0f;
}
void cufft_redft00_1d(float *d_in, float *d_out, int n)
{
    /* DCT-I (FFTW REDFT00) of n real device samples, computed by evenly
       extending the data to 2n-2 complex points and running one forward
       C2C FFT.  Unnormalized, matching the FFTW convention. */
    const int ext = 2*n - 2;
    cufftComplex *d_ext_in, *d_ext_out;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    /* build the even extension in complex form */
    DataExtSetRedft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    /* single forward FFT over the extended signal */
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_ext_in, d_ext_out, CUFFT_FORWARD);
    /* the first n real parts are the DCT-I coefficients */
    DataSubGetRedft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_ext_out);
}
void cufft_iredft00_1d(float *d_in, float *d_out, int n)
{
    /* Inverse DCT-I.  REDFT00 is its own inverse up to an overall scale,
       so the forward routine is reused; callers fold the normalization in
       themselves (see the *_post kernels in this file). */
    cufft_redft00_1d(d_in, d_out, n);
}
void cufft_redft10_1d(float *d_in, float *d_out, int n)
{
    /* DCT-II (FFTW REDFT10): mirror the n samples into 2n complex points,
       FFT forward, apply a half-sample phase shift, and keep the first n
       real parts.  Unnormalized. */
    const int ext = 2*n;
    cufftComplex *d_ext_in, *d_ext_out, *d_shifted;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_shifted, ext * sizeof(cufftComplex));
    /* half-sample even extension */
    DataExtSetRedft10_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_ext_in, d_ext_out, CUFFT_FORWARD);
    /* realign the mirrored grid with a +1/2-sample phase shift */
    PhaseShiftForw_1d<<<(ext+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_shifted, ext);
    DataSubGetRedft10_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_shifted, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_ext_out);
    cudaFree(d_shifted);
}
void cufft_redft01_1d(float *d_in, float *d_out, int n)
{
    /* DCT-III (FFTW REDFT01, the inverse of DCT-II): extend the spectrum
       to 2n complex points, undo the half-sample phase shift, run an
       inverse FFT, and keep the first n real parts.  Unnormalized. */
    const int ext = 2*n;
    cufftComplex *d_ext_in, *d_shifted, *d_ext_out;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_shifted, ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    /* spectral extension */
    DataExtSetRedft01_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    /* -1/2-sample phase shift before the inverse transform */
    PhaseShiftBack_1d<<<(ext+threads_num-1)/threads_num, threads_num>>>(d_ext_in, d_shifted, ext);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_shifted, d_ext_out, CUFFT_INVERSE);
    DataSubGetRedft01_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_shifted);
    cudaFree(d_ext_out);
}
void cufft_rodft00_1d(float *d_in, float *d_out, int n)
{
    /* DST-I (FFTW RODFT00) of n real samples: odd-extend to 2n+2 complex
       points, forward FFT, read the coefficients off the imaginary parts.
       Unnormalized. */
    const int ext = 2*n + 2;
    cufftComplex *d_ext_in, *d_ext_out;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    /* odd extension with zeroed endpoints */
    DataExtSetRodft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_ext_in, d_ext_out, CUFFT_FORWARD);
    DataSubGetRodft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_ext_out);
}
void cufft_irodft00_1d(float *d_in, float *d_out, int n)
{
    /* Inverse DST-I: load the coefficients onto the imaginary axis of a
       2n+2-point odd extension, run an inverse C2C FFT, and read the
       samples from the real parts.  Unnormalized. */
    const int ext = 2*n + 2;
    cufftComplex *d_ext_in, *d_ext_out;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    DataExtSetIRodft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_ext_in, d_ext_out, CUFFT_INVERSE);
    DataSubGetIRodft00_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_ext_out);
}
void cufft_rodft10_1d(float *d_in, float *d_out, int n)
{
    /* DST-II (FFTW RODFT10): odd-mirror the n samples into 2n complex
       points, FFT forward, apply a half-sample phase shift, read the
       result from the imaginary parts.  Unnormalized. */
    const int ext = 2*n;
    cufftComplex *d_ext_in, *d_ext_out, *d_shifted;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_shifted, ext * sizeof(cufftComplex));
    /* half-sample odd extension */
    DataExtSetRodft10_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_ext_in, d_ext_out, CUFFT_FORWARD);
    /* realign the mirrored grid with a +1/2-sample phase shift */
    PhaseShiftForw_1d<<<(ext+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_shifted, ext);
    DataSubGetRodft10_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_shifted, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_ext_out);
    cudaFree(d_shifted);
}
void cufft_rodft01_1d(float *d_in, float *d_out, int n)
{
    /* DST-III (FFTW RODFT01, the inverse of DST-II): extend the spectrum
       onto the imaginary axis of 2n complex points, undo the half-sample
       phase shift, inverse FFT, keep the first n real parts.
       Unnormalized. */
    const int ext = 2*n;
    cufftComplex *d_ext_in, *d_shifted, *d_ext_out;
    cudaMalloc((void **)&d_ext_in,  ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_shifted, ext * sizeof(cufftComplex));
    cudaMalloc((void **)&d_ext_out, ext * sizeof(cufftComplex));
    DataExtSetRodft01_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_in, d_ext_in, n);
    /* -1/2-sample phase shift before the inverse transform */
    PhaseShiftBack_1d<<<(ext+threads_num-1)/threads_num, threads_num>>>(d_ext_in, d_shifted, ext);
    cufftHandle plan;
    cufftPlan1d(&plan, ext, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_shifted, d_ext_out, CUFFT_INVERSE);
    DataSubGetRodft01_1d<<<(n+threads_num-1)/threads_num, threads_num>>>(d_ext_out, d_out, n);
    cufftDestroy(plan);
    cudaFree(d_ext_in);
    cudaFree(d_shifted);
    cudaFree(d_ext_out);
}
static __global__ void DataExtSetRedft00_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Even extension for REDFT00 (DCT-I): pack the n real samples into
       complex slots 0..n-1, then mirror d_in[1..n-2] into slots n..2n-3
       (slot j receives d_in[2n-2-j]), giving a 2n-2-point even signal. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
    {
        d_out[ix].x = d_in[ix];
        d_out[ix].y = 0.0f;
        /* only interior samples are mirrored; the two endpoints appear once */
        if (ix<n-2)
        {
            d_out[2*n-3-ix].x = d_in[ix+1];
            d_out[2*n-3-ix].y = 0.0f;
        }
    }
}
static __global__ void DataSubGetRedft00_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* Keep the real part of the first n spectral bins (the DCT-I result). */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = d_in[idx].x;
}
static __global__ void DataExtSetRedft10_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Half-sample even extension for REDFT10 (DCT-II): slots idx and
       2n-1-idx both receive d_in[idx], forming a 2n-point even signal. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    cufftComplex v;
    v.x = d_in[idx];
    v.y = 0.0f;
    d_out[idx] = v;
    d_out[2*n - 1 - idx] = v;
}
static __global__ void DataSubGetRedft10_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* Keep the real part of the first n phase-shifted spectral bins. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = d_in[idx].x;
}
static __global__ void DataExtSetRedft01_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Spectral extension for REDFT01 (DCT-III): slots 0..n-1 receive the
       n coefficients; slots n+1..2n-1 receive the mirror d_in[2n-j].
       NOTE(review): slot n is never written by this kernel, so the FFT
       reads uninitialized device memory there -- verify whether it should
       be zeroed. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
    {
        d_out[ix].x = d_in[ix];
        d_out[ix].y = 0.0f;
        if (ix<n-1)
        {
            d_out[2*n-1-ix].x = d_in[ix+1];
            d_out[2*n-1-ix].y = 0.0f;
        }
    }
}
static __global__ void DataSubGetRedft01_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* Keep the real part of the first n inverse-transform samples. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = d_in[idx].x;
}
static __global__ void DataExtSetRodft00_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Odd extension for RODFT00 (DST-I): slots 1..n receive the samples,
       slots n+2..2n+1 receive the negated mirror, and the two symmetry
       points (slots 0 and n+1) are zeroed, giving a 2n+2-point odd signal. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
    {
        d_out[ix+1].x = d_in[ix];
        d_out[ix+1].y = 0.0f;
        d_out[2*n+1-ix].x = -d_in[ix];
        d_out[2*n+1-ix].y = 0.0f;
    }
    /* Unconditional: every thread writes the same zeros -- benign
       same-value races. */
    d_out[0].x = d_out[0].y = 0.0f;
    d_out[n+1].x = d_out[n+1].y = 0.0f;
}
static __global__ void DataSubGetRodft00_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* DST-I coefficients sit on the (negated) imaginary parts of bins
       1..n of the odd-extended spectrum. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = -d_in[idx + 1].y;
}
static __global__ void DataExtSetIRodft00_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Inverse-DST-I setup: place the n coefficients on the imaginary axis
       (negated in slots 1..n, mirrored positive in slots n+2..2n+1) and
       zero the symmetry points, mirroring DataExtSetRodft00_1d with the
       roles of .x and .y swapped. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
    {
        d_out[ix+1].y = -d_in[ix];
        d_out[ix+1].x = 0.0f;
        d_out[2*n+1-ix].y = d_in[ix];
        d_out[2*n+1-ix].x = 0.0f;
    }
    /* Unconditional same-value zero writes by every thread -- benign races. */
    d_out[0].x = d_out[0].y = 0.0f;
    d_out[n+1].x = d_out[n+1].y = 0.0f;
}
static __global__ void DataSubGetIRodft00_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* Physical samples sit on the real parts of bins 1..n after the
       inverse transform. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = d_in[idx + 1].x;
}
static __global__ void DataExtSetRodft10_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Half-sample odd extension for RODFT10 (DST-II): slot idx receives
       d_in[idx], slot 2n-1-idx receives -d_in[idx]. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    cufftComplex v;
    v.x = d_in[idx];
    v.y = 0.0f;
    d_out[idx] = v;
    v.x = -d_in[idx];
    d_out[2*n - 1 - idx] = v;
}
static __global__ void DataSubGetRodft10_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* DST-II coefficients sit on the negated imaginary parts of the
       phase-shifted bins 1..n. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = -d_in[idx + 1].y;
}
static __global__ void DataExtSetRodft01_1d(float *d_in, cufftComplex *d_out, int n)
{
    /* Spectral extension for RODFT01 (DST-III): coefficients go on the
       imaginary axis, negated in slots 1..n and mirrored positive in
       slots n+1..2n-1, with slot 0 zeroed, giving a 2n-point odd
       spectrum. */
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < n)
    {
        d_out[ix+1].y = -d_in[ix];
        d_out[ix+1].x = 0.0f;
        if (ix<n-1)
        {
            d_out[2*n-1-ix].y = d_in[ix];
            d_out[2*n-1-ix].x = 0.0f;
        }
    }
    /* Unconditional same-value zero write by every thread -- benign race. */
    d_out[0].x = d_out[0].y = 0.0f;
}
static __global__ void DataSubGetRodft01_1d(cufftComplex *d_in, float *d_out, int n)
{
    /* Keep the real part of the first n inverse-transform samples. */
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    d_out[idx] = d_in[idx].x;
}
static __global__ void PhaseShiftForw_1d(cufftComplex *d_in, cufftComplex *d_out, int n)
{
    /* Multiply each of the n spectral bins by exp(-i*k/2), where k is the
       signed wavenumber (k in [-pi, pi), wrapped at bin n/2).  Realigns a
       half-sample-shifted extension after a forward FFT. */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n) return;
    const int half = n / 2;
    const float d_k = (ix < half)
        ? (float)ix * pi / (float)half
        : -pi + (float)(ix - half) * pi / (float)half;
    const float c = cosf(d_k / 2.0f);
    const float s = sinf(d_k / 2.0f);
    const cufftComplex v = d_in[ix];
    d_out[ix].x =  v.x * c + v.y * s;
    d_out[ix].y = -v.x * s + v.y * c;
}
static __global__ void PhaseShiftBack_1d(cufftComplex *d_in, cufftComplex *d_out, int n)
{
    /* Multiply each of the n spectral bins by exp(+i*k/2) -- the inverse
       of PhaseShiftForw_1d, applied before an inverse FFT. */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n) return;
    const int half = n / 2;
    const float d_k = (ix < half)
        ? (float)ix * pi / (float)half
        : -pi + (float)(ix - half) * pi / (float)half;
    const float c = cosf(d_k / 2.0f);
    const float s = sinf(d_k / 2.0f);
    const cufftComplex v = d_in[ix];
    d_out[ix].x = v.x * c - v.y * s;
    d_out[ix].y = v.x * s + v.y * c;
}
void data_test_forw_1d(float *h_in, float *h_out, int n)
{
    /* Host smoke test: run the DST-I (RODFT00) on h_in, result in h_out. */
    float *d_in, *d_out;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cufft_rodft00_1d(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
void data_test_back_1d(float *h_in, float *h_out, int n)
{
    /* Host smoke test: run the DST-III (RODFT01) on h_in, result in h_out. */
    float *d_in, *d_out;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cufft_rodft01_1d(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
void data_test_derv_1d(float *h_in, float *h_out, float dx, int n)
{
    /* Host smoke test: spectral derivative of h_in (spacing dx) via the
       DCT-II / inverse-DST-I path; result in h_out. */
    float *d_in, *d_out;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cufft_dct2_idst1_1d(d_in, d_out, dx, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
20,118 | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//
// template kernel routine
//
template <int size>
__global__ void my_first_kernel(float *x)
{
    /* Each thread fills a small per-thread array with exponentials of
       i*tid and writes the sum of all pairwise products to x[tid].
       `size` is a compile-time constant so the array lives in registers
       or local memory and the loops can be fully unrolled. */
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    float xl[size];
    for (int i = 0; i < size; ++i)
        xl[i] = expf((float) i * tid);
    float sum = 0.0f;
    for (int i = 0; i < size; ++i)
        for (int j = 0; j < size; ++j)
            sum += xl[i] * xl[j];
    x[tid] = sum;
}
//
// CUDA routine to be called by main code
//
extern
int prac6(int nblocks, int nthreads)
{
    /* Launch my_first_kernel for template sizes 2 and 3 and print every
       element.  The grid covers exactly nblocks*nthreads elements, so the
       kernel needs no bounds guard.  Returns 0. */
    float *h_x, *d_x;
    int nsize, n;
    // allocate memory for arrays
    nsize = nblocks*nthreads ;
    h_x = (float *)malloc(nsize*sizeof(float));
    cudaMalloc((void **)&d_x, nsize*sizeof(float));
    // execute kernel for size=2
    my_first_kernel<2><<<nblocks,nthreads>>>(d_x);
    cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
    for (n=0; n<nsize; n++) printf(" n, x = %d %g \n",n,h_x[n]);
    // execute kernel for size=3
    my_first_kernel<3><<<nblocks,nthreads>>>(d_x);
    // FIX: this copy previously used sizeof(int); the buffer holds floats.
    // (Same byte count on common ABIs, but the wrong type -- fragile.)
    cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
    for (n=0; n<nsize; n++) printf(" n, i = %d %g \n",n,h_x[n]);
    // free memory
    cudaFree(d_x);
    free(h_x);
    return 0;
}
|
20,119 | #include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
__global__ void find_maximum_kernel(float *array, float *max, int *mutex, int N, float start_max) {
    /* Grid-stride maximum reduction over array[0..N-1] into *max.
       Preconditions implied by the code below: blockDim.x must be a power
       of two (halving reduction) and at most 256 (fixed cache size), and
       *mutex must be 0 on entry.  start_max seeds each thread's running
       maximum. */
    int start_i = threadIdx.x + blockIdx.x*blockDim.x;
    int step = gridDim.x*blockDim.x;
    __shared__ float cache[256];
    /* per-thread partial maximum over a strided slice of the input */
    float temp_max = start_max;
    for(int i = start_i; i < N; i += step){
        temp_max = fmaxf(temp_max, array[i]);
    }
    cache[threadIdx.x] = temp_max;
    __syncthreads();
    /* shared-memory tree reduction; barrier is outside the divergent
       branch so every thread reaches it each iteration */
    for(int i = blockDim.x/2 ; i != 0 ; i /= 2){
        if(threadIdx.x < i){
            cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
        }
        __syncthreads();
    }
    /* one thread per block merges the block maximum into *max under a
       global spinlock; the lock serializes blocks (only thread 0 of each
       block contends, so there is no intra-warp lock divergence) */
    if(threadIdx.x == 0){
        while(atomicCAS(mutex,0,1) != 0); //lock
        *max = fmaxf(*max, cache[0]);
        atomicExch(mutex, 0); //unlock
    }
}
int main() {
    /* Compares a GPU grid-stride max reduction against a CPU linear scan
       over 20M random floats in roughly [-5000, 5000), timing both. */
    int N = 1000*1000*20;
    float *host_array;
    float *device_array;
    float *host_max;
    float *device_max;
    int *device_mutex;
    // allocate memory
    host_array = (float*)malloc(N*sizeof(float));
    host_max = (float*)malloc(sizeof(float));
    cudaMalloc((void**)&device_array, N*sizeof(float));
    cudaMalloc((void**)&device_max, sizeof(float));
    cudaMalloc((void**)&device_mutex, sizeof(int));
    // FIX: the mutex is an int; it was previously cleared with sizeof(float)
    // (same byte count on common ABIs, but the wrong type to reason about)
    cudaMemset(device_mutex, 0, sizeof(int));
    // fill host array with data
    srand(10);
    for(int i=0;i<N;i++){
        host_array[i] = 10000*float(rand()) / RAND_MAX-5000;
    }
    // FIX: seed the device maximum with the first element instead of 0 so
    // the result is correct even if every value is negative (the kernel
    // merges block maxima into *max with fmaxf)
    cudaMemcpy(device_max, host_array, sizeof(float), cudaMemcpyHostToDevice);
    // set up timing variables
    float gpu_elapsed_time;
    cudaEvent_t gpu_start, gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    // copy from host to device
    cudaEventRecord(gpu_start, 0);
    cudaMemcpy(device_array, host_array, N*sizeof(float), cudaMemcpyHostToDevice);
    // call kernel
    dim3 gridSize = 256;
    dim3 blockSize = 256;
    find_maximum_kernel<<< gridSize, blockSize >>>(device_array, device_max, device_mutex, N, host_array[0]);
    // copy from device to host (blocking, so it also synchronizes the kernel)
    cudaMemcpy(host_max, device_max, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    //report results
    std::cout<<"Maximum number found on gpu was: "<<*host_max<<std::endl;
    std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
    // run cpu version
    clock_t cpu_start = clock();
    *host_max = host_array[0];
    for(int i=0;i<N;i++){
        if(host_array[i] > *host_max){
            *host_max = host_array[i];
        }
    }
    clock_t cpu_stop = clock();
    clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
    std::cout<<"Maximum number found on cpu was: "<<*host_max<<std::endl;
    std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
    // free memory
    free(host_array);
    free(host_max);
    cudaFree(device_array);
    cudaFree(device_max);
    cudaFree(device_mutex);
}
|
20,120 | #include "includes.h"
__global__ void shared1RC1W1G(float *A, float *B, float *C, const int N)
{
    // Shared-memory access-pattern experiment: each thread writes one
    // shared slot and then reads a (generally different) slot written by
    // another thread, storing the value to global memory.
    // (Original Portuguese comment, translated: the compiler is clever and
    // reuses the value of i, but does 1 write / 2 reads on other shared
    // positions.)
    __shared__ float Smem[512];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        Smem[(threadIdx.x+1)%512] = i;
    }
    // FIX: a barrier is required between the shared-memory write above and
    // the cross-thread read below (otherwise it is a data race).  It must
    // sit outside the divergent branch so every thread in the block
    // reaches it.
    __syncthreads();
    if (i < N) {
        C[i] = Smem[(threadIdx.x*2)%512];
    }
}
20,121 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define R 5
#define C 40
__device__ int gpu_strlen(char * s)
{
    /* Device-side strlen: number of characters before the terminating
       '\0', matching the C library convention.
       FIX: the original used a post-increment in the loop condition and
       so counted the terminator, returning length + 1. */
    int i = 0;
    while(s[i] != '\0')
    {
        ++i;
    }
    return i;
}
__device__ int gpu_isAlpha(char ch)
{
    /* Returns 1 if ch is an ASCII letter (a-z or A-Z), 0 otherwise. */
    int lower = (ch >= 'a') && (ch <= 'z');
    int upper = (ch >= 'A') && (ch <= 'Z');
    return (lower || upper) ? 1 : 0;
}
__global__ void wordCount2( char **a, int **out, int numLine, int maxLineLen )
{
    /* One thread per character of an numLine x maxLineLen grid: marks
       out[iy][ix] where a non-letter occurs in line iy, then clears the
       cell following each marked cell.
       Fixes vs. the original:
         - the range check combined the two conditions with && (both
           indices had to be out of range before exiting) -- it must be
           an OR, and it now uses maxLineLen rather than the length of
           line 0;
         - gpu_strlen(a[iy]) was evaluated before iy was validated;
         - the early `return` sat before __syncthreads(), which every
           thread of the block must reach -- replaced by an `active`
           flag so the barrier is never skipped. */
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    __shared__ char s_data[C];
    bool active = (iy < numLine) && (ix < maxLineLen);
    int currLen = 0;
    if (active)
    {
        currLen = gpu_strlen(a[iy]);
        s_data[ix] = a[iy][ix];
        // each thread processes one character within a line
        if (ix < currLen && gpu_isAlpha(s_data[ix]) != 1)
        {
            out[iy][ix] += 1;
        }
    }
    __syncthreads();
    // NOTE(review): for ix == currLen-1 this writes out[iy][currLen];
    // safe only while every line is shorter than maxLineLen -- verify.
    if (active && ix < currLen && out[iy][ix] == 1)
        out[iy][ix + 1] = 0;
}
void checkErr()
{
    /* Print the most recent CUDA runtime error, if any occurred. */
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status)
        printf ("Cuda error -- %s\n", cudaGetErrorString(status));
}
void printArr( char **a, int lines )
{
    /* Print each of the given strings on its own line. */
    for (int i = 0; i < lines; ++i)
        printf("%s\n", a[i]);
}
int main()
{
    /* Builds R identical strings on the host, mirrors them to the device
       using the array-of-device-pointers pattern (h_out / h_count_out hold
       DEVICE row pointers), runs wordCount2 over an R x C character grid,
       and prints the per-character occurrence array.
       FIX: all host and device allocations are now released before exit
       (previously everything leaked). */
    int i, j;
    char **d_in, **h_in, **h_out;
    int h_count_in[R][C], **h_count_out, **d_count_in;
    for(i = 0; i < R; i++)
        for(j = 0; j < C; j ++)
            h_count_in[i][j] = 0;
    //allocate host-side pointer tables
    h_in = (char **)malloc(R * sizeof(char *));
    h_out = (char **)malloc(R * sizeof(char *));
    h_count_out = (int **)malloc(R * sizeof(int *));
    cudaDeviceReset();
    cudaMalloc((void ***)&d_in, sizeof(char *) * R);
    cudaMalloc((void ***)&d_count_in, sizeof(int *) * R);
    //allocate and fill the string rows
    for(i = 0; i < R; ++i)
    {
        cudaMalloc((void **) &h_out[i],C * sizeof(char));
        h_in[i]=(char *)calloc(C, sizeof(char));
        strcpy(h_in[i], "good morning and I'm a good student!");
        cudaMemcpy(h_out[i], h_in[i], strlen(h_in[i]) + 1, cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_in, h_out, sizeof(char *) * R,cudaMemcpyHostToDevice);
    //allocate and zero the per-row occurrence arrays
    for(i = 0; i < R; ++i)
    {
        cudaMalloc((void **) &h_count_out[i], C * sizeof(int));
        cudaMemset(h_count_out[i], 0, C * sizeof(int));
    }
    cudaMemcpy(d_count_in, h_count_out, sizeof(int *) * R,cudaMemcpyHostToDevice);
    printArr(h_in, R);
    printf("\n\n");
    //one thread per character: each block covers one full row
    dim3 grid, block;
    block.x = C;
    block.y = 1;
    grid.x = ceil((float)C / block.x);
    grid.y = ceil((float)R / block.y); //float cast avoids integer division
    printf("grid.x = %d, grid.y=%d\n", grid.x, grid.y );
    //launch kernel
    wordCount2<<<grid, block>>>( d_in, d_count_in, R, C);
    checkErr();
    //copy data back from device to host (blocking, so it also syncs)
    for(i = 0; i < R; ++i) {
        cudaMemcpy(h_count_in[i], h_count_out[i], sizeof(int) * C,cudaMemcpyDeviceToHost);
    }
    printf("Occurrence array obtained from device:\n");
    for(i = 0; i < R; i ++) {
        for(int j = 0; j < C; j ++)
            printf("%4d", h_count_in[i][j]);
        printf("\n");
    }
    //FIX: release every allocation made above
    for(i = 0; i < R; ++i) {
        cudaFree(h_out[i]);
        cudaFree(h_count_out[i]);
        free(h_in[i]);
    }
    cudaFree(d_in);
    cudaFree(d_count_in);
    free(h_in);
    free(h_out);
    free(h_count_out);
    return 0;
}
|
extern "C"
__global__ void matrixAdd(int n, int *a, int *b, int *c) {
    /* Element-wise c = (a + b) * 5 + 8 over an n-wide row-major matrix;
       one thread per element using a single block's 2-D thread index. */
    int col = threadIdx.x;
    int row = threadIdx.y;
    int idx = col + row * n;
    c[idx] = 5 * (a[idx] + b[idx]) + 8;
}
20,123 | //#include "global_gpu.cuh"
//
//float fc1_b[FC1_SIZE];
//float fc1_w[FC1_SIZE][ROWOW][COL];
//float fc2_b[FC2_SIZE];
//float fc2_w[FC2_SIZE][FC1_SIZE];
//
//__constant__ float _alpha;
//__constant__ int _minibatch;
//__constant__ int _epochs;
//
//__device__ int _correct_cnt;
//__device__ float _avg_error;
//
////int correct_cnt = 3;
////float avg_error = 2;
////float max_acc;
////
////float alpha = 0.2;
////int epochs = 5;
////int minibatch = 1;
//
//float train_image[TROWAIN_NUM][ROWOW][COL];
//int train_label[TROWAIN_NUM];
//float test_image[TEST_NUM][ROWOW][COL];
//int test_label[TEST_NUM];
//
//float input[ROWOW][COL];
//float fc1_z[FC1_SIZE];
//float fc1_a[FC1_SIZE];
//float fc2_z[FC2_SIZE];
//float fc2_a[FC2_SIZE];
//float output[FC2_SIZE];
//int answer[FC2_SIZE];
//
//__device__ float _train_image[TROWAIN_NUM][ROWOW][COL];
//__device__ int _train_label[TROWAIN_NUM];
//__device__ float _test_image[TEST_NUM][ROWOW][COL];
//__device__ int _test_label[TEST_NUM];
//
//__device__ float _fc1_b[FC1_SIZE];
//__device__ float _fc1_w[FC1_SIZE][ROWOW][COL];
//__device__ float _fc2_b[FC2_SIZE];
//__device__ float _fc2_w[FC2_SIZE][FC1_SIZE];
//
////__device__ float _input[ROWOW][COL];
//__device__ float _fc1_z[BATCH_SIZE][FC1_SIZE];
//__device__ float _fc1_a[BATCH_SIZE][FC1_SIZE];
//__device__ float _fc2_z[BATCH_SIZE][FC2_SIZE];
//__device__ float _fc2_a[BATCH_SIZE][FC2_SIZE];
//__device__ float _output[BATCH_SIZE][FC2_SIZE];
//__device__ int _answer[BATCH_SIZE][FC2_SIZE];
//
//__device__ float _fc1_db[BATCH_SIZE][FC1_SIZE];
//__device__ float _fc1_dw[BATCH_SIZE][FC1_SIZE][ROWOW][COL];
//__device__ float _fc2_db[BATCH_SIZE][FC2_SIZE];
//__device__ float _fc2_dw[BATCH_SIZE][FC2_SIZE][FC1_SIZE];
//__device__ float _C[BATCH_SIZE][FC2_SIZE];
//__device__ float _fc2_delta[BATCH_SIZE][FC2_SIZE];
//__device__ float _fc1_delta[BATCH_SIZE][FC1_SIZE];
//
//__device__ int tmp;
|
20,124 | // Device functions
__device__ __host__ float3 distanceBetweenPoints(float3 a, float3 b)
{
    /* Component-wise displacement vector from a to b (b - a).
       Despite the name, this is a vector, not a scalar distance. */
    float3 d;
    d.x = b.x - a.x;
    d.y = b.y - a.y;
    d.z = b.z - a.z;
    return d;
}
__device__ __host__ float vectorMagnitude(float3 v)
{
    /* Euclidean length of v.
       FIX: use sqrtf -- the double-precision sqrt forced a
       float -> double -> float round trip inside a float routine. */
    return sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
}
__device__ __host__ float3 GetNormal(float4 a, float4 b, float4 c)
{
    /* Normal of the triangle (a, b, c): the negated cross product of the
       edge vectors (b - a) and (c - a).  Not normalized. */
    float3 e1 = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
    float3 e2 = make_float3(c.x - a.x, c.y - a.y, c.z - a.z);
    float3 n;
    n.x = -(e2.y * e1.z - e2.z * e1.y);
    n.y = -(e2.z * e1.x - e2.x * e1.z);
    n.z = -(e2.x * e1.y - e2.y * e1.x);
    return n;
}
20,125 | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    /* Report a CUDA error with its source location; optionally exit with
       the error code.  Used via the gpuErrchk macro. */
    if (cudaSuccess == code) return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
////////// Global variables section //////////
__constant__ int BLOCK_COUNT;
__constant__ int THREADS_PER_BLOCK;
////////// End section //////////
////////// Device functions section //////////
__device__
int getThreadId() {
    /* Flatten a 2-D grid of 1-D blocks into a global thread index. */
    return (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
}
__device__
int getBlockId() {
    /* Linear block index within a 2-D grid. */
    int id = blockIdx.x;
    id += blockIdx.y * gridDim.x;
    return id;
}
__device__
int get1dIndex(int row, int col, int width) {
    /* Row-major flattening of a (row, col) pair into a 1-D offset. */
    return row * width + col;
}
__device__
int takeParent(double* fitness, int startIndex, double prob) {
    /* Roulette-wheel selection over one island's THREADS_PER_BLOCK
       normalized fitness values starting at startIndex: returns the first
       index whose cumulative fitness exceeds prob (in [0, 1)).  If
       floating-point rounding keeps the cumulative sum below prob, falls
       back to the island's last index (`--i` undoes the loop's final
       increment). */
    int i;
    double sum = 0;
    for(i = startIndex; i < startIndex + THREADS_PER_BLOCK; ++i) {
        sum += fitness[i];
        if(prob < sum) {
            return i;
        }
    }
    return --i;
}
////////// End section //////////
////////// Kernels section //////////
__global__
void initRandomKernel(unsigned int seed, curandState_t* states) {
    /* Seed one curand state per thread; the global thread id doubles as
       the RNG subsequence so streams are independent. */
    const int id = getThreadId();
    curand_init(seed, id, 0, &states[id]);
}
__global__
void uniformRandomSelectionKernel(curandState_t* states, double* selectionRandomArray) {
    /* Draw one uniform (0, 1] sample per thread into the array slot at
       the thread's global index. */
    const int id = getThreadId();
    selectionRandomArray[id] = curand_uniform(&states[id]);
}
__global__
void crossoverRandomPointsKernel(curandState_t* states, int* crossoverPointsArray, int max) {
    /* Draw one random crossover point per thread in [0, max-1). */
    const int id = getThreadId();
    crossoverPointsArray[id] = curand(&states[id]) % (max - 1);
}
///
/// \brief fitnessKernel calculates fit for each individual (thread per individual)
/// \param matrix - graph matrix
/// \param population
/// \param fitness - array for result of calculation
/// \param populationSize
/// \param matrixSize - number of rows(or columns) in matrix
///
__global__
void fitnessKernel(int* matrix, int* population, double* fitness, int populationSize, int matrixSize) {
    /* Thread-per-individual fitness: total edge cost of the cyclic tour
       stored in `population`, inverted (shorter tour => higher fitness)
       and then normalized so each block's (island's) fitness values sum
       to 1, ready for roulette selection.  Assumes one island per block
       with THREADS_PER_BLOCK individuals. */
    double fit = 0;
    int startIndex = getThreadId() * matrixSize;
    /* sum edge weights along the tour, then add the closing edge */
    for(int i = startIndex; i < startIndex + matrixSize - 1; ++i) {
        fit += matrix[get1dIndex(population[i], population[i + 1], matrixSize)];
    }
    fit += matrix[get1dIndex(population[startIndex + matrixSize - 1], population[startIndex], matrixSize)];
    fit = 1.0 / fit;
    fitness[getThreadId()] = fit;
    /* barrier: all raw fitness values of the block are visible before the
       normalization pass reads them */
    __syncthreads();
    double sum = 0;
    startIndex = getBlockId() * THREADS_PER_BLOCK;
    for(int i = startIndex; i < startIndex + THREADS_PER_BLOCK; ++i) {
        sum += fitness[i];
    }
    fit = fit / sum;
    /* second barrier keeps the normalized stores below from racing other
       threads' reads of the raw values above */
    __syncthreads();
    fitness[getThreadId()] = fit;
}
///
/// \brief breedKernel makes roulette selection, and perfroms OX crossover. Thread per two individuals
/// \param population
/// \param nextGeneration
/// \param fitness
/// \param probability - array with random numbers for roulette selection
/// \param crossoverPoints - array with random numbers for crossover
/// \param size - genes count in individual
///
__global__
void breedKernel(int* population, int* nextGeneration, double* fitness, double* probability, int* crossoverPoints, int size) {
    /* Thread-per-pair breeding: two roulette selections pick the parents
       within this thread's island (block), then an OX (order) crossover
       produces children 2*tid and 2*tid+1 in nextGeneration.
       `probability` and `crossoverPoints` hold pre-generated random
       numbers, two per thread each; `size` is genes per individual. */
    int startIndex = getBlockId() * THREADS_PER_BLOCK;
    int parent1Index = takeParent(fitness, startIndex, probability[getThreadId() * 2]);
    int parent2Index = takeParent(fitness, startIndex, probability[getThreadId() * 2 + 1]);
    /* order the two cut points for the OX slice [firstPoint, secondPoint) */
    int firstPoint = crossoverPoints[getThreadId() * 2];
    int secondPoint = crossoverPoints[getThreadId() * 2 + 1];
    if(firstPoint > secondPoint) {
        int tmp = firstPoint;
        firstPoint = secondPoint;
        secondPoint = tmp;
    }
    /* copy each parent's slice verbatim into its child */
    for(int i = firstPoint; i < secondPoint; ++i) {
        // child 1
        nextGeneration[get1dIndex(getThreadId() * 2, i, size)] = population[get1dIndex(parent1Index, i, size)];
        // child 2
        nextGeneration[get1dIndex(getThreadId() * 2 + 1, i, size)] = population[get1dIndex(parent2Index, i, size)];
    }
    /* fill the remaining positions, starting after the slice and wrapping,
       with the other parent's genes in order, skipping genes already
       present in the copied slice */
    int placeIndex1 = secondPoint;
    int placeIndex2 = secondPoint;
    for(int i = 0; i < size; ++i) {
        int index = (secondPoint + i) % size;
        int parent1Gene = population[get1dIndex(parent1Index, index, size)];
        int parent2Gene = population[get1dIndex(parent2Index, index, size)];
        bool isInSlice = false;
        for(int j = firstPoint; j < secondPoint; ++j) {
            if(population[get1dIndex(parent1Index, j, size)] == parent2Gene) {
                isInSlice = true;
                break;
            }
        }
        if(!isInSlice) {
            nextGeneration[get1dIndex(getThreadId() * 2, placeIndex1 % size, size)] = parent2Gene;
            placeIndex1++;
        }
        isInSlice = false;
        for(int j = firstPoint; j < secondPoint; ++j) {
            if(population[get1dIndex(parent2Index, j, size)] == parent1Gene) {
                isInSlice = true;
                break;
            }
        }
        if(!isInSlice) {
            nextGeneration[get1dIndex(getThreadId() * 2 + 1, placeIndex2 % size, size)] = parent1Gene;
            placeIndex2++;
        }
    }
}
// Swap mutation: when this individual's pre-generated uniform random is below
// 0.2, exchange the genes at two pre-generated positions. One thread per
// individual; randomPoints supplies two gene positions per thread.
__global__
void mutationKernel(int* population, double* prob, int* randomPoints, int size) {
    int self = getThreadId();
    if(!(prob[self] < 0.2f)) {
        return; // this individual is left unmutated
    }
    int geneA = get1dIndex(self, randomPoints[self * 2], size);
    int geneB = get1dIndex(self, randomPoints[self * 2 + 1], size);
    int held = population[geneA];
    population[geneA] = population[geneB];
    population[geneB] = held;
}
// Copies genes from this island's population into the migration pool; each
// thread moves genesPerThread consecutive genes.
// NOTE(review): the destination index pool[getThreadId() + i] only advances by
// one per gene while adjacent threads' getThreadId() also differ by one, so
// neighbouring threads overlap unless genesPerThread == 1 (the only value used
// by the commented-out call site in gaCuda) — confirm before enabling with
// larger values.
__global__
void copyToMigrationPoolKernel(int* population, int* pool, int genesPerThread, int size) {
    // source start: island (block) offset plus this thread's slice of genes
    int index = getBlockId() * THREADS_PER_BLOCK * size + threadIdx.x * genesPerThread;
    for(int i = 0; i < genesPerThread; ++i) {
        pool[getThreadId() + i] = population[index];
        ++index;
    }
}
// Copies genes from the migration pool into the NEXT island's population
// (ring topology: block b writes into island (b + 1) % BLOCK_COUNT).
// NOTE(review): same per-gene indexing caveat as copyToMigrationPoolKernel for
// genesPerThread > 1 — confirm indexing before enabling the migration kernels.
__global__
void copyFromMigrationPoolKernel(int* population, int* pool, int genesPerThread, int size) {
    int to = ((getBlockId() + 1) % BLOCK_COUNT) * THREADS_PER_BLOCK * size + threadIdx.x * genesPerThread;
    for(int i = 0; i < genesPerThread; ++i) {
        population[to] = pool[getThreadId() + i];
        ++to;
    }
}
////////// End section //////////
// Host driver for the island-model genetic algorithm (one CUDA block = one
// island of populationPerIslandSize individuals).
// graphMatrix: row-major matrixSize x matrixSize cost matrix.
// population:  row-major populationSize individuals, matrixSize genes each.
// Returns the fittest individual found (malloc'd, matrixSize ints; caller frees).
extern "C"
int* gaCuda(int* graphMatrix, int matrixSize, int* population, int populationPerIslandSize, int populationSize) {
    // Cuda configuration
    int threadsPerBlock = populationPerIslandSize;
    int numBlocks = 4;
    int migrationPoolPerIsland = 4;
    // Thread per gene (only used by the currently disabled migration kernels)
    int threadsNumberForMigration = migrationPoolPerIsland * matrixSize;
    // best individual
    int* best = (int*)malloc(matrixSize * sizeof(int));
    int* population1d;
    int* nextPopulation1d;
    int* migrationPool;
    double* fitnessArray;
    double* selectionRandomArray;
    int* randomPointsArray; // points for crossover and mutation
    int* matrixArray;
    curandState_t* states;
    // Allocate memory
    cudaMalloc(&matrixArray, matrixSize * matrixSize * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&population1d, populationSize * matrixSize * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&nextPopulation1d, populationSize * matrixSize * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&migrationPool, migrationPoolPerIsland * numBlocks * matrixSize * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&fitnessArray, populationSize * sizeof(double));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&states, populationPerIslandSize * 2 * numBlocks * sizeof(curandState_t));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&selectionRandomArray, populationPerIslandSize * numBlocks * sizeof(double));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc(&randomPointsArray, populationPerIslandSize * 2 * numBlocks * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    // Fill memory
    cudaMemcpy(matrixArray, graphMatrix, matrixSize * matrixSize * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    cudaMemcpy(population1d, population, populationSize * matrixSize * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    // publish the launch geometry to the device-side __constant__/__device__ symbols
    cudaMemcpyToSymbol(BLOCK_COUNT, &numBlocks, sizeof(int), 0, cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    cudaMemcpyToSymbol(THREADS_PER_BLOCK, &threadsPerBlock, sizeof(int), 0, cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    cudaMemset(nextPopulation1d, -1, populationSize * matrixSize * sizeof(int));
    gpuErrchk(cudaPeekAtLastError());
    fitnessKernel<<<numBlocks, threadsPerBlock>>>(matrixArray, population1d, fitnessArray, populationPerIslandSize, matrixSize);
    gpuErrchk(cudaPeekAtLastError()); // fix: this launch was previously unchecked
    for(int i = 0; i < 2500; ++i) {
        initRandomKernel<<<numBlocks, threadsPerBlock>>>(time(0), states);
        gpuErrchk(cudaPeekAtLastError());
        uniformRandomSelectionKernel<<<numBlocks, threadsPerBlock>>>(states, selectionRandomArray);
        gpuErrchk(cudaPeekAtLastError());
        crossoverRandomPointsKernel<<<numBlocks, threadsPerBlock>>>(states, randomPointsArray, matrixSize);
        gpuErrchk(cudaPeekAtLastError());
        // half as many threads: each breed thread produces two children
        breedKernel<<<numBlocks, threadsPerBlock / 2>>>(population1d, nextPopulation1d, fitnessArray, selectionRandomArray, randomPointsArray, matrixSize);
        gpuErrchk(cudaPeekAtLastError()); // fix: this launch was previously unchecked
        mutationKernel<<<numBlocks, threadsPerBlock>>>(nextPopulation1d, selectionRandomArray, randomPointsArray, matrixSize);
        gpuErrchk(cudaPeekAtLastError());
        fitnessKernel<<<numBlocks, threadsPerBlock>>>(matrixArray, nextPopulation1d, fitnessArray, populationPerIslandSize, matrixSize);
        gpuErrchk(cudaPeekAtLastError());
        if(i % 100 == 0 && i != 0) {
            // migration between islands is currently disabled (see kernels' review notes)
            // copyToMigrationPoolKernel<<<numBlocks, threadsNumberForMigration>>>(population1d, migrationPool, 1, matrixSize);
            gpuErrchk(cudaPeekAtLastError());
            // copyFromMigrationPoolKernel<<<numBlocks, threadsNumberForMigration>>>(population1d, migrationPool, 1, matrixSize);
            gpuErrchk(cudaPeekAtLastError());
        }
        cudaDeviceSynchronize();
        // next generation becomes the current one; scratch buffer is re-poisoned
        cudaMemcpy(population1d, nextPopulation1d, populationSize * matrixSize * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemset(nextPopulation1d, -1, populationSize * matrixSize * sizeof(int));
        gpuErrchk(cudaPeekAtLastError());
    }
    // index of the globally fittest individual
    int index = thrust::max_element(thrust::device, fitnessArray, fitnessArray + populationSize) - fitnessArray;
    cudaMemcpy(best, population1d + index * matrixSize, matrixSize * sizeof(int), cudaMemcpyDeviceToHost);
    gpuErrchk(cudaPeekAtLastError());
    cudaDeviceSynchronize();
    cudaFree(population1d);
    cudaFree(nextPopulation1d);
    cudaFree(fitnessArray);
    cudaFree(matrixArray);
    cudaFree(states);
    cudaFree(randomPointsArray);
    cudaFree(selectionRandomArray);
    cudaFree(migrationPool);
    return best;
}
|
20,126 | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 256
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 2
int LOG_BLOCK_SIZE;
// Sets the global LOG_BLOCK_SIZE to the smallest exponent e in [1, 30] such
// that 2^e >= block_size; exits with an error when no such exponent exists.
void getLogBlockSize(int block_size) {
    LOG_BLOCK_SIZE = 1;
    while(LOG_BLOCK_SIZE < 31) {
        if((1 << LOG_BLOCK_SIZE) >= block_size)
            return;
        ++LOG_BLOCK_SIZE;
    }
    fprintf(stderr, "The size requested might be too large!\n");
    exit(-1);
}
// Up-sweep (reduce) phase of the work-efficient prefix scan.
// Each thread loads a pair of elements (stride apart) into shared memory, the
// block builds partial sums in place with a binary tree, and the two owned
// slots are written back so the host can launch further passes with a larger
// stride. Shared indices get a (idx >> 4) offset as bank-conflict padding.
__global__ void kernel_reduction(float *inArray, int numElements, int stride, int numRest) {
    int tid = threadIdx.x;
    int bidx = blockIdx.x, bidy = blockIdx.y;
    // flat global thread index across the 2D grid
    int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy;
    __shared__ float idata[(BLOCK_SIZE << 1)+256];
    // this thread owns elements stride*(2*idx+1)-1 and the one stride above it
    int copyIdx = stride * ((idx << 1) + 1) - 1;
    int copyToIdx = tid<<1;
    copyToIdx += (copyToIdx>>4); // bank-conflict padding
    idata[copyToIdx] = inArray[copyIdx];
    // the second slot immediately holds the pair's partial sum
    idata[copyToIdx+1] = idata[copyToIdx] + inArray[copyIdx + stride];
    __syncthreads();
    int localStride = 2;
    // tree reduction within the block; numRest is the number of live values
    for(numRest>>=1;numRest > 1; numRest >>= 1, localStride <<= 1) {
        if((tid<<1) < numRest) {
            int idxOne = (localStride << 1) * (tid + 1) - 1;
            int idxTwo = idxOne - localStride;
            idxOne += (idxOne >> 4);
            idxTwo += (idxTwo >> 4);
            idata[idxOne] += idata[idxTwo];
        }
        __syncthreads(); // barrier is outside the divergent if: all threads reach it
    }
    // publish the partial sums back to global memory
    inArray[copyIdx] = idata[copyToIdx];
    inArray[copyIdx+stride] = idata[copyToIdx+1];
}
// Down-sweep phase of the work-efficient exclusive prefix scan.
// Two regimes, chosen by a condition that is uniform for the whole launch
// (it depends only on kernel arguments, so the branch does not diverge):
//  - final passes (startStride*2 == numElements): finish the traversal inside
//    shared memory with bank-conflict padding, like kernel_reduction;
//  - earlier passes: a single global-memory swap/accumulate step per thread.
__global__ void kernel_downtraverse(float *inArray, int numElements, int startStride, int LOG_BLOCK_SIZE) {
    int tid = threadIdx.x;
    int bidx = blockIdx.x, bidy = blockIdx.y;
    // flat global thread index across the 2D grid
    int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy;
    int finalStride = (startStride >> LOG_BLOCK_SIZE);
    if(finalStride <= 0)
        finalStride = 1;
    if((startStride << 1) == numElements) {
        __shared__ float idata[(BLOCK_SIZE<<1)+256];
        int copyIdx = finalStride * ((idx << 1) + 1) - 1;
        int copyToIdx = (tid<<1);
        copyToIdx += (copyToIdx>>4); // bank-conflict padding
        if(copyIdx < numElements){
            idata[copyToIdx] = inArray[copyIdx];
            idata[copyToIdx + 1] = inArray[copyIdx+finalStride];
        }
        __syncthreads();
        int localStride = blockDim.x;
        // classic down-sweep: push each node's value left, sum to the right
        while(localStride >= 1) {
            int idxOne = (localStride << 1) * (tid + 1) - 1;
            if(idxOne < (blockDim.x<<1)) {
                int idxTwo = idxOne - localStride;
                idxOne += (idxOne>>4);
                idxTwo += (idxTwo>>4);
                float tmp = idata[idxOne] + idata[idxTwo];
                idata[idxTwo] = idata[idxOne];
                idata[idxOne] = tmp;
            }
            localStride >>= 1;
            __syncthreads(); // outside the if: every thread reaches the barrier
        }
        if(copyIdx < numElements) {
            inArray[copyIdx] = idata[copyToIdx];
            inArray[copyIdx+finalStride] = idata[copyToIdx+1];
        }
    }
    else {
        // single traversal step at the given stride, directly in global memory
        int stride = startStride;
        int idxOne = (stride << 1) * (idx + 1) - 1;
        if(idxOne < numElements) {
            int idxTwo = idxOne - stride;
            float tmp = inArray[idxOne] + inArray[idxTwo];
            inArray[idxTwo] = inArray[idxOne];
            inArray[idxOne] = tmp;
        }
    }
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
// Exclusive prefix scan over a device array of numElements floats
// (numElements must be a power of two; callers round up via getSmallestPower2).
// Phase 1: repeated kernel_reduction passes build a sum tree in place.
// Phase 2: the last element is zeroed, then kernel_downtraverse passes walk
// the tree back down, doubling the grid each pass.
// Uses the global LOG_BLOCK_SIZE set by getLogBlockSize.
void prescanArray(float *inArray, int numElements)
{
    unsigned numRests = numElements; // values still to be reduced
    int stride = 1;
    while(numRests > 1) {
        unsigned threads = numRests / 2; // one thread per pair
        unsigned gridX = 1, gridY = 1;
        if(threads > BLOCK_SIZE) {
            gridX = threads / BLOCK_SIZE;
            threads = BLOCK_SIZE;
            // split into a 2D grid when gridX exceeds the per-dimension limit
            if(gridX > 32768) {
                gridY = gridX / 32768;
                gridX = 32768;
            }
        }
        dim3 grids(gridX, gridY);
        kernel_reduction<<<grids,threads>>>(inArray, numElements, stride, numRests > (2*BLOCK_SIZE)? (2*BLOCK_SIZE) : numRests);
        // each pass collapses 2^(LOG_BLOCK_SIZE+1) values into one
        stride <<= (LOG_BLOCK_SIZE + 1);
        numRests >>= (LOG_BLOCK_SIZE + 1);
    }
    /*
    cudaMemcpy(tmpArray, inArray, 10*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i=0;i<10;i++)
    printf("%f\n", tmpArray[i]);
    */
    // exclusive scan: clear the root (last element) before the down-sweep
    float tmpNum = 0.0f;
    cudaMemcpy(inArray + numElements - 1, &tmpNum, sizeof(float), cudaMemcpyHostToDevice);
    unsigned threads = BLOCK_SIZE;
    unsigned gridX = 1, gridY = 1;
    if(threads >= (numElements>>1)) {
        // small input: one block finishes the whole down-sweep
        threads = (numElements>>1);
        dim3 grids(gridX, gridY);
        kernel_downtraverse<<<grids, threads>>>(inArray, numElements, threads, LOG_BLOCK_SIZE);
    }
    else {
        // first (shared-memory) pass at the widest stride ...
        dim3 grids(gridX, gridY);
        kernel_downtraverse<<<grids, threads>>>(inArray, numElements, numElements>>1, LOG_BLOCK_SIZE);
        // ... then halve the stride and double the grid each subsequent pass
        int stride = numElements >> (LOG_BLOCK_SIZE + 2);
        while(stride>0) {
            gridX <<= 1;
            if(gridX > 32768) {
                gridX >>= 1;
                gridY <<= 1;
            }
            dim3 grids2(gridX, gridY);
            kernel_downtraverse<<<grids2, threads>>>(inArray, numElements, stride, LOG_BLOCK_SIZE);
            stride>>=1;
        }
    }
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
unsigned getSmallestPower2(unsigned);
unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: delegate everything to the scan test driver.
int main(int argc, char** argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// Get the power of 2 which is the least of the all powers that are not smaller
// than the given number
////////////////////////////////////////////////////////////////////////////////
// Returns the smallest power of two that is >= num.
// Exits with an error message when num is non-positive or when the result
// would overflow a signed int (detected by the doubling going <= 0).
int getSmallestPower2(int num) {
    int result = 1;
    while(result < num && result > 0)
        result <<= 1;
    if(result <= 0 || num <= 0) {
        // fix: message previously read "two large"
        fprintf(stderr, "The size requested might be too large!\n");
        exit(-1);
    }
    return result;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Scan test driver: builds a random +/-1 input (padded with zeros up to the
// next power of two), times a CPU reference scan (computeGold) and the GPU
// prescanArray, then compares the first compare_size outputs.
void
runTest( int argc, char** argv)
{
    float device_time;
    float host_time;
    int num_elements = 0; // Must support large, non-power-of-2 arrays
    int compare_size = 0; // only the original (unpadded) prefix is compared
    // allocate host memory to store the input data
    unsigned int mem_size = sizeof( float) * num_elements;
    float* h_data = NULL;
    // * No arguments: Randomly generate input data and compare against the
    // host's result.
    // * One argument: Randomly generate input data and write the result to
    // file name specified by first argument
    // * Two arguments: Read the first argument which indicates the size of the array,
    // randomly generate input data and write the input data
    // to the second argument. (for generating random input data)
    // * Three arguments: Read the first file which indicate the size of the array,
    // then input data from the file name specified by 2nd argument and write the
    // SCAN output to file name specified by the 3rd argument.
    // NOTE(review): only the default case is implemented below; the multi-file
    // modes described above are not handled here.
    switch(argc-1)
    {
    default: // No Arguments or one argument
        // initialize the input data on the host to be integer values
        // between 0 and 1000
        // Use DEFAULT_NUM_ELEMENTS num_elements
        if(argc <= 1)
            compare_size = num_elements = DEFAULT_NUM_ELEMENTS;
        else
            compare_size = num_elements = atoi(argv[1]);
        int tmp_size = num_elements;
        // pad the working size up to a power of two for the scan kernels
        num_elements = getSmallestPower2(num_elements);
        // allocate host memory to store the input data
        mem_size = sizeof( float) * num_elements;
        //h_data = (float*) malloc( mem_size);
        // pinned host memory for faster transfers
        cudaMallocHost(&h_data, mem_size);
        // initialize the input data on the host: zero the padded tail first
        for( unsigned int i = 0; i < num_elements; ++i)
        {
            // h_data[i] = 1.0f;
            h_data[i] = 0.0f;
        }
        // then fill the real prefix with random values in {-1, +1}
        for( unsigned int i = 0; i < tmp_size; ++i)
        {
            // h_data[i] = 1.0f;
            h_data[i] = (int)(rand() % MAX_RAND)*2 - 1;
        }
        break;
    }
    getLogBlockSize(BLOCK_SIZE);
    cudaEvent_t time_start;
    cudaEvent_t time_end;
    cudaEventCreate(&time_start);
    cudaEventCreate(&time_end);
    // compute reference solution (timed with the same CUDA events)
    float* reference = (float*) malloc( mem_size);
    cudaEventRecord(time_start, 0);
    computeGold( reference, h_data, num_elements);
    cudaEventRecord(time_end, 0);
    cudaEventSynchronize(time_end);
    cudaEventElapsedTime(&host_time, time_start, time_end);
    printf("\n\n**===-------------------------------------------------===**\n");
    printf("Processing %d elements...\n", num_elements);
    printf("Host CPU Processing time: %f (ms)\n", host_time);
    // allocate device memory input and output arrays
    float* d_idata = NULL;
    float* d_odata = NULL;
    cudaMalloc( (void**) &d_idata, mem_size);
    cudaMalloc( (void**) &d_odata, mem_size);
    // **===-------- Allocate data structure here -----------===**
    // preallocBlockSums(num_elements);
    // **===-----------------------------------------------------------===**
    // Run just once to remove startup overhead for more accurate performance
    // measurement
    prescanArray(d_idata, 16);
    // Run the prescan
    cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
    cudaEventRecord(time_start, 0);
    // **===-------- Modify the body of this function -----------===**
    prescanArray(d_idata, num_elements);
    // **===-----------------------------------------------------------===**
    // NOTE(review): cudaThreadSynchronize is the deprecated spelling of
    // cudaDeviceSynchronize
    cudaThreadSynchronize();
    cudaEventRecord(time_end, 0);
    cudaEventSynchronize(time_end);
    cudaEventElapsedTime(&device_time, time_start, time_end);
    // copy result from device to host (scan was done in place in d_idata)
    cudaMemcpy( h_data, d_idata, sizeof(float) * compare_size,
        cudaMemcpyDeviceToHost);
    printf("CUDA Processing time: %f (ms)\n", device_time);
    printf("Speedup: %fX\n", host_time/device_time);
    // **===-------- Deallocate data structure here -----------===**
    // deallocBlockSums();
    // **===-----------------------------------------------------------===**
    // Check if the result is equivalent to the expected soluion
    unsigned int result_regtest = cutComparef( reference, h_data, compare_size, 1e-6);
    printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
    // cleanup memory
    cudaFreeHost(h_data);
    free( reference);
    cudaFree( d_odata);
    cudaFree( d_idata);
    printf("------------------------------------------------------\n\n");
}
// Compares h_data against reference element-wise with relative tolerance err
// (the denominator is clamped to 1.0, so small reference values are compared
// with an absolute tolerance). Returns 1 when all elements match, 0 otherwise.
// Fix: removed a leftover debug getchar() that blocked the program waiting for
// keyboard input on EVERY mismatching element, plus the surrounding dead
// debug-print branches.
unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err) {
    int i;
    int diff_count = 0;
    for (i = 0; i < num_elements; i++) {
        float diff = fabs(reference[i] - h_data[i]);
        float denominator = 1.f;
        if (denominator < fabs(reference[i])) {
            denominator = fabs(reference[i]);
        }
        // written as !(x < err) so NaN differences also count as mismatches
        if (!(diff / denominator < err)) {
            diff_count ++;
        }
    }
    if (diff_count > 0) {
        printf("Number of difference: %d\n", diff_count);
        return 0;
    } else {
        return 1;
    }
}
|
20,127 | #include "includes.h"
// Define and implement the GPU addition function
// This version is a vector addition, with N threads
// and and N blocks
// Adding one a and b instance and storing in one c instance.
// Nmber of blocks
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no `index < n` bounds guard, so the launch must
// supply EXACTLY one thread per element (N total via N/THREADS_PER_BLOCK
// blocks, per the macros above); any extra threads read and write out of
// bounds — confirm all call sites launch an exact grid.
__global__ void add(int *a, int *b, int *c)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    c[index] = a[index] + b[index];
}
20,128 | // Salt and pepper noise removal via inpainting with Cuda C/C++
// Original framework for code taken from imflipG.cu
// Modified by Ethan Webster and Ashley Suchy
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <math.h>
#include <ctype.h>
#include <cuda.h>
#include <time.h>
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define CEIL(a,b) ((a+b-1)/b)
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
//image properties struct
struct ImgProp{
    int Hpixels;        // image width in pixels
    int Vpixels;        // image height in pixels
    uch HeaderInfo[54]; // raw 54-byte BMP header, reused verbatim when writing output
    ul Hbytes;          // bytes per pixel row, padded up to a multiple of 4
} ip;
// noisy pixel location struct (raster coordinates)
typedef struct{
    ui i; // row
    ui j; // column
}pixelCoords;
// buffers for images, noisy pixel matrix, and kernel indices
uch *TheImg, *CopyImg;
uch *GPUImg, *GPUCopyImg, *GPUptr, *GPUResult, *NoiseMap, *KernelIndices;
double *GPU_PREV_BW, *GPU_CURR_BW;
// noisy pixel locations
pixelCoords *NoisyPixelCoords;
// mutex variables for tracking noisy pixels and SAD
ui *GlobalMax, *GlobalMin, *NumNoisyPixelsGPU, *GPUmutexes, *GPU_SAD;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that locates potentially noisy pixels in an image by using impulse noise detection
// One thread per pixel; each thread examines its 3x3 neighbourhood (centre
// excluded), helps maintain global min/max intensities via atomics, and
// appends its coordinates to `locations` (marking noiseMap = 0) when its
// intensity equals the suspected salt or pepper value.
__global__
void findNoisyPixels(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, ui*globalMax, ui*globalMin, ui*ListLength, ui Hpixels, ui Vpixels)
{
    // threads/blocks info and IDs
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    //ui NumBlocks = gridDim.x;
    ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    // leave buffer frame around image to avoid 8 edge cases for convolutions
    // (this is a PERFECT example of avoiding unecessary complexity!!!)
    if (MYcol > Hpixels-4 || MYcol < 3 || MYrow > Vpixels-4 || MYrow < 3) return;
    ui MYpixIndex = MYrow * Hpixels + MYcol; // pixel index in B&W image
    uch pIJ = ImgSrc[MYpixIndex]; // centre pixel intensity
    uch max = 0;
    uch min = 255;
    uch curr;
    uch nMax; // suspected "salt" intensity
    uch nMin; // suspected "pepper" intensity
    // NOTE(review): atomicMax/atomicMin return the OLD ui value, which is
    // narrowed into these uch variables; the stored values only ever come from
    // 8-bit intensities (and the host seeds {0, 255}), so the narrowing should
    // be lossless — confirm if the seeding changes.
    uch oldMax;
    uch oldMin;
    int row;
    int col;
    int indx;
    // find min and max pixel intensities in current window
    for (int i = -1; i <= 1; i++){
        for (int j = -1; j <= 1; j++){
            if(!(j==0 && i==0)){
                row = MYrow + i;
                col = MYcol + j;
                indx = row*Hpixels + col;
                curr = ImgSrc[indx];
                if(curr > max)
                    max = curr;
                if(curr < min)
                    min = curr;
            }
        }
    }
    // atomically update global max and min pixel intensities
    oldMax = atomicMax(globalMax, (ui)max);
    oldMin = atomicMin(globalMin, (ui)min);
    // if the old max wasn't updated, then max is "salt" noise
    // otherwise, we must assume that 255 is "salt" noise
    // NOTE(review): oldMax == max also holds when the stored value happened to
    // equal our local max — verify this heuristic does what the comment claims.
    if(oldMax == max)
        nMax = max;
    else
        nMax = 255;
    // if the old min wasn't updated, then min is "pepper" noise
    // otherwise, we must assume that 0 is "pepper" noise
    if(oldMin == min)
        nMin = min;
    else
        nMin = 0;
    // if the current pixel intensity is equal to min or max,
    // then it is likely s&p noise. Mark as such.
    if(pIJ == nMin || pIJ == nMax){
        // reserve a unique slot in the noisy-pixel list
        int listIndex = atomicAdd(ListLength, (ui)1);
        locations[listIndex].i = MYrow;
        locations[listIndex].j = MYcol;
        noiseMap[MYpixIndex] = 0; // 0 marks "noisy" in the map
    }
}
// Inpainting convolution weights, selected per noisy pixel by determineMasks
// and applied by Convolute. mask0's weights sum exactly to 1; the directional
// masks concentrate weight along one orientation so edges are preserved.
//3x3 standard mask
__constant__
double mask0[3][3] = { {0.1036, 0.1464, 0.1036},
                       {0.1464, 0, 0.1464},
                       {0.1036, 0.1464, 0.1036}};
// horizontal 5x5 mask
__constant__
double mask1[5][5] = { {0, 0, 0, 0, 0 },
                       {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 },
                       {0.0520, 0.1040, 0, 0.1040, 0.0520 },
                       {0.0465, 0.0735, 0.1040, 0.0735, 0.0465 },
                       {0, 0, 0, 0, 0 }};
//vertical 5x5 mask
__constant__
double mask2[5][5] = { {0, 0.0465, 0.0520, 0.0465, 0},
                       {0, 0.0735, 0.1040, 0.0735, 0},
                       {0, 0.1040, 0, 0.1040, 0},
                       {0, 0.0735, 0.1040, 0.0735, 0},
                       {0, 0.0465, 0.0520, 0.0465, 0}};
//45 degree 7x7 mask
__constant__
double mask3[7][7] = { {0, 0, 0, 0, 0.0251, 0, 0 },
                       {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 },
                       {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 },
                       {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 },
                       {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 },
                       {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 },
                       {0, 0, 0.0251, 0, 0, 0, 0 }};
//135 degree 7x7 mask
__constant__
double mask4[7][7] = { {0, 0, 0.0251, 0, 0, 0, 0 },
                       {0, 0.0281, 0.0355, 0.0397, 0, 0, 0 },
                       {0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 },
                       {0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 },
                       {0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 },
                       {0, 0, 0, 0.0397, 0.0355, 0.0281, 0 },
                       {0, 0, 0, 0, 0.0251, 0, 0 }};
// Kernel that determines the appropriate inpainting mask for each noisy pixel
// based on surrounding noiseless pixels. One thread per noisy pixel.
// Writes kernelIndices[t] = 0 (3x3 fallback when any directional mask has
// fewer than R noise-free neighbours) or 1..4 (the directional mask whose
// noise-free neighbours have the smallest variance).
// Fixes: (1) the out-of-range guard used `>` instead of `>=`, letting thread
// gtid == ListLength run; (2) `sum` and `standardDeviation` were never reset
// between the four candidate masks, so mask 1..3 means/variances were computed
// from accumulated garbage.
__global__
void determineMasks(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, uch *kernelIndices, ui ListLength, ui Hpixels, ui R) {
    // threads/blocks info and IDs
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    // ensure not out-of-bounds (fix: was `>`)
    if (MYgtid >= ListLength) return;
    // masked arrays of those pixels denoted as noise-free:
    // four 14-entry value lists (one per directional mask) plus 4 lengths
    uch noiseFreeLists[60];
    uch *maskA = noiseFreeLists;
    uch *maskB = maskA+14;
    uch *maskC = maskB+14;
    uch *maskD = maskC+14;
    uch *listLengths = maskD+14;
    uch *currMask;
    uch currListLength;
    // control and tracking variables
    int i, j, row, col, indx, maskAIndx=0, maskBIndx=0, maskCIndx=0, maskDIndx=0, chosenMask=0;
    float minStdDev=1000000.0, currStdDev, sum, mean, standardDeviation;
    // obtain current noisy pixel indices
    pixelCoords currCoord = locations[MYgtid];
    ui MYrow = currCoord.i;
    ui MYcol = currCoord.j;
    // iterate through both 5x5 masks to find values of noise-free pixels
    for (i = -2; i <= 2; i++){
        for (j = -2; j <= 2; j++){
            // find current absolute index
            row = MYrow + i;
            col = MYcol + j;
            indx = row*Hpixels + col;
            // if the current pixel is noise-free AND
            if(noiseMap[indx]){
                // if the current 5x5 horizontal mask cell is not 0
                if((int)mask1[i+2][j+2]) {
                    // obtain noise free pixel and add to list
                    maskA[maskAIndx] = ImgSrc[indx];
                    maskAIndx++;
                }
                // if the current 5x5 vertical mask cell is not 0
                if((int)mask2[i+2][j+2]) {
                    // obtain noise free pixel and add to list
                    maskB[maskBIndx] = ImgSrc[indx];
                    maskBIndx++;
                }
            }
        }
    }
    // iterate through both 7x7 masks to find values of noise-free pixels
    for (i = -3; i <= 3; i++){
        for ( j = -3; j <= 3; j++){
            // find current absolute index
            row = MYrow + i;
            col = MYcol + j;
            indx = row*Hpixels + col;
            // if the current pixel is noise-free AND
            if(noiseMap[indx]){
                // if the current 7x7 45 degree mask cell is not 0
                if((int)mask3[i+3][j+3]) {
                    // obtain noise free pixel and add to list
                    maskC[maskCIndx] = ImgSrc[indx];
                    maskCIndx++;
                }
                // if the current 7x7 135 degree mask cell is not 0
                if((int)mask4[i+3][j+3]) {
                    // obtain noise free pixel and add to list
                    maskD[maskDIndx] = ImgSrc[indx];
                    maskDIndx++;
                }
            }
        }
    }
    // if the amounts of noise free pixels in any of the directional masks is
    // below threshold R, then we use 3x3 convolution
    // this helps to mitigate promoting false edges
    if(maskAIndx < R || maskBIndx < R || maskCIndx < R || maskDIndx < R)
        chosenMask = 0;
    else {
        // assign list lengths for smoother access
        listLengths[0] = maskAIndx;
        listLengths[1] = maskBIndx;
        listLengths[2] = maskCIndx;
        listLengths[3] = maskDIndx;
        // find the mask index (from 1 to 4) of minimum standard deviation
        for(i=0; i < 4; i++) {
            currListLength = listLengths[i];
            currMask = maskA+(i*14);
            // fix: reset the accumulators for THIS mask; they previously
            // carried over between iterations, corrupting mean and variance
            sum = 0.0;
            standardDeviation = 0.0;
            // first find mean of array
            for(j = 0; j < currListLength; j++)
            {
                sum += (float)currMask[j];
            }
            mean = sum/currListLength;
            // then find sum of individual deviations
            for(j = 0; j < currListLength; j++)
                standardDeviation += pow((float)currMask[j] - mean, 2);
            // final StdDev^2 is normalized by list length
            currStdDev = standardDeviation / currListLength;
            if(currStdDev < minStdDev) {
                chosenMask = i+1;
                minStdDev = currStdDev;
            }
        }
    }
    // assign the mask index that was chosen
    kernelIndices[MYgtid] = chosenMask;
}
// inpainting convolutions based on kernel indices
// One thread per noisy pixel: convolves the neighbourhood of the pixel with
// the mask chosen by determineMasks (kernalI value 0..4) and writes the
// result into ImgCurr at the same index. ImgBW is the previous iteration's
// image; reads stay in-bounds because findNoisyPixels excluded a 3-pixel
// border frame.
__global__
void Convolute(double *ImgCurr, double *ImgBW, pixelCoords *pc, uch *kernalI, ui numNoisy, ui Hpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    if (MYgtid >= numNoisy) return; // index out of range
    // current noisy pixel coordinates
    ui i=pc[MYgtid].i,j=pc[MYgtid].j,m=kernalI[MYgtid];
    // absolute pixel index
    ui MYpixIndex = i * Hpixels + j;
    int a,b,row,col,index;
    double C = 0.0; // convolution accumulator
    // based on the kernel index, convolutes with the correct mask
    switch(m)
    {
    // 0: 3x3 standard mask
    case 0: for (a = -1; a <= 1; a++){
                for (b = -1; b <= 1; b++){
                    row = i + a;
                    col = j + b;
                    index = row*Hpixels + col;
                    C += (ImgBW[index] * mask0[a + 1][b + 1]);
                }
            }
            ImgCurr[MYpixIndex] = C;
            break;
    // 1: 5x5 horizontal mask
    case 1: for (a = -2; a <= 2; a++){
                for (b = -2; b <= 2; b++){
                    row = i + a;
                    col = j + b;
                    index = row*Hpixels + col;
                    C += (ImgBW[index] * mask1[a + 2][b + 2]);
                }
            }
            ImgCurr[MYpixIndex] = C;
            break;
    // 2: 5x5 vertical mask
    case 2: for (a = -2; a <= 2; a++){
                for (b = -2; b <= 2; b++){
                    row = i + a;
                    col = j + b;
                    index = row*Hpixels + col;
                    C += (ImgBW[index] * mask2[a + 2][b + 2]);
                }
            }
            ImgCurr[MYpixIndex] = C;
            break;
    // 3: 7x7 45-degree mask
    case 3: for (a = -3; a <= 3; a++){
                for (b = -3; b <= 3; b++){
                    row = i + a;
                    col = j + b;
                    index = row*Hpixels + col;
                    C += (ImgBW[index] * mask3[a + 3][b + 3]);
                }
            }
            ImgCurr[MYpixIndex] = C;
            break;
    // 4 (and anything else): 7x7 135-degree mask
    default: for (a = -3; a <= 3; a++){
                for (b = -3; b <= 3; b++){
                    row = i + a;
                    col = j + b;
                    index = row*Hpixels + col;
                    C += (ImgBW[index] * mask4[a + 3][b + 3]);
                }
            }
            // assign convolution sum to current noisy pixel index
            ImgCurr[MYpixIndex] = C;
            break;
    }
}
// sum of absolute differences, reconstruction progress tracking mechanism
// Sum of absolute (rounded) differences between the previous and current
// reconstructions, evaluated only at the noisy pixel locations; each thread
// atomically accumulates its pixel's contribution into *sad.
// Fix: the old code computed (int)(diff + 0.5) BEFORE taking the absolute
// value, so negative differences were rounded toward zero (e.g. -0.8 -> 0
// instead of 1); we now round the absolute difference.
__global__
void SAD(ui *sad, double *prev, double *current, pixelCoords *pc, ui numNoisy, ui Hpixels, ui Vpixels)
{
    // thread IDs
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    if (MYgtid >= numNoisy) return; // index out of range
    ui i=pc[MYgtid].i, j=pc[MYgtid].j; // current noisy pixel coordinates
    ui MYpixIndex = i * Hpixels + j; // absolute index
    // absolute difference of old and updated pixel values, rounded to nearest
    int absDiff = (int)(fabs(prev[MYpixIndex] - current[MYpixIndex]) + 0.5);
    atomicAdd(sad, (ui)absDiff); // update global sum
}
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
// and a uch type for noisy pixel tracking
// Converts a 24bpp BGR image to B&W: one thread per pixel, writing both a uch
// intensity (ImgBW) and a double-precision copy (ImgfpBW) used by the
// floating-point inpainting iterations.
__global__
void BWKernel(uch *ImgBW, uch *ImgGPU, double *ImgfpBW, ui Hpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    double R, G, B;
    //ui NumBlocks = gridDim.x;
    ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
    // BMP rows are padded up to a multiple of 4 bytes
    ui RowBytes = (Hpixels * 3 + 3) & (~3);
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return; // col out of range
    ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
    ui MYpixIndex = MYrow * Hpixels + MYcol;
    // BMP stores channels in B, G, R order
    B = (double)ImgGPU[MYsrcIndex];
    G = (double)ImgGPU[MYsrcIndex + 1];
    R = (double)ImgGPU[MYsrcIndex + 2];
    // unweighted channel average as the grayscale intensity
    ImgBW[MYpixIndex] = (uch)((R+G+B)/3.0);
    ImgfpBW[MYpixIndex] = (R+G+B)/3.0;
}
// Kernel that calculates a RGB (grayscale) version of B&W image for filing as Windows BMP
// Expands the B&W (double) image back into 24bpp BMP pixel data by writing the
// same intensity into all three channels. One thread per pixel.
__global__
void RGBKernel(uch *ImgRGB, double *ImgBW, ui Hpixels)
{
    ui threadsPerBlock = blockDim.x;
    ui globalTid = threadsPerBlock * blockIdx.x + threadIdx.x;
    ui blocksPerRow = CEIL(Hpixels, threadsPerBlock);
    // BMP rows are padded up to a multiple of 4 bytes
    ui rowBytes = (Hpixels * 3 + 3) & (~3);
    ui row = blockIdx.x / blocksPerRow;
    ui col = globalTid - row * blocksPerRow * threadsPerBlock;
    if (col >= Hpixels) return; // column beyond the image edge
    ui dstOffset = row * rowBytes + 3 * col;
    // double -> uch conversion truncates, matching the original behaviour
    uch intensity = ImgBW[row * Hpixels + col];
    // grayscale: R = G = B = intensity
    ImgRGB[dstOffset] = intensity;
    ImgRGB[dstOffset + 1] = intensity;
    ImgRGB[dstOffset + 2] = intensity;
}
// Kernel that copies just the noisy pixels from one part of the
// GPU memory (ImgSrc) to another (ImgDst)
// Copies only the noisy pixels (listed in pc) from ImgSrc into NPDst,
// one thread per list entry; both images share the same linear layout.
__global__
void NoisyPixCopy(double *NPDst, double *ImgSrc, pixelCoords *pc, ui NoisyPixelListLength, ui Hpixels)
{
    ui globalTid = blockDim.x * blockIdx.x + threadIdx.x;
    if (globalTid >= NoisyPixelListLength) return; // beyond the list
    pixelCoords coord = pc[globalTid];
    ui pixIndex = coord.i * Hpixels + coord.j;
    NPDst[pixIndex] = ImgSrc[pixIndex];
}
// Kernel that copies an image from one part of the
// GPU memory (ImgSrc) to another (ImgDst)
// Copies an FS-element image buffer from ImgSrc to ImgDst, one element per
// thread; threads beyond the buffer simply exit.
// Fix: the guard was `MYgtid > FS`, which let the thread with gtid == FS read
// and write one element past the end of the FS-element buffers.
__global__
void PixCopy(double *ImgDst, double *ImgSrc, ui FS)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    if (MYgtid >= FS) return; // outside the allocated memory
    ImgDst[MYgtid] = ImgSrc[MYgtid];
}
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocates memory for the image and returns its pointer (NULL when the
// allocation fails); exits on open failure or a truncated header.
// Side effect: fills the global `ip` with width/height/row-bytes/header.
uch *ReadBMPlin(char* fn)
{
    static uch *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
    uch HeaderInfo[54];
    // read the 54-byte header; fix: the read was previously unchecked, so a
    // truncated file silently produced garbage dimensions
    if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){
        printf("\n\n%s is not a valid BMP file\n\n", fn);
        fclose(f);
        exit(EXIT_FAILURE);
    }
    // extract image height and width from header
    int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
    int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
    // rows are padded up to a multiple of 4 bytes
    int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
    //save header for re-use
    memcpy(ip.HeaderInfo, HeaderInfo,54);
    printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
        ip.Hpixels, ip.Vpixels, IMAGESIZE);
    // allocate memory to store the main image (1 Dimensional array)
    Img = (uch *)malloc(IMAGESIZE);
    // fix: close the file before returning on allocation failure
    // (the handle was previously leaked)
    if (Img == NULL){ fclose(f); return Img; } // Cannot allocate memory
    // read the image from disk
    fread(Img, sizeof(uch), IMAGESIZE, f);
    fclose(f);
    return Img;
}
// Write the 1D linear-memory stored image into file.
// Re-emits the 54-byte BMP header captured by ReadBMPlin (global 'ip'),
// then the raw pixel data. Exits the process if the file cannot be created.
void WriteBMPlin(uch *Img, char* fn)
{
	FILE* f = fopen(fn, "wb");
	if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
	//write header (saved from the input image, so dimensions match)
	fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
	//write data
	fwrite(Img, sizeof(uch), IMAGESIZE, f);
	printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
	fclose(f);
}
// Entry point of the GPU random-noise-removal pipeline:
//   1. parse args (input/output BMP, thresholds T and R, threads/block),
//   2. read input BMP, allocate one big GPU buffer and carve it up,
//   3. grayscale -> locate noisy pixels -> choose inpainting masks,
//   4. iterate Convolute/SAD until the sum of absolute differences <= T,
//   5. convert back to RGB, copy to host, write the output BMP.
int main(int argc, char **argv)
{
	float /*totalTime, tfrCPUtoGPU, tfrGPUtoCPU,*/ kernelExecutionTime; // GPU code run times
	cudaError_t cudaStatus;
	cudaEvent_t time1, time2;//, time3, time4;
	char InputFileName[255], OutputFileName[255], ProgName[255];
	ui BlkPerRow, ThrPerBlk=256, NumBlocks, /* GPUDataTransfer,*/ NumBlocksNP;
	cudaDeviceProp GPUprop;
	ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
	// mutexInit seeds GlobalMax=0, GlobalMin=255, NumNoisyPixelsGPU=0, GPU_SAD=0
	ui GPUtotalBufferSize, R = 5, T = 5, NumNoisyPixelsCPU, mutexInit[4] = {0, 255, 0, 0};
	ui CPU_SAD;
	strcpy(ProgName, "randNoiseRemoval");
	// fall-through switch: each extra argument overrides one default
	switch (argc){
	case 6: ThrPerBlk = atoi(argv[5]);
	case 5: R = atoi(argv[4]);
	case 4: T = atoi(argv[3]);
	case 3: strcpy(InputFileName, argv[1]);
	strcpy(OutputFileName, argv[2]);
	break;
	default: printf("\n\nUsage: %s InputFilename OutputFilename [T] [R] [ThrPerBlk]", ProgName);
	printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
	printf("\n\nExample: %s Astronaut.bmp Output.bmp 5", ProgName);
	printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5",ProgName);
	printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5 128",ProgName);
	printf("\n\nT = reconstruction threshold, R = mask selection threshold\n\n");
	exit(EXIT_FAILURE);
	}
	if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
	printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
	exit(EXIT_FAILURE);
	}
	// Create CPU memory to store the input and output images
	TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
	if (TheImg == NULL){
	printf("Cannot allocate memory for the input image...\n");
	exit(EXIT_FAILURE);
	}
	CopyImg = (uch *)malloc(IMAGESIZE);
	if (CopyImg == NULL){
	free(TheImg);
	printf("Cannot allocate memory for the input image...\n");
	exit(EXIT_FAILURE);
	}
	// Choose which GPU to run on, change this on a multi-GPU system.
	int NumGPUs = 0;
	cudaGetDeviceCount(&NumGPUs);
	if (NumGPUs == 0){
	printf("\nNo CUDA Device is available\n");
	exit(EXIT_FAILURE);
	}
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
	exit(EXIT_FAILURE);
	}
	cudaGetDeviceProperties(&GPUprop, 0);
	SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024;
	SupportedMBlocks = SupportedKBlocks / 1024;
	sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K');
	MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
	cudaEventCreate(&time1);
	cudaEventCreate(&time2);
	/*
	>>> GPU STORAGE DETAILS >>>
	***********************
	GPUImage: IMAGESIZE
	GPUCopyImage(BW) : IMAGEPIX
	NoisyPixelCoords: IMAGEPIX*sizeof(pixelCoords)
	NoiseMap : IMAGEPIX
	KernelIndices : IMAGEPIX
	GlobalMax : sizeof(ui)
	GlobalMin : sizeof(ui)
	NumNoisyPixelsGPU : sizeof(ui)
	GPU_PREV_BW : sizeof(double) * IMAGEPIX
	GPU_CURR_BW : sizeof(double) * IMAGEPIX
	GPU_SAD : sizeof(ui)
	***********************
	*/
	// allocate sufficient memory on the GPU to hold all above items
	// (one cudaMalloc; individual buffers are carved out below by pointer arithmetic)
	GPUtotalBufferSize = IMAGESIZE+(IMAGEPIX*sizeof(pixelCoords))+IMAGEPIX*3+sizeof(ui)*4+2*(sizeof(double)*IMAGEPIX);
	cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory for buffers");
	exit(EXIT_FAILURE);
	}
	// setup buffer pointers for functions
	// NOTE(review): layout order must match the size computation above;
	// GlobalMax/GlobalMin/NumNoisyPixelsGPU/GPU_SAD are four consecutive ui's.
	GPUImg = (uch *)GPUptr;
	GPUCopyImg = GPUImg + IMAGESIZE;
	NoiseMap = GPUCopyImg + IMAGEPIX; // add the previous image/array of noisy pixel intensities
	KernelIndices = NoiseMap + IMAGEPIX;
	NoisyPixelCoords = (pixelCoords*)(KernelIndices + IMAGEPIX);
	GPU_PREV_BW = (double*)(NoisyPixelCoords+IMAGEPIX);
	GPU_CURR_BW = GPU_PREV_BW + IMAGEPIX;
	GlobalMax = (ui*)(GPU_CURR_BW + IMAGEPIX);
	GlobalMin = GlobalMax+1;
	NumNoisyPixelsGPU = GlobalMin+1;
	GPU_SAD = NumNoisyPixelsGPU+1;
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy for input image CPU to GPU failed!");
	exit(EXIT_FAILURE);
	}
	// Copy mutex initializations from CPU to GPU
	cudaStatus = cudaMemcpy(GlobalMax, mutexInit, 4*sizeof(ui), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy for mutex initializations CPU to GPU failed!");
	exit(EXIT_FAILURE);
	}
	// assume pixels are not noisy by default
	cudaStatus = cudaMemset (NoiseMap, 1, IMAGEPIX );
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemset for Noise Map failed!");
	exit(EXIT_FAILURE);
	}
	cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
	// calculate GPU-specific parameters
	BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
	NumBlocks = IPV*BlkPerRow; // one block row per image row
	// transform RGB input image into grayscale
	BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, GPU_CURR_BW, IPH);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\n cudaDeviceSynchronize for B&WKernel returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// call kernel to locate the noisy pixels in the image
	findNoisyPixels <<< NumBlocks, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, GlobalMax, GlobalMin, NumNoisyPixelsGPU, IPH, IPV);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\ncudaDeviceSynchronize for findNoisyPixels returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// copy the length of the list holding the noisy pixel locations from the GPU to CPU
	cudaStatus = cudaMemcpy(&NumNoisyPixelsCPU, NumNoisyPixelsGPU, sizeof(ui), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy of NumNoisyPixels, GPU to CPU failed!");
	exit(EXIT_FAILURE);
	}
	// only schedule as many threads are needed for NumNoisyPixelsCPU
	NumBlocksNP = CEIL(NumNoisyPixelsCPU, ThrPerBlk);
	// determineMasks tries to find the optimal inpainting masks to use for each noisy pixel
	determineMasks <<< NumBlocksNP, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, KernelIndices, NumNoisyPixelsCPU, IPH, R);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\ncudaDeviceSynchronize for determineMasks returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// intially copy the current working version of the image to gain a previous version
	PixCopy <<< NumBlocks, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, IMAGEPIX);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\ncudaDeviceSynchronize for PixCopy returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// progress tracking: iterate convolution passes until convergence
	do{
	// reset SAD (sum of absolute pixel differences)
	cudaStatus = cudaMemset (GPU_SAD, 0, sizeof(ui) );
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemset for GPU_SAD failed!");
	exit(EXIT_FAILURE);
	}
	// perform convolutions with appropriate inpainting masks
	Convolute <<< NumBlocksNP, ThrPerBlk >>> (GPU_CURR_BW, GPU_PREV_BW, NoisyPixelCoords, KernelIndices, NumNoisyPixelsCPU, IPH);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\n cudaDeviceSynchronize for Convolute returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// find sum of absolute differences for just the pixels denoted as noisy
	SAD <<< NumBlocksNP, ThrPerBlk >>> (GPU_SAD, GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH, IPV);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\n cudaDeviceSynchronize for SAD returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// copy just the noisy pixel intensities from the current working image version to the previous version
	NoisyPixCopy <<< NumBlocksNP, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n\n cudaDeviceSynchronize for NoisyPixCopy returned error code %d after launching the kernel!\n", cudaStatus);
	exit(EXIT_FAILURE);
	}
	// CudaMemcpy the SAD from GPU to CPU (it is a GPU variable)
	cudaStatus = cudaMemcpy(&CPU_SAD, GPU_SAD, sizeof(ui), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy of SAD from GPU to CPU failed!");
	exit(EXIT_FAILURE);
	}
	} while(CPU_SAD > T); // iterate until the sum of absolute differences is below threshold
	// must convert floating point B&W back to unsigned char format
	NumBlocks = IPV*BlkPerRow;
	RGBKernel <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPU_CURR_BW, IPH);
	GPUResult = GPUImg;
	cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
	//Copy output (results) from GPU buffer to host (CPU) memory.
	cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
	exit(EXIT_FAILURE);
	}
	cudaEventSynchronize(time1);
	cudaEventSynchronize(time2);
	cudaEventElapsedTime(&kernelExecutionTime, time1, time2);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
	free(TheImg);
	free(CopyImg);
	exit(EXIT_FAILURE);
	}
	WriteBMPlin(CopyImg, OutputFileName); // Write the denoised image back to disk
	printf("\n\n--------------------------------------------------------------------------\n");
	printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
	GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
	printf("--------------------------------------------------------------------------\n");
	printf("%s %s %s %d %d %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName,
	T, R, ThrPerBlk, NumBlocks, BlkPerRow);
	printf("Kernel Execution =%7.2f ms\n", kernelExecutionTime);
	printf("--------------------------------------------------------------------------\n\n");
	// Deallocate CPU, GPU memory and destroy events
	cudaFree(GPUptr);
	cudaEventDestroy(time1);
	cudaEventDestroy(time2);
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceReset failed!");
	free(TheImg);
	free(CopyImg);
	exit(EXIT_FAILURE);
	}
	free(TheImg);
	free(CopyImg);
	return(EXIT_SUCCESS);
}
|
20,129 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Kernel stub (course assignment): presumably intended to compact the
// src/dst/weight edge arrays of a graph, writing each surviving old edge
// to the position given by edgeMap — TODO confirm against the assignment
// spec; the body is intentionally left for the student to implement.
__global__ void packGraph_gpu(int * newSrc, int * oldSrc, int * newDst, int * oldDst, int * newWeight, int * oldWeight, int * edgeMap, int numEdges) {
	/*YOUR CODE HERE*/
}
|
20,130 | #include <iostream>
#include <cuda.h>
// Writes, for each element, the sum of its thread index and block index.
// Assumes the launch covers exactly the length of 'data' (no bounds guard).
__global__ void addTidBid(int* data)
{
	const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
	data[globalIdx] = threadIdx.x + blockIdx.x;
}
// Demo driver: zero-fill a 16-element device array, have 2 blocks of
// 8 threads each write tid+bid into it, then print the results on host.
int main()
{
	const int numElems = 16;
	const size_t numBytes = numElems * sizeof(int);
	int *hArray = (int *) malloc(numBytes);           // host-side result buffer
	int *dArray;                                      // device buffer
	cudaMalloc((void **)&dArray, numBytes);
	cudaMemset(dArray, 0, numBytes);                  // start from all zeros
	addTidBid<<<2, 8>>>(dArray);                      // 2 blocks x 8 threads = 16 elements
	// cudaMemcpy is blocking, so it also synchronizes with the kernel
	cudaMemcpy(hArray, dArray, numBytes, cudaMemcpyDeviceToHost);
	cudaFree(dArray);
	for (int i = 0; i < numElems; ++i)
		std::cout << hArray[i] << std::endl;
	free(hArray);
	return 0;
}
|
// One block scans one chunk of newline-separated messages; each thread of the
// block tests one pattern (threadIdx.x indexes into pattern_starts) against the
// message at the block's current position. out_matches[block] counts messages
// matched by at least one pattern.
// NOTE(review): several threads return early (lines guarded below) while the
// surviving threads keep calling __syncthreads(); a barrier not reached by all
// threads of the block is undefined behavior and can hang. Also, all matching
// threads write the shared flag 'is_match' concurrently — benign here only
// because they all write the same value. Both points should be confirmed/fixed
// before relying on this kernel.
__global__ void block_search(char* input_data, int input_length, char* patterns, int* pattern_starts, int pattern_count, int chunk_size, int* out_matches) {
	char SEPARATOR = '\n';
	int id = blockIdx.x;             // chunk handled by this block
	int pattern_id = threadIdx.x;    // pattern tested by this thread
	int index = id * chunk_size;
	int end_index = (id + 1) * chunk_size;
	if (index >= input_length) {
	return;
	}
	//Find start of first message
	if (index != 0) { // First doesn't start with separator
	while (input_data[index] != SEPARATOR) {
	index++;
	}
	}
	if (index >= end_index) {
	return;
	}
	__shared__ int is_match;         // set by any thread whose pattern matched
	is_match = false;
	while (index < input_length) {
	__syncthreads();
	if (index >= end_index && input_data[index] == SEPARATOR) {
	return;  // NOTE(review): divergent exit while others still hit barriers
	}
	int pattern_is_match = true;
	int pattern_length = pattern_starts[pattern_id + 1] - pattern_starts[pattern_id];
	// compare this thread's pattern against the input at the current offset
	for (int char_index=0; char_index < pattern_length; ++char_index) {
	if (patterns[pattern_starts[pattern_id] + char_index] != input_data[char_index + index]) {
	pattern_is_match = false;
	break;
	}
	}
	__syncthreads();
	if (pattern_is_match) {
	is_match = true;
	}
	__syncthreads();
	if (is_match) {
	if (threadIdx.x == 0) { // just once per matched message
	out_matches[id] += 1;
	}
	__syncthreads();
	is_match = false;
	__syncthreads();
	// Go to next message
	while (input_data[index] != SEPARATOR) {
	index++;
	}
	} else {
	index++;
	}
	}
}
|
20,132 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
// both M and N must be evenly divisible by SIZE, M must be evenly divisible by CHKSIZE
#define SIZE 2
#define N 2
#define M 3
#define CHKSIZE 1
//naive kernel: one thread computes the squared Euclidean distance between
// vector idx of A (n x SIZE, row-major) and vector idy of B (m x SIZE,
// row-major), storing it in C (n x m, row-major).
__global__ void EuclideanDistancesNaive(float *A, float *B, float *C, int n,
	int m) {
	const int rowA = blockDim.x * blockIdx.x + threadIdx.x;
	const int rowB = blockDim.y * blockIdx.y + threadIdx.y;
	if (rowA >= n || rowB >= m)
		return; // guard the grid tail
	float acc = 0.0f;
	for (int k = 0; k < SIZE; k++) {
		const float diff = A[(rowA * SIZE) + k] - B[(rowB * SIZE) + k];
		acc += diff * diff;
	}
	C[(rowA * m) + rowB] = acc;
}
//optimized kernel
__global__ void EuclideanDistancesFast(const float *A, const float *B, float *C,
	const int n, const int m) {
	// n, A, 4000 this kernel assumes A is column-major A(SIZE, n)
	// m, B, 20000 this kernel assumes B is row-major B(m, SIZE)
	// this kernel assumes C is column-major C(m,n)
	// this kernel assumes number of threads per threadblock == SIZE
	// CHKSIZE is the number of B vectors that will be compute per block
	__shared__ float my_sB[CHKSIZE * SIZE]; // enough shared storage for CHKSIZE vectors of B
	int bx = blockIdx.x; // one block per CHKSIZE rows of B (the larger input matrix)
	while ((bx * CHKSIZE) < m) { // block-stride loop over chunks of B (lets grid be smaller than m/CHKSIZE)
	int tx = threadIdx.x;
	for (int i = 0; i < CHKSIZE; i++) // load vectors of B into shared memory
	my_sB[(i * SIZE) + tx] = B[(((bx * CHKSIZE) + i) * SIZE) + tx];
	__syncthreads(); // shared tile fully populated before any thread reads it
	while (tx < n) { //loop across all vectors in A (thread-stride of blockDim.x)
	float result[CHKSIZE]; // per-thread partial distances, kept in registers
	for (int i = 0; i < CHKSIZE; i++)
	result[i] = 0.0f;
	for (int i = 0; i < SIZE; i++) {
	float Atemp = A[(n * i) + tx]; // column-major read: coalesced across threads
	for (int j = 0; j < CHKSIZE; j++) { // compute all CHKSIZE B vectors with read of A
	float temp = Atemp - my_sB[i + (j * SIZE)];
	result[j] += temp * temp;
	}
	}
	for (int i = 0; i < CHKSIZE; i++) // store CHKSIZE results
	C[((i + (bx * CHKSIZE)) * n) + tx] = result[i];
	tx += blockDim.x;
	} // continue looping across vectors in A
	__syncthreads(); // necessary to prevent warps from racing ahead, if block looping is used
	bx += gridDim.x;
	}
}
// CPU reference: squared Euclidean distance between two 'size'-dimensional
// float vectors (no square root applied).
float comp_euclid_sq(const float *rA, const float *rB, const int size) {
	float acc = 0.0f;
	for (int k = 0; k < size; ++k) {
		const float diff = rA[k] - rB[k];
		acc += diff * diff;
	}
	return acc;
}
// Benchmark driver: computes all pairwise squared Euclidean distances between
// n vectors (matrix A) and m vectors (matrix B) of dimension SIZE, first on
// the CPU, then with the naive kernel (row-major C) and the optimized kernel
// (transposed A, column-major C), checking the latter against the CPU result.
// Returns 0 on success, 1 on a CPU/GPU mismatch.
// BUG FIXES vs. original: stop_mem_copy was never recorded (or synchronized),
// so et_mem came from an unrecorded event; cpu_results_kernel / matrixA_T /
// d_matrixA / d_matrixB were leaked; events were never destroyed.
int main_ed_gpu() {
	float cpu_time = 0.0f, et1 = 0.0f, et2 = 0.0f, et_mem = 0.0f;
	cudaEvent_t start1, start2, stop1, stop2, start_mem_copy, stop_mem_copy;
	cudaEventCreate(&start1);
	cudaEventCreate(&start2);
	cudaEventCreate(&start_mem_copy);
	cudaEventCreate(&stop1);
	cudaEventCreate(&stop2);
	cudaEventCreate(&stop_mem_copy);
	int n = N; //MatrixA size : n * SIZE
	int m = M; //MatrixB size : m * SIZE
	srand((unsigned) time(0));
	// Host Allocations
	float *matrixA = (float *) malloc(n * SIZE * sizeof(float));
	for (int i = 0; i < n * SIZE; i++)
		matrixA[i] = (float) i + 1;
	float *matrixB = (float *) malloc(m * SIZE * sizeof(float));
	for (int i = 0; i < m * SIZE; i++)
		matrixB[i] = (float) i + i;
	const clock_t begin_time = clock();
	float *results_kernel = (float *) malloc(n * m * sizeof(float));
	float *cpu_results_kernel = (float *) malloc(n * m * sizeof(float));
	// CPU reference: one squared distance per (row of A, row of B) pair
	for (int i = 0; i < n * m; i++)
		cpu_results_kernel[i] = comp_euclid_sq(matrixA + ((i / m) * SIZE),
				matrixB + (i % m) * SIZE, SIZE);
	cpu_time = float( clock () - begin_time ) / 1000;
	//Device Allocation (timed with the start/stop_mem_copy event pair)
	cudaEventRecord(start_mem_copy);
	float *d_matrixA;
	float *d_matrixB;
	cudaMalloc((void **) &d_matrixA, n * SIZE * sizeof(float));
	cudaMalloc((void **) &d_matrixB, m * SIZE * sizeof(float));
	cudaMemcpy(d_matrixA, matrixA, n * SIZE * sizeof(float),
			cudaMemcpyHostToDevice);
	cudaMemcpy(d_matrixB, matrixB, m * SIZE * sizeof(float),
			cudaMemcpyHostToDevice);
	// BUG FIX: record and wait for the stop event before measuring;
	// the original asked for the elapsed time of an unrecorded event.
	cudaEventRecord(stop_mem_copy);
	cudaEventSynchronize(stop_mem_copy);
	cudaEventElapsedTime(&et_mem, start_mem_copy, stop_mem_copy);
	float *d_results_kernel;
	cudaMalloc((void **) &d_results_kernel, n * m * sizeof(float));
	cudaFuncSetCacheConfig(EuclideanDistancesNaive, cudaFuncCachePreferL1);
	dim3 threads3(8, 32); // 1024 threads per block (maximum)
	dim3 blocks3(n / threads3.x, m / threads3.y); // assumes evenly divisible
	cudaEventRecord(start1);
	EuclideanDistancesNaive<<<blocks3, threads3>>>(d_matrixA, d_matrixB,
			d_results_kernel, n, m);
	cudaEventRecord(stop1);
	cudaMemcpy(results_kernel, d_results_kernel, n * m * sizeof(float),
			cudaMemcpyDeviceToHost);
	cudaMemset(d_results_kernel, 0, n * m * sizeof(float));
	cudaEventSynchronize(stop1);
	cudaEventElapsedTime(&et1, start1, stop1);
	// transpose matrix A: the fast kernel expects column-major A(SIZE, n)
	float *matrixA_T = (float *) malloc(n * SIZE * sizeof(float));
	for (int i = 0; i < n; i++)
		for (int j = 0; j < SIZE; j++)
			matrixA_T[(j * n) + i] = matrixA[(i * SIZE) + j];
	cudaMemcpy(d_matrixA, matrixA_T, n * SIZE * sizeof(float),
			cudaMemcpyHostToDevice);
	cudaFuncSetCacheConfig(EuclideanDistancesFast, cudaFuncCachePreferL1);
	dim3 threads4(SIZE); // one thread per vector element
	dim3 blocks4(m / CHKSIZE);
	cudaEventRecord(start2);
	EuclideanDistancesFast<<<blocks4, threads4>>>(d_matrixA, d_matrixB,
			d_results_kernel, n, m);
	cudaEventRecord(stop2);
	cudaMemcpy(results_kernel, d_results_kernel, n * m * sizeof(float),
			cudaMemcpyDeviceToHost);
	// test for correct transposed result C(m,n)
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < m; j++) {
			printf("%f ", results_kernel[(j * n) + i]);
			if (results_kernel[(j * n) + i]
					!= cpu_results_kernel[(i * m) + j]) {
				printf("cpu/kernel4 mismatch at %d,%d, cpu: %f, kernel4: %f\n",
						i, j, cpu_results_kernel[(i * m) + j],
						results_kernel[(j * n) + i]);
				return 1;
			}
		}
		printf("\n");
	}
	cudaEventSynchronize(stop2);
	cudaEventElapsedTime(&et2, start2, stop2);
	// BUG FIX: release everything that was allocated (the original leaked
	// d_matrixA/d_matrixB, matrixA_T, cpu_results_kernel and all six events)
	cudaFree(d_results_kernel);
	cudaFree(d_matrixA);
	cudaFree(d_matrixB);
	printf("Success!\n");
	printf("CPU: %.fms, kernel1 : %.fms, kernel2 : %.fms, Mem copy: %.fms\n",
			cpu_time, et1, et2, et_mem);
	free(matrixA);
	free(matrixB);
	free(results_kernel);
	free(cpu_results_kernel);
	free(matrixA_T);
	cudaEventDestroy(start1);
	cudaEventDestroy(start2);
	cudaEventDestroy(start_mem_copy);
	cudaEventDestroy(stop1);
	cudaEventDestroy(stop2);
	cudaEventDestroy(stop_mem_copy);
	return 0;
}
|
20,133 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cooperative_groups.h"
#include <stdio.h>
#include <iostream>
#define CHUNKSIZE 6 // number of vectors of B residing in local shared memory
#define PDIM 3 // vector space dimension
cudaError_t addWithCuda(const int *a, const int *b, float *c, unsigned int BPG, unsigned int TPB, unsigned int size);
// Euclidean (L2) distance between two integer vectors of dimension PDIM.
__device__ float euclidean(int* a, int* b)
{
	float sumSq = 0.0f;
	for (int dim = 0; dim < PDIM; dim++)
	{
		const int diff = a[dim] - b[dim];
		sumSq += (float)(diff * diff);
	}
	return sqrtf(sumSq);
}
// A and B are arrays holding integer coordinate vectors of dimensionality PDIM
// layout e.g. A[v1[0], v1[1], v1[2], v2[0], v2[1], v2[2], v3[0], v3[1], v3[2], ...]
// NOTE(review): this kernel is clearly work-in-progress. The outer while loop
// never advances blkix, so it spins forever whenever blkix*CHUNKSIZE < cardB
// on entry; the only output written is the placeholder dists[blkix] = 1234;
// and the shared-cache load adds trdix to both index expressions, which looks
// like every thread re-loads shifted copies of the same elements — confirm the
// intended tiling before using. vecA_ix * dim_idx in the A load also looks
// suspect (presumably vecA_ix * PDIM + dim_idx was meant — verify).
__global__ void hDist(const int* A, const int* B, float* dists, const int cardA, const int cardB)
{
	// shared cache for CHUNKSIZE point vectors of input matrix B
	// shared memory is accessible on a per-block basis
	__shared__ int chunk_cache_B[CHUNKSIZE * PDIM];
	int trdix = threadIdx.x;
	int blkix = blockIdx.x;
	// populate block-local shared cache with vectors from B
	for (int vecB_ix = 0; vecB_ix < CHUNKSIZE; vecB_ix++) {
	for (int dim_idx = 0; dim_idx < PDIM; dim_idx++) {
	chunk_cache_B[(vecB_ix * PDIM) + dim_idx + trdix] = B[((blkix * CHUNKSIZE) + vecB_ix) * PDIM + dim_idx + trdix];
	}
	}
	__syncthreads(); // cache fully written before any thread reads it
	int vecA_ix = 0;
	int vector_cache_A[PDIM] = { 0 };
	float dist_cache[CHUNKSIZE] = { 0.0 };
	while ((blkix * CHUNKSIZE) < cardB) { // NOTE(review): blkix never changes -> infinite loop
	while (vecA_ix < cardA) {
	for (int dim_idx = 0; dim_idx < PDIM; dim_idx++) {
	vector_cache_A[dim_idx] = A[vecA_ix * dim_idx];
	}
	dist_cache[trdix] = euclidean(vector_cache_A, &chunk_cache_B[trdix * PDIM]);
	vecA_ix += 1;
	}
	dists[blkix] = 1234; // placeholder output, not a real distance
	}
	// dists[trix] = sqrtf( (A[trix] + B[trix]) * (A[trix] + B[trix]));
}
// Driver: builds two small integer point sets (3 points and 4 points in 3-D),
// runs the distance pipeline via addWithCuda (2 blocks x 6 threads = 12 = ca*cb
// output slots), prints the resulting values, then resets the device for the
// profiler.
int main()
{
	const int pointdim = 3; // must match PDIM used by the kernels
	const int ca = 3;       // number of vectors in set a
	const int cb = 4;       // number of vectors in set b
	const int a[ca * pointdim] = { 1,2,3, 4,5,6, 7,8,9 };
	const int b[cb * pointdim] = { 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120 };
	float dsts[ca * cb] = { 0.0 }; // one slot per (a, b) pair
	// Add vectors in parallel.
	cudaError_t cudaStatus = addWithCuda(a, b, dsts, 2, 6, ca * cb);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "addWithCuda failed!");
	return 1;
	}
	for (int i = 0; i < ca*cb; i++) {
	std::cout << dsts[i] << std::endl;
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceReset failed!");
	return 1;
	}
	return 0;
}
// Helper function for using CUDA to run the hDist kernel: allocates device
// buffers for the two input point arrays and the float output, copies inputs
// host->device, launches hDist with BPG blocks of TPB threads, and copies the
// result back into c. Returns the first CUDA error encountered (buffers are
// always freed via the Error label).
// BUG FIX: the invalid-configuration branch used to return the status of
// cudaSetDevice(0), which is normally cudaSuccess — so the caller saw success
// for a rejected launch. It now returns cudaErrorInvalidValue.
cudaError_t addWithCuda(const int* a, const int* b, float* c, unsigned int BPG, unsigned int TPB, unsigned int size)
{
	std::cout << "Got size: " << size << std::endl;
	int *dev_a = 0;
	int *dev_b = 0;
	float *dev_c = 0;
	cudaError_t cudaStatus;
	// Reject a launch configuration that does not cover exactly 'size' slots.
	if ((BPG * TPB) != size) {
	fprintf(stderr, "INVALID BPG TPB");
	return cudaErrorInvalidValue;
	}
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
	goto Error;
	}
	// Allocate GPU buffers for three vectors (two input, one output) .
	cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
	cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
	// Launch a kernel on the GPU with one thread for each element.
	// NOTE(review): cardA/cardB are hard-coded to 1 here — presumably
	// placeholders while hDist is under development; confirm before use.
	hDist<<<BPG, TPB>>>(dev_a, dev_b, dev_c, 1, 1);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	goto Error;
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
	goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
Error:
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	return cudaStatus;
}
|
20,134 | /******************************
* Tisma Miroslav 2006/0395
* Multiprocesorski sistemi
* domaci zadatak 6 - 3. zadatak
*******************************/
/**
* 3. Sastaviti program koji racuna skalarni proizvod dva niza
*/
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_OF_GPU_THREADS 256
// Computes the dot product of array1 and array2 (length n), writing the scalar
// result through 'result'. Launched with exactly one block of
// NUM_OF_GPU_THREADS threads: each thread multiplies-and-sums its contiguous
// slice, partial sums are combined with a shared-memory tree reduction.
// NOTE(review): array1 is overwritten in place with the elementwise products
// (a visible side effect for the caller). The reduction assumes
// NUM_OF_GPU_THREADS is a power of two — confirm if the constant changes.
__global__ void dotProduct(int *array1, int *array2, int n, int *result) {
	int i, sum = 0;
	int idx = threadIdx.x;
	__shared__ int dotSum[NUM_OF_GPU_THREADS]; // per-thread partial sums
	int slice = n / NUM_OF_GPU_THREADS;
	int start = idx * slice;
	// last thread picks up the remainder when n is not divisible
	if (idx == NUM_OF_GPU_THREADS - 1)
	slice += n % NUM_OF_GPU_THREADS;
	int end = start + slice;
	for (i = start; i < end; i++) {
	array1[i] = array1[i] * array2[i];
	sum += array1[i];
	}
	dotSum[idx] = sum;
	__syncthreads(); // all partials written before the reduction starts
	int half = NUM_OF_GPU_THREADS;
	// tree reduction: each pass folds the upper half onto the lower half;
	// the barrier at the top of each pass orders reads after the prior writes
	do {
	__syncthreads();
	half >>= 1;
	if (idx < half)
	dotSum[idx] += dotSum[idx + half];
	} while(half != 1);
	// thread 0 performed the final half==1 addition itself, so its read of
	// dotSum[0] here needs no further barrier
	if (idx == 0)
	*result = dotSum[0];
}
// Reads an array size from stdin, fills two host arrays with random values in
// [-100, 99], prints them, computes their dot product on the GPU with a single
// block of NUM_OF_GPU_THREADS threads, and prints the result.
// BUG FIXES vs. original: scanf and malloc results are now checked, and the
// deprecated cudaThreadSynchronize() was replaced with cudaDeviceSynchronize().
int main(int argc, char *argv[]) {
	int i, n;
	int *h_array1, *h_array2;
	int *d_array1, *d_array2;
	int h_result;
	int *d_result;
	printf("Nizovi ce biti ispisani na standardnom izlazu\n");
	printf("Unesite velicinu nizova:\n");
	// validate the size before allocating with it
	if (scanf("%d", &n) != 1 || n <= 0) {
		fprintf(stderr, "Invalid array size\n");
		return EXIT_FAILURE;
	}
	h_array1 = (int*)malloc(n*sizeof(int));
	h_array2 = (int*)malloc(n*sizeof(int));
	if (h_array1 == NULL || h_array2 == NULL) {
		fprintf(stderr, "Host allocation failed\n");
		free(h_array1);
		free(h_array2);
		return EXIT_FAILURE;
	}
	printf("\n");
	srand(time(0));
	for (i = 0; i < n; i++) {
		h_array1[i] = -100 + rand() % 200;
		printf("%2d ", h_array1[i]);
	}
	printf("\n");
	for (i = 0; i < n; i++) {
		h_array2[i] = -100 + rand() % 200;
		printf("%2d ", h_array2[i]);
	}
	printf("\n");
	cudaMalloc((void**)&d_array1, n*sizeof(int));
	cudaMalloc((void**)&d_array2, n*sizeof(int));
	cudaMalloc((void**)&d_result, sizeof(int));
	cudaMemcpy(d_array1, h_array1, n*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_array2, h_array2, n*sizeof(int), cudaMemcpyHostToDevice);
	dotProduct<<<1, NUM_OF_GPU_THREADS>>>(d_array1, d_array2, n, d_result);
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent
	cudaDeviceSynchronize();
	cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
	printf("\nSkalarni proizvod nizova je: %d\n", h_result);
	cudaFree(d_array1);
	cudaFree(d_array2);
	cudaFree(d_result);
	free(h_array1);
	free(h_array2);
	return EXIT_SUCCESS;
}
|
20,135 | #include "cuda_runtime.h"
#include "stdio.h"
// Device-query utility: enumerates every CUDA device on the system and prints
// the fields of its cudaDeviceProp (memory sizes, launch limits, clock rate,
// compute capability, etc.) to stdout.
int main(int argc, char* argv[]) {
	int deviceCount;
	cudaDeviceProp deviceProp;
	cudaGetDeviceCount(&deviceCount);
	printf("Device count: %d\n\n", deviceCount);
	for (int i = 0; i < deviceCount; i++) {
	// query and dump the property block for device i
	cudaGetDeviceProperties(&deviceProp, i);
	printf("Device %d name: %s\n", i + 1, deviceProp.name);
	printf("Total global memory: %zu\n", deviceProp.totalGlobalMem);
	printf("Shared memory per block: %zu\n", deviceProp.sharedMemPerBlock);
	printf("Registers per block: %d\n", deviceProp.regsPerBlock);
	printf("Warp size: %d\n", deviceProp.warpSize);
	printf("Memory pitch: %zu\n", deviceProp.memPitch);
	printf("Max threads per block: %d\n", deviceProp.maxThreadsPerBlock);
	printf("Max threads dimensions: x = %d, y = %d, z = %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
	printf("Max grid size: x = %d, y = %d, z = %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
	printf("Clock rate: %d\n", deviceProp.clockRate);
	printf("Total constant memory: %zu\n", deviceProp.totalConstMem);
	printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
	printf("Texture alignment: %zu\n", deviceProp.textureAlignment);
	printf("Device overlap: %d\n", deviceProp.deviceOverlap);
	printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
	printf("Kernel execution timeout enabled: %s\n\n", deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
	}
	return 0;
}
|
20,136 | #include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <string.h>
#include <cuda.h>
#define MAXN 1024
#define MaxProblem 1024
#define BLOCK_SIZE 512
#define UINT uint32_t
uint32_t hostMtx[2][MAXN*MAXN];
uint32_t Ret[2][MAXN*MAXN];
int problemindex=0;
int N;
//======================================================
// Elementwise sum C = A + B for N x N matrices; launched with one block per
// row and one thread per column (no bounds guard — launch must match N).
__global__ void matrixAdd( int N,UINT* A, UINT* B, UINT* C){
	const int row = blockIdx.x;
	const int col = threadIdx.x;
	const int offset = row * N + col;
	C[offset] = A[offset] + B[offset];
}
// Matrix product C = A * B for N x N matrices; one block per output row,
// one thread per output element (no bounds guard — launch must match N).
__global__ void matrixMul( int N,UINT* A, UINT* B, UINT* C){
	const int row = blockIdx.x;
	const int col = threadIdx.x;
	UINT acc = 0;
	for(int k=0; k<N; k++)
		acc += A[row*N + k] * B[k*N + col];
	C[row*N + col] = acc;
}
//============================================
// Deterministically fills the N x N matrix A with a quadratic-congruential
// pseudo-random sequence seeded by c (values in [0, N*N)).
void rand_gen(UINT c, int N, UINT A[MAXN*MAXN]) {
	const UINT modulus = (UINT)(N * N);
	UINT x = 2;
	for (int row = 0; row < N; row++) {
		for (int col = 0; col < N; col++) {
			x = (x * x + c + row + col) % modulus;
			A[row * N + col] = x;
		}
	}
}
// Order-sensitive checksum of an N x N matrix (Knuth-style multiplicative
// hash folded over every element in row-major order).
UINT signature(int N, UINT A[MAXN*MAXN]) {
	UINT hash = 0;
	for (int row = 0; row < N; row++)
		for (int col = 0; col < N; col++)
			hash = (hash + A[row*N + col]) * 2654435761LU;
	return hash;
}
//==========================================================
// Batch driver: reads (N, seedA, seedB) problem descriptions from stdin, and
// for each one generates matrices A and B, computes AB+BA and ABA+BAB on the
// GPU, and prints a signature checksum of each result.
// NOTE(review): kernels are launched as <<<N, N>>>, so N must not exceed the
// 1024 threads-per-block limit (MAXN == 1024 is exactly at the limit);
// problemindex is not bounds-checked against MaxProblem — confirm inputs.
int main(int argc, char *argv[]) {
	uint32_t S[MaxProblem][64],TotalN[MaxProblem];
	// slurp every problem: a size N followed by two generator seeds
	while(scanf("%d", &TotalN[problemindex]) == 1){
	for (int i = 0; i < 2; i++) {
	scanf("%d", &S[problemindex][i]);
	}
	problemindex++;
	}
	//readIn();
	uint32_t *cuIN[2], *cuTmp[6];
	uint32_t memSz = MAXN*MAXN*sizeof(uint32_t);
	// device buffers sized for the largest possible matrix, reused per problem
	for (int i = 0; i < 2; i++) {
	cudaMalloc((void **) &cuIN[i], memSz);
	}
	for (int i = 0; i < 6; i++)
	cudaMalloc((void **) &cuTmp[i], memSz);
	for(int index=0;index<problemindex;index++){
	N=TotalN[index];
	#pragma omp parallel for
	for (int i = 0; i < 2; i++) {
	rand_gen(S[index][i], N, hostMtx[i]);
	cudaMemcpy(cuIN[i], hostMtx[i], memSz, cudaMemcpyHostToDevice);
	}
	int num_blocks = (N*N-1)/BLOCK_SIZE +1 ; // NOTE(review): computed but unused
	// AB
	//multiply(cuIN[0], cuIN[1], cuTmp[0]);
	matrixMul<<<N, N>>>(N, cuIN[0], cuIN[1], cuTmp[0]);
	// BA
	//multiply(cuIN[1], cuIN[0], cuTmp[1]);
	matrixMul<<<N, N>>>(N, cuIN[1], cuIN[0], cuTmp[1]);
	//AB+BA
	//add(cuTmp[0], cuTmp[1], cuTmp[2]);
	matrixAdd<<<N, N>>>(N, cuTmp[0], cuTmp[1], cuTmp[2]);
	// ABA
	//multiply(cuTmp[0], cuIN[0], cuTmp[3]);
	matrixMul<<<N, N>>>(N, cuTmp[0], cuIN[0], cuTmp[3]);
	// BAB
	//multiply(cuTmp[1], cuIN[1], cuTmp[4]);
	matrixMul<<<N, N>>>(N, cuTmp[1], cuIN[1], cuTmp[4]);
	//ABA+BAB
	//add(cuTmp[3], cuTmp[4], cuTmp[5]);
	matrixAdd<<<N, N>>>(N, cuTmp[3], cuTmp[4], cuTmp[5]);
	// blocking copies also synchronize with the kernel queue
	cudaMemcpy(Ret[0], cuTmp[2], memSz, cudaMemcpyDeviceToHost);
	cudaMemcpy(Ret[1], cuTmp[5], memSz, cudaMemcpyDeviceToHost);
	uint32_t ret[2];
	#pragma omp parallel for
	for (int i = 0; i < 2; i++) {
	ret[i] = signature(N, Ret[i]);
	}
	for (int i = 0; i < 2; i++)
	printf("%u\n", ret[i]);
	}
	for (int i = 0; i < 2; i++)
	cudaFree(cuIN[i]);
	for (int i = 0; i < 6; i++)
	cudaFree(cuTmp[i]);
	return 0;
}
20,137 | # include <stdio.h>
# include <stdlib.h> // To use the exit function and malloc
# include <string.h>
/*
* ============================================
* Find a word in a given string (CUDA version)
* ============================================
*
* Usage: find_word <word> <input_file>
*
* Given a word, load the first line of the input file and
* search the word in it. This version uses a CUDA-enabled
* graphics card.
*/
// Global constant
# define NOT_FOUND (-1)
// Function declaration
void validate_arguments(char *argv[], int argc);
FILE *open_or_die(char *filename, char *mode);
char *read_line_from(FILE *file);
int find_word_in_gpu(char *word, char *search_here);
// ----------------------------------------------------------------------------
// Kernel definition
// Kernel definition
void __global__ find_word_kernel(char *word, char *search_here, int *found_here, int ref_length) {
	/*
	 * For each candidate start position (one per thread), test whether
	 * 'word' occurs in 'search_here' starting there, and record 1/0 in
	 * found_here[start]. ref_length is the length of search_here
	 * including its terminator.
	 *
	 * BUG FIX: the original compared search_here[start+j] without checking
	 * that start+j stays inside the reference string, so threads near the
	 * end read past the buffer when the word overhangs it. The comparison
	 * loop now declares "not found" and stops as soon as it would run off
	 * the end.
	 */
	// The starting position of each thread is it's thread id
	int start = threadIdx.x;
	if (start < ref_length-1) { // Check for a valid position
	int found = 1; // Pretend you found it
	// ---> Check if the word is found from here
	for (int j=0; word[j] != '\0'; j++) {
	if (start + j >= ref_length) { // word would run past the end of the string
	found = 0;
	break;
	}
	// Stays found only while every letter coincides
	found = (found && (search_here[start+j] == word[j]));
	}
	// Place your mark
	found_here[start] = found;
	}
	return;
} // --- find_word_kernel
// ----------------------------------------------------------------------------
/* --- << Main function >> --- */
int main(int argc, char *argv[]) {
    // Validate the CLI, load the first line of the input file, search for
    // the word on the GPU, and report where (if anywhere) it was found.
    validate_arguments(argv, argc);
    char *word = argv[1];
    FILE *input = open_or_die(argv[2], "r");
    char *search_here = read_line_from(input);
    fclose(input);
    // Run the search on the device.
    int found_here = find_word_in_gpu(word, search_here);
    // Report.
    if (found_here == NOT_FOUND) {
        printf("Sorry, the word was not found in the reference string\n");
        printf("Word: %s\nReference string: %s\n\n", word, search_here);
    } else {
        printf("The word was found at position: %d\n", found_here);
        printf("Word: %s\nReference string: %s\n", word, search_here);
        // Draw a caret under the first matching character.
        printf(" ");
        for (int i = 0; i < found_here - 1; i++)
            printf(" ");
        printf("^\n\n");
    }
    // Cleanup.
    free(search_here);
    return 0;
} // --- main
// ----------------------------------------------------------------------------
/* --- << Functions >> --- */
void validate_arguments(char *argv[], int argc) {
    /*
     * Ensure exactly two CLI arguments (word and input file) were given.
     * On failure, print usage to stderr and exit with error.
     */
    if (argc == 3)
        return; // word and input file are present
    fprintf(stderr, "ERROR: Incorrect number of arguments\n");
    fprintf(stderr, "Usage: %s <word> <input_file>\n", argv[0]);
    exit(EXIT_FAILURE);
} // --- validate_arguments
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
FILE *open_or_die(char *filename, char *mode) {
    /*
     * Open the file with the given 'filename' in the given mode.
     * On success, return the file handler.
     * On failure, print a diagnostic and exit with error.
     */
    FILE *file = fopen(filename, mode);
    if (!file) {
        // fixed: the original exited silently, leaving the user with no
        // clue which file failed or why.
        fprintf(stderr, "ERROR: cannot open file '%s' (mode '%s')\n", filename, mode);
        perror("fopen");
        exit(EXIT_FAILURE);
    }
    return file;
} // --- open_or_die
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
#define BUFFER_DEFAULT_SIZE 100
char *read_until_next(FILE *file, char end) {
    /* Read from `file` until `end` or EOF is found.
     * Returns a malloc'd, '\0'-terminated buffer with the characters read
     * (excluding `end` and EOF). The file is left positioned just after
     * `end`, or at EOF. Returns NULL on allocation failure.
     * The caller owns the buffer and must free() it.
     */
    enum { CHUNK = 100 };     /* initial capacity / growth quantum */
    int charactersCount = 0;  /* characters stored (terminator excluded) */
    int bufferCapacity = CHUNK;
    char *buffer = (char *) malloc(bufferCapacity);
    if (!buffer)
        return NULL;
    /* fixed: c must be int, not char — fgetc() returns an int, and a char
     * comparison makes byte 0xFF indistinguishable from EOF. */
    int c;
    while ((c = fgetc(file)) != EOF && c != (unsigned char) end) {
        /* Grow when there is no room for this char plus the terminator. */
        if (charactersCount + 1 >= bufferCapacity) {
            bufferCapacity += CHUNK / 2;
            char *grown = (char *) realloc(buffer, bufferCapacity);
            if (!grown) {  /* fixed: don't leak the old buffer on failure */
                free(buffer);
                return NULL;
            }
            buffer = grown;
        }
        buffer[charactersCount++] = (char) c;
    }
    buffer[charactersCount] = '\0';
    /* Shrink to exactly the needed size; keep the old buffer if it fails. */
    char *shrunk = (char *) realloc(buffer, charactersCount + 1);
    return shrunk ? shrunk : buffer;
} // --- read_until_next
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
char *read_line_from(FILE *file) {
    /*
     * Return the next line of `file` (up to a newline or EOF) as a
     * malloc'd, null-terminated buffer; the newline is not included.
     */
    const char line_end = '\n';
    return read_until_next(file, line_end);
} // --- read_line_from
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
int find_word_in_gpu(char *word, char *search_here) {
    /*
     * Search for `word` in `search_here` using one CUDA thread per
     * candidate start position (single block).
     * Returns the index of the first occurrence, or NOT_FOUND.
     * NOTE(review): a single block limits ref_length to the device's max
     * threads per block (typically 1024); longer strings need a grid.
     */
    int word_length = strlen(word);
    int ref_length = strlen(search_here);
    int found_here = NOT_FOUND;
    if (ref_length == 0)   // nothing to search in; also avoids a 0-thread launch
        return NOT_FOUND;
    // Copy the word and the reference string to managed memory.
    // fixed: allocate length+1 so strcpy's terminating '\0' fits — the
    // original overflowed both buffers by one byte.
    char *word_tmp;
    cudaMallocManaged(&word_tmp, (word_length + 1) * sizeof(char));
    strcpy(word_tmp, word);
    char *ref_tmp;
    cudaMallocManaged(&ref_tmp, (ref_length + 1) * sizeof(char));
    strcpy(ref_tmp, search_here);
    // One result slot per candidate start position, preset to "not found".
    int *found_here_tmp;
    cudaMallocManaged(&found_here_tmp, ref_length * sizeof(int));
    for (int i = 0; i < ref_length; i++) {
        found_here_tmp[i] = 0;
    }
    // Launch the kernel: one thread per start position.
    printf("Launching %d threads in a single block\n", ref_length);
    find_word_kernel<<<1, ref_length>>>(word_tmp, ref_tmp, found_here_tmp, ref_length);
    cudaDeviceSynchronize();
    // The first flagged position wins.
    for (int i = 0; i < ref_length; i++) {
        if (found_here_tmp[i]) {
            found_here = i;
            break;
        }
    }
    // Cleanup.
    cudaFree(found_here_tmp);
    cudaFree(word_tmp);
    cudaFree(ref_tmp);
    return found_here;
} // --- find_word_in_gpu
|
20,138 | #include <iostream>
using namespace std;
// Kernel: every thread prints a greeting carrying its own thread index.
__global__ void task1(){
    const int tid = threadIdx.x;
    printf("Hello World! I am thread %d.\n", tid);
}
// Launch four threads in a single block, then block until their printf
// output has been flushed before exiting.
int main(){
    dim3 grid(1);
    dim3 block(4);
    task1<<<grid, block>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
20,139 | /*
* Copyright (c) 2014 by Joern Dinkla, www.dinkla.com, All rights reserved.
*
* See the LICENSE file in the root directory.
*/
#include <algorithm> // CUDA 6.5 requires this for std::min
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
using namespace std;
// Device functor for thrust::transform: maps each element to twice its value.
struct double_functor {
    __device__ int operator()(const int value) {
        return 2 * value;
    }
};
// Demo: fill a device vector with 1..1024, double every element on the GPU
// via thrust::transform, then print each entry from the host.
void thrust_map_beispiel() {
    thrust::device_vector<int> values(1024);
    thrust::sequence(values.begin(), values.end(), 1);
    double_functor doubler;
    thrust::transform(values.begin(), values.end(), values.begin(), doubler);
    // Each values[idx] read triggers a device-to-host copy of one element.
    for (int idx = 0; idx < values.size(); idx++) {
        cout << "d[" << idx << "] = " << values[idx] << endl;
    }
}
|
20,140 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <cstdlib>
// #include "random.h"
#define MBIG 1000000000
#define MSEED 161803398
#define MZ 0
#define FAC (1.0/MBIG)
// Knuth's subtractive lagged-Fibonacci PRNG ("ran3" from Numerical Recipes).
// Returns a uniform float in [0,1). A negative idum (or the very first call)
// (re)initializes the generator from the seed.
// NOTE(review): the NR original takes int* and resets *idum after seeding;
// here idum is passed by value, so only the static `iff` flag prevents
// reseeding on every negative-seed call.
// NOTE(review): state lives in function-local statics — the sequence is
// process-global and not thread-safe; statics inside a __host__ __device__
// function are dubious for device execution. Confirm it is only meaningfully
// called from host code.
__host__ __device__ float ran3(int idum)
//int *idum;
{
static int inext,inextp;   // rolling indices into the state table
static long ma[56];        // state table (entries 1..55 are used)
static int iff=0;          // 0 until the table has been initialized
long mj,mk;
int i,ii,k;
if (idum < 0 || iff == 0) {
iff=1;
// Seed the table from MSEED and |idum|.
mj=MSEED-(idum < 0 ? -idum : idum);
mj %= MBIG;
ma[55]=mj;
mk=1;
// Scatter initial values through the table in a pseudo-random order.
for (i=1;i<=54;i++) {
ii=(21*i) % 55;
ma[ii]=mk;
mk=mj-mk;
if (mk < MZ) mk += MBIG;
mj=ma[ii];
}
// "Warm up" the generator with four full passes.
for (k=1;k<=4;k++)
for (i=1;i<=55;i++) {
ma[i] -= ma[1+(i+30) % 55];
if (ma[i] < MZ) ma[i] += MBIG;
}
inext=0;
inextp=31;   // the constant 31 is specific to this lag structure
idum=1;
}
// Advance both indices (wrapping 55 -> 1) and take the subtractive step.
if (++inext == 56) inext=1;
if (++inextp == 56) inextp=1;
mj=ma[inext]-ma[inextp];
if (mj < MZ) mj += MBIG;
ma[inext]=mj;
return mj*FAC;   // scale the long into [0,1)
}
#undef MBIG
#undef MSEED
#undef MZ
#undef FAC
// Thrust functor: overwrite the input float4 with a draw from a bi-Gaussian
// transverse phase-space distribution (x, px, y, py), generated with the
// polar (Marsaglia) Box-Muller method and hard-coded beta/emittance values.
// NOTE(review): the incoming value `a` is ignored; the seed is the same
// constant 12489 on every call, and ran3 keeps static state — on the device
// all threads would share/race that state, so the statistical properties of
// the output are questionable. Verify the intended RNG design.
struct rand_vec4_bigaussian
{
__host__ __device__
float4 operator()(float4 a)
{
float ampx, ampy, amp, r1, r2, facc;
float x,px, y, py;
float betax;
float emix;
float betay;
float emiy;
uint seed;
// Fixed lattice parameters: beta functions and emittances per plane.
betax = 10.0;
betay = 20.0;
emix = 1e-9;
emiy = 2e-9;
seed=12489;
// 1 sigma rms beam sizes using average ring betas
ampx = sqrt(betax*emix);
ampy = sqrt(betay*emiy);
// Rejection-sample a point in the unit disk (excluding a tiny center
// region to keep log(amp) finite) for the x-px plane.
do
{
r1 = 2*ran3(seed)-1;
r2 = 2*ran3(seed)-1;
amp = r1*r1+r2*r2;
}
while ((amp >=1) || (amp<=3.e-6));
facc = sqrt(-2*log(amp)/amp); // transforming [-1,1] uniform to gaussian - inverse transform
x = ampx * r1 * facc; // scaling the gaussian
px = ampx * r2 * facc; // scaling the gaussian
// generate bi-gaussian distribution in the y-py phase-space
do
{
r1 = 2*ran3(seed)-1;
r2 = 2*ran3(seed)-1;
amp = r1*r1+r2*r2;
}
while ((amp >=1) || (amp<=3.e-6));
facc = sqrt(-2*log(amp)/amp); // transforming [-1,1] uniform to gaussian - inverse transform
y = ampy* r1 * facc; // scaling the gaussian
py = ampy* r2 * facc; // scaling the gaussian
a = make_float4(x,px,y,py);
return a;
};
};
// Stream a float4 as "[x,y,z,w]" (host-side pretty printer).
__host__ std::ostream& operator<< (std::ostream& os, const float4& p)
{
    os << "[" << p.x << "," << p.y << "," << p.z << "," << p.w << "]";
    return os;
}
int main(int argc, char const *argv[])
{
    // Fill a small device vector with a single host-side draw from ran3.
    thrust::device_vector<float> samples(10);
    thrust::fill(samples.begin(), samples.end(), ran3(12489));
    // Overwrite one million float4s with bi-gaussian phase-space draws.
    rand_vec4_bigaussian generator;
    thrust::device_vector<float4> phase(1000000);
    thrust::transform(phase.begin(), phase.end(), phase.begin(), generator);
    // Print the ten samples (one per line), then one more host draw.
    thrust::copy(samples.begin(), samples.end(), std::ostream_iterator<float>(std::cout, "\n"));
    std::cout << ran3(12489);
    return 0;
}
20,141 | /*
* Authors:
* Oren Freifeld, freifeld@csail.mit.edu
* Yixin Li, Email: liyixin@mit.edu
*/
// Reset every per-superpixel accumulator (count, log-count, color sums and
// means, spatial sums and covariance sums) to zero.
// One thread per superpixel label.
__global__ void clear_fields(int * count, double * log_count,
int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s, double * sigma_s_h,
const int dim_i, const int nsuperpixel){
    const int label = threadIdx.x + blockIdx.x * blockDim.x;
    if (label >= nsuperpixel) return;
    count[label] = 0;
    log_count[label] = 0.0;
    // Color accumulators: dim_i entries per label.
    for (int d = 0; d < dim_i; ++d){
        mu_i_h[dim_i*label + d] = 0;
        mu_i[dim_i*label + d] = 0;
    }
    // Spatial accumulators: 2 mean entries, 3 covariance entries per label.
    for (int j = 0; j < 2; ++j){
        mu_s_h[2*label + j] = 0;
        mu_s[2*label + j] = 0;
    }
    for (int j = 0; j < 3; ++j)
        sigma_s_h[3*label + j] = 0;
}
// Accumulate per-superpixel statistics with one thread per pixel: pixel
// count, color sums, and spatial first/second moments. Many pixels share a
// label, so all updates go through atomics.
// NOTE(review): mu_i_h is int* while img is double* — the atomicAdd
// implicitly converts each color value to int, truncating fractions.
// Confirm the image is integer-valued (e.g. 0..255 stored as double).
__global__ void sum_by_label(
double * img, int * seg,
int * count, int * mu_i_h, int * mu_s_h, unsigned long long int * sigma_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
// the superpixel label of this pixel
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
// Pixel coordinates from the flat row-major index (xdim columns).
int x = t % xdim;
int y = t / xdim;
int xx = x * x;
int xy = x * y;
int yy = y * y;
atomicAdd(&mu_s_h[2*k], x);        // sum of x
atomicAdd(&mu_s_h[2*k+1], y);      // sum of y
atomicAdd(&sigma_s_h[3*k], xx);    // sum of x^2
atomicAdd(&sigma_s_h[3*k+1], xy);  // sum of x*y
atomicAdd(&sigma_s_h[3*k+2], yy);  // sum of y^2
}
// Finalize per-superpixel statistics: convert the accumulated sums into
// means, then build the 2x2 spatial covariance (blended with a prior),
// its inverse J_s, and its log-determinant. One thread per label.
// Covariance uses the identity sum((p-mu)(p-mu)^T) = sum(p p^T) - n mu mu^T.
__global__ void calculate_mu_and_sigma(
int * counts, double* log_count, int * mu_i_h, int * mu_s_h,
double * mu_i, double * mu_s,
unsigned long long int * sigma_s_h, double * prior_sigma_s,
double * sigma_s, double * logdet_Sigma_s, double * J_s,
const int prior_count, const int dim_i, const int nsuperpixel)
{
const int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
double count = double (counts[k]);
double mu_x = 0.0;
double mu_y = 0.0;
// Means: only defined for non-empty superpixels.
if (count>0){
//X[k] /= count
log_count[k] = log(count);
mu_x = mu_s_h[2*k] / count;
mu_y = mu_s_h[2*k+1]/ count;
mu_s[2*k] = mu_x;
mu_s[2*k+1] = mu_y;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i[dim_i*k+d] = mu_i_h[dim_i*k+d]/count;
}
}
// Centered second moments C = [C00 C01; C01 C11]; needs >3 points for a
// meaningful (non-degenerate) estimate, otherwise only the prior is used.
double C00,C01,C11;
C00 = C01 = C11 = 0;
int total_count = counts[k] + prior_count;
if (count > 3){
//update cumulative count and covariance
C00= sigma_s_h[3*k] - mu_x * mu_x * count;
C01= sigma_s_h[3*k+1] - mu_x * mu_y * count;
C11= sigma_s_h[3*k+2] - mu_y * mu_y * count;
}
// Blend with the prior covariance; the -3 acts as a degrees-of-freedom
// correction. NOTE(review): prior_sigma_s is stored 4 entries per label
// (full 2x2) but only indices 0, 1 and 3 are read — confirm layout.
C00 = (prior_sigma_s[k*4] + C00) / (double(total_count) - 3);
C01 = (prior_sigma_s[k*4+1] + C01)/ (double(total_count) - 3);
C11 = (prior_sigma_s[k*4+3] + C11) / (double(total_count) - 3);
double detC = C00 * C11 - C01 * C01;
// Regularize a non-positive-definite estimate so the inverse and log are
// well defined.
if (detC <= 0){
C00 = C00 + 0.00001;
C11 = C11 + 0.00001;
detC = C00*C11-C01*C01;
if(detC <=0) detC = 0.0001;//hack
}
// Store the (symmetric) covariance row-major.
sigma_s[k*4] = C00;
sigma_s[k*4+1] = C01;
sigma_s[k*4+2] = C01;
sigma_s[k*4+3] = C11;
// Closed-form 2x2 inverse: adj(C) / det(C).
J_s[k*4] = C11 / detC;
J_s[k*4+1] = -C01/ detC;
J_s[k*4+2] = -C01/ detC;
J_s[k*4+3] = C00/ detC;
logdet_Sigma_s[k] = log(detC);
}
// Reset the count, color-sum and position-sum accumulators for every
// superpixel label (lightweight variant without covariance buffers).
__global__ void clear_fields_2(int * count, int * mu_i_h, int * mu_s_h, const int dim_i, const int nsuperpixel){
    const int label = threadIdx.x + blockIdx.x * blockDim.x;
    if (label >= nsuperpixel) return;
    count[label] = 0;
    for (int d = 0; d < dim_i; ++d)
        mu_i_h[dim_i*label + d] = 0;
    mu_s_h[2*label] = 0;
    mu_s_h[2*label + 1] = 0;
}
// Variant of sum_by_label without second moments: per label, atomically
// accumulate the pixel count, color sums, and position sums.
// NOTE(review): as in sum_by_label, img is double* but mu_i_h is int*, so
// the atomicAdd truncates each color value to int — confirm integer-valued
// images.
__global__ void sum_by_label_2(double * img, int * seg, int * count, int * mu_i_h, int * mu_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x *blockDim.x;
if (t>=nPts) return;
//get the label
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
atomicAdd(&mu_s_h[2*k], t % xdim);   // sum of x coordinates (column)
atomicAdd(&mu_s_h[2*k+1], t / xdim); // sum of y coordinates (row)
}
// Turn the per-superpixel sums into means (mu = sum / count), skipping
// empty labels. One thread per superpixel label.
__global__ void calculate_mu(
int * counts, int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s,
const int dim_i, const int nsuperpixel)
{
    const int label = threadIdx.x + blockIdx.x * blockDim.x;
    if (label >= nsuperpixel) return;
    const double n = double(counts[label]);
    if (n <= 0) return;   // empty superpixel: leave the means untouched
    mu_s[2*label] = mu_s_h[2*label] / n;
    mu_s[2*label + 1] = mu_s_h[2*label + 1] / n;
    for (int d = 0; d < dim_i; ++d)
        mu_i[dim_i*label + d] = mu_i_h[dim_i*label + d] / n;
}
20,142 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include <chrono>
using namespace std;
using namespace std::chrono;
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "GS_1024x1024.ppm"//baboon.pgm"//
// Load a binary PPM (P6) file and return the red channel as a malloc'd
// array of width*height unsigned ints; writes the dimensions and maxval
// through the out-parameters. Returns NULL on any failure.
// NOTE(review): the 1000-byte header read is not null-terminated before the
// strstr/sscanf calls below — a header without early '\0' bytes could read
// past the buffer. Also `%ld` is paired with the int `bufsize` in one
// fprintf (format mismatch), and "%d\0" in sprintf is a redundant escape.
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
// Magic number check: binary PPM starts with "P6".
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2; // 16-bit samples take two bytes each
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// Locate the start of the pixel data by scanning the header for the
// textual width, height and maxval tokens in sequence.
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
// Keep only the red channel of each RGB triple.
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
    /*
     * Write a binary PPM (P6) file; each single-channel value in `pic` is
     * replicated to R, G and B, producing a grayscale image.
     * Exits the process if the file cannot be opened.
     */
    FILE *fp = fopen(filename, "wb");  // fixed: binary mode for a binary PPM
    if (!fp)
    {
        // fixed: the original format string had a %s with no argument (UB)
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval);
    int numpix = xsize * ysize;
    for (int i = 0; i < numpix; i++) {
        unsigned char uc = (unsigned char) pic[i];
        fprintf(fp, "%c%c%c", uc, uc, uc);  // replicate gray to R,G,B
    }
    fclose(fp);
}
// CPU reference Prewitt edge detector: for each interior pixel compute the
// horizontal and vertical Prewitt gradients, threshold the squared gradient
// magnitude, and write 255 (edge) or 0 (no edge) into `output`.
// The one-pixel border is left untouched.
void HostPrewitt(int xsize, int ysize,int thresh, unsigned int *input_pic, int *output)
{
    for (int row = 1; row + 1 < ysize; ++row) {
        const int up = (row - 1) * xsize;
        const int mid = row * xsize;
        const int down = (row + 1) * xsize;
        for (int col = 1; col + 1 < xsize; ++col) {
            // Horizontal gradient: right column minus left column.
            const int gx = (int)input_pic[up + col + 1] - (int)input_pic[up + col - 1]
                         + (int)input_pic[mid + col + 1] - (int)input_pic[mid + col - 1]
                         + (int)input_pic[down + col + 1] - (int)input_pic[down + col - 1];
            // Vertical gradient: top row minus bottom row.
            const int gy = (int)input_pic[up + col - 1] + (int)input_pic[up + col] + (int)input_pic[up + col + 1]
                         - (int)input_pic[down + col - 1] - (int)input_pic[down + col] - (int)input_pic[down + col + 1];
            const int magnitude = gx * gx + gy * gy;
            output[mid + col] = (magnitude > thresh) ? 255 : 0;
        }
    }
}
// Prewitt edge detection, one thread per pixel, global memory only.
// Grid mapping matches GPUPrewittHandler: blockIdx.x tiles rows and
// blockIdx.y tiles columns, with tile_w x tile_w (5x5) threads per block.
// Interior pixels only — the one-pixel border is never written.
__global__ void gpuPrewittWithoutSm(int xsize, int ysize,int thresh, unsigned int *input_pic, int *result)
{
    const int tile_w = 5;
    const int row = blockIdx.x * tile_w + threadIdx.x;
    const int col = blockIdx.y * tile_w + threadIdx.y;
    if (row <= 0 || row >= ysize - 1 || col <= 0 || col >= xsize - 1)
        return;
    // Horizontal gradient: right column minus left column.
    const int gx = input_pic[xsize*(row-1) + col+1] - input_pic[xsize*(row-1) + col-1]
                 + input_pic[xsize*(row)   + col+1] - input_pic[xsize*(row)   + col-1]
                 + input_pic[xsize*(row+1) + col+1] - input_pic[xsize*(row+1) + col-1];
    // Vertical gradient: top row minus bottom row.
    const int gy = input_pic[xsize*(row-1) + col-1] + input_pic[xsize*(row-1) + col] + input_pic[xsize*(row-1) + col+1]
                 - input_pic[xsize*(row+1) + col-1] - input_pic[xsize*(row+1) + col] - input_pic[xsize*(row+1) + col+1];
    const int magnitude = gx * gx + gy * gy;
    result[row * xsize + col] = (magnitude > thresh) ? 255 : 0;
}
// Prewitt edge detection using a shared-memory tile with a one-pixel halo.
// Each 5x5 block stages a 7x7 tile: every thread loads its own pixel, edge
// threads load the side halos, and four designated threads load the corners.
// NOTE(review): the center load `input_pic[xsize*Row + Col]` is unguarded,
// so blocks on the right/bottom image edge read out of bounds when the image
// size is not a multiple of tile_w. The corner loads use (Row±2, Col±2)
// offsets from the threads at (tx,ty) in {1, tile_w-2}, which resolves to
// the tile's corner halo cells — confirm on a boundary case.
__global__ void gpuPrewittWITHSm(int xsize, int ysize,int thresh, unsigned int *input_pic, int *result)
{
const int tile_w = 5;
// (tile_w+2)^2 tile: interior plus a one-pixel halo ring.
__shared__ int blk_share[tile_w+2][tile_w+2];
int by = blockIdx.x; int bx = blockIdx.y;
int ty = threadIdx.x; int tx = threadIdx.y;
int Row = by * tile_w + ty;
int Col = bx * tile_w + tx;
// Every thread stages its own pixel at the +1-shifted tile position.
blk_share[ty+1][tx+1] = input_pic[ xsize * (Row) + Col ];
// Top and bottom halo rows (one thread per column).
if((ty == 0) && (Row > 0))
{
blk_share[ty][tx+1] = input_pic[ xsize * (Row-1) + Col ];
}
else if((ty == tile_w-1) && (Row < ysize-1))
{
blk_share[ty+2][tx+1] = input_pic[ xsize * (Row+1) + Col ];
}
// Left and right halo columns (one thread per row).
if((tx==0)&&(Col>0))
{
blk_share[ty+1][tx] = input_pic[ xsize * (Row) + Col-1 ];
}
else if((tx == tile_w-1) && (Col < xsize-1))
{
blk_share[ty+1][tx+2] = input_pic[ xsize * (Row) + Col+1 ];
}
// Four corner halo cells, each loaded by a distinct designated thread.
if((tx==1) && (ty==1)&&(Col-2>=0)&&(Row-2>=0))
{
blk_share[0][0] = input_pic[ xsize * (Row-2) + Col-2 ];
}
else if((tx==tile_w-2) && (ty==tile_w-2)&&(Row+2 <=ysize-1) && (Col+2 <= xsize-1))
{
blk_share[tile_w+1][tile_w+1] = input_pic[ xsize * (Row+2) + Col+2 ];
}
else if((tx ==tile_w-2) && (ty==1)&&(Row-2>=0)&&(Col+2 <= xsize-1))
{
blk_share[0][tile_w+1] = input_pic[ xsize * (Row-2) + Col+2 ];
}
else if((tx ==1) && (ty==tile_w-2)&&(Row+2 <=ysize-1)&&(Col-2>=0))
{
blk_share[tile_w+1][0] = input_pic[ xsize * (Row+2) + Col-2 ];
}
// All staging complete before any thread reads the tile.
__syncthreads();
if((Row > 0) && (Row < ysize-1)&&(Col > 0) && (Col < xsize-1))
{
int offset = Row*xsize + Col;
int magnitude, sum1, sum2;
// Shift local coordinates into the halo-padded tile.
ty++;
tx++;
{
// sum1: horizontal Prewitt gradient; sum2: vertical Prewitt gradient.
sum1 = blk_share[ty-1][tx+1] - blk_share[ty-1][tx-1]
+ blk_share[ty][tx+1] - blk_share[ty][tx-1]
+ blk_share[ty+1][tx+1] - blk_share[ty+1][tx-1];
sum2 = blk_share[ty-1][tx-1] + blk_share[ty-1][tx] + blk_share[ty-1][tx+1]
- blk_share[ty+1][tx-1] - blk_share[ty+1][tx] - blk_share[ty+1][tx+1];
}
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[offset] = 255;
else
result[offset] = 0;
}
__syncthreads();
}
// Host wrapper: allocate device buffers, run the shared-memory Prewitt
// kernel over the whole image, and copy the thresholded result to `output`.
void GPUPrewittHandler(int xsize, int ysize,int thresh, unsigned int *input_pic, int *output)
{
    const int tile_w = 5;
    const int bytes = xsize * ysize * sizeof(unsigned int);
    unsigned int *d_pic = NULL;
    int *d_result = NULL;
    cudaMalloc(&d_pic, bytes);
    cudaMalloc(&d_result, bytes);
    cudaMemcpy(d_pic, input_pic, bytes, cudaMemcpyHostToDevice);
    // blockIdx.x tiles rows (ysize), blockIdx.y tiles columns (xsize).
    dim3 grid(ceil(double(ysize) / tile_w), ceil(double(xsize) / tile_w), 1);
    dim3 block(tile_w, tile_w, 1);
    gpuPrewittWITHSm<<<grid, block>>>(xsize, ysize, thresh, d_pic, d_result);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(output, d_result, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_pic);
    cudaFree(d_result);
}
// Entry point: parse optional [filename] [threshold] arguments, read the
// PPM, run the GPU Prewitt filter, time it, and write result.ppm.
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
// NOTE(review): strdup'd filename is never freed (harmless leak at exit).
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) { // filename AND threshold
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) { // default file but specified threshhold
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
// NOTE(review): read_ppm can return NULL; the code below does not check.
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * sizeof( int );//3 * sizeof( int );
int *result = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "prewitt() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
// NOTE(review): magnitude/sum1/sum2 are unused leftovers from the CPU path.
int i, j, magnitude, sum1, sum2;
int *out = result;
// Zero the whole result buffer (the kernels skip border pixels).
for (int col=0; col<ysize; col++) {
for (int row=0; row<xsize; row++) {
*out++ = 0;
}
}
// take time snap before Perwit
high_resolution_clock::time_point t1 = high_resolution_clock::now();
//HostPrewitt(xsize, ysize,thresh, pic, result);
GPUPrewittHandler(xsize, ysize, thresh, pic, result);
// take time snap after Perwit
high_resolution_clock::time_point t2 = high_resolution_clock::now();
// Report elapsed wall-clock time in microseconds.
// NOTE(review): the label says "CPU" but the GPU path is what ran.
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << "Perwit Time CPU(us):" << duration << "\n";
write_ppm( "result.ppm", xsize, ysize, 255, result);
fprintf(stderr, "prewitt done\n");
}
20,143 | #include <iostream>
#include <vector>
// Abort the process with a readable message when a CUDA API call failed;
// returns true on success so it can be used inline in expressions.
bool cudaCheck(cudaError_t err) {
    if (err != cudaSuccess) {
        std::cerr << "Code Failed due to " << cudaGetErrorString(err) << std::endl;
        exit(EXIT_FAILURE);
    }
    return true;
}
// Print a human-readable summary of one device's properties to stdout.
void printProp(cudaDeviceProp devP) {
    std::cout << "Name: " << devP.name << std::endl;
    std::cout << "\tTotal Global Memory: " << devP.totalGlobalMem << std::endl;
    std::cout << "\tShared Memory per Block: " << devP.sharedMemPerBlock << std::endl;
    std::cout << "\tWarp Size: " << devP.warpSize << std::endl;
    std::cout << "\tMax Threads per Block: " << devP.maxThreadsPerBlock << std::endl;
    std::cout << "\tNumber of multiprocessors: " << devP.multiProcessorCount << std::endl;
    // Per-axis block limits, then per-axis grid limits.
    for (int axis = 0; axis < 3; ++axis) {
        std::cout << "\tMax of dimension " << axis << " of block: " << devP.maxThreadsDim[axis] << std::endl;
    }
    for (int axis = 0; axis < 3; ++axis) {
        std::cout << "\tMax of dimension " << axis << " of grid: " << devP.maxGridSize[axis] << std::endl;
    }
}
// Enumerate all CUDA devices on the system and print each one's properties.
void DeviceProp() {
    int devCount;
    cudaGetDeviceCount(&devCount);
    for (int dev = 0; dev < devCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printProp(props);
    }
}
// Copy a std::vector<int> into a freshly new[]-allocated C array.
// *A receives the array (caller must delete[] it); returns the element count.
int vecToArr(std::vector< int > &v, int **A) {
    const int n = (int)v.size();
    int *dest = new int[v.size()];
    for (int i = 0; i < n; ++i)
        dest[i] = v[i];
    *A = dest;
    return n;
}
20,144 | #include <iostream>
#include <iomanip>
using namespace std;
// Sequential reference implementation: c[i] = a[i] + b[i] for i in [0, size).
void VectorAdditionCPU(int* a, int* b, int* c, int size)
{
    int i = 0;
    while (i < size)
    {
        c[i] = a[i] + b[i];
        ++i;
    }
}
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// Launch with enough blocks to cover `size`; surplus threads fall through.
void __global__ VectorAdditionGPU(int* a, int* b, int* c, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < size)
    {
        c[index] = a[index] + b[index];
        // fixed: removed __syncthreads() — it sat inside a divergent branch
        // (undefined behavior when size is not a multiple of blockDim.x),
        // and no thread reads another thread's result, so no barrier is
        // needed at all.
    }
}
// Planned shared-memory variant of the vector addition — unimplemented stub.
// NOTE(review): parameters mirror VectorAdditionGPU plus an intended shared
// memory size; the kernel currently performs no work.
void __global__ VectorAdditionGPUSharedMemory(int* a, int* b, int* c, int size, int sharedMemorySize)
{
// Yet to be done
}
// Benchmark CPU vs GPU element-wise addition of two large int vectors and
// verify that both produce identical results.
int main()
{
    int size = 100000000;
    float start;
    float stop;
    float cpu;
    float gpu;
    // --- CPU version ---
    int *a, *b, *cCPU, *cGPU;
    a = new int[size];
    b = new int[size];
    cCPU = new int[size];
    cGPU = new int[size];
    // Fill the input vectors with known values.
    for(int i = 0; i < size; i++)
    {
        a[i] = 100;
        b[i] = 200;
    }
    start = clock();
    VectorAdditionCPU(a, b, cCPU, size);
    stop = clock();
    cpu = (stop - start) / (CLOCKS_PER_SEC) * 1000;
    cout << "Time needed for the CPU to add " << size << " pairs of integers : " << cpu << " ms" << endl;
    // --- GPU version ---
    int *device_a, *device_b, *device_c;
    cudaMalloc(&device_a, size * sizeof(int));
    cudaMalloc(&device_b, size * sizeof(int));
    cudaMalloc(&device_c, size * sizeof(int));
    cudaMemcpy(device_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    start = clock();
    VectorAdditionGPU<<<size / 1024 + 1, 1024>>>(device_a, device_b, device_c, size);
    // fixed: kernel launches are asynchronous — without this sync the timer
    // measured only the launch overhead, not the kernel execution itself.
    cudaDeviceSynchronize();
    stop = clock();
    cudaMemcpy(cGPU, device_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    gpu = (stop - start) / (CLOCKS_PER_SEC) * 1000;
    cout << "Time needed for the GPU to add " << size << " pairs of integers : " << gpu << " ms" << endl;
    // fixed: guard against a ~0 ms measurement to avoid dividing by zero.
    if (gpu > 0)
        cout << "Performance Gain using GPU : " << (int)(cpu / gpu) << " times" << endl;
    // Verify the GPU result against the CPU reference.
    bool error = false;
    for(int i = 0; i < size; i++)
        if(cGPU[i] != cCPU[i])
            error = true;
    if(error)
        cout << "Results don't match!" << endl;
    delete [] a;
    delete [] b;
    delete [] cCPU;
    delete [] cGPU;
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
|
20,145 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// In-place edge-detection pipeline on a grayscale image stored row-major in
// pix (cols columns, count pixels). One thread per pixel: a gradient pass,
// three smoothing passes, and a final neighborhood-difference pass.
// NOTE(review): __syncthreads() only synchronizes within a block, so pixels
// near block boundaries can mix values from different stages — preserved
// from the original design; confirm that is acceptable.
__global__ void person_threshold(unsigned char * pix, int cols, int count){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned char p = (unsigned char)0;
    // Stage 1: gradient magnitude from differences against the row below.
    if(i < count - cols){
        int diffX = ((int)pix[i + cols] - (int)pix[i + cols - 1]);
        int diffY = ((int)pix[i + cols] - (int)pix[i]);
        p = (unsigned char)((int)sqrtf((float)(diffX * diffX) + (diffY * diffY)));
    }
    // fixed: barriers moved out of the divergent branch — __syncthreads()
    // inside `if (i < count - cols)` is undefined behavior when threads of
    // the same block take different paths.
    __syncthreads();
    if(i < count - cols)
        pix[i] = p;
    __syncthreads();
    // Stages 2-4: three identical smoothing passes over four neighbors.
    // fixed: the first pass's condition was `i > 0 && count - cols - 1`,
    // whose second operand is a nonzero constant (always true), so the
    // upper bound was never checked.
    if(i > 0 && i < count - cols - 1){
        p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
    }
    __syncthreads();
    pix[i] += p;
    if(i > 0 && i < count - cols - 1){
        p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
    }
    __syncthreads();
    pix[i] += p;   // fixed: was `pix[i] + p;`, a no-op expression statement
    if(i > 0 && i < count - cols - 1){
        p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
    }
    __syncthreads();
    pix[i] += p;
    // Stage 5: mean absolute difference against the 8-neighborhood.
    if(i > cols + 1 && i < count - cols - 1 && i % cols != 0){
        p = (
            abs(pix[i-1-cols] - pix[i])
            + abs(pix[i-cols] - pix[i])
            + abs(pix[i + 1 - cols] - pix[i])
            + abs(pix[i - 1] - pix[i]) + abs(pix[i + 1] - pix[i])
            + abs(pix[i - 1 + cols] - pix[i])
            + abs(pix[i + cols] - pix[i])
            + abs(pix[i + 1 + cols] - pix[i])
        )/8;
    }
    __syncthreads();
    pix[i] = p;
}
// Host wrapper: copy the image to the device, run the edge-detection kernel
// in place, and copy the result back. `count` pixels total, `cols` per row.
// NOTE: blocks = count/512 truncates, so a tail of up to 511 pixels is left
// unprocessed (preserved from the original behavior).
void edgedetect(unsigned char * pic, int count, int cols){
    const int threadsPerBlock = 512;
    const int blockCount = count / threadsPerBlock;
    const int bytes = count * sizeof(unsigned char);
    unsigned char * devPix = NULL;
    cudaMalloc(&devPix, bytes);
    cudaMemcpy(devPix, pic, bytes, cudaMemcpyHostToDevice);
    person_threshold<<<blockCount, threadsPerBlock>>>(devPix, cols, count);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(pic, devPix, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devPix);
}
|
20,146 | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void launch_keep_item_kernel(float* input_array, int input_size, float threshold, float* output_array, int output_capacity){
    /*
     * Compact items with value >= threshold into output_array.
     * Layout of output_array:
     *   [0]            -> running counter (incremented with atomicAdd)
     *   [1 + 2*j]      -> j-th kept value
     *   [1 + 2*j + 1]  -> j-th kept value's original index (stored as float)
     * The counter may end up larger than output_capacity; only the first
     * output_capacity pairs are actually written.
     */
    int input_index = blockIdx.x * blockDim.x + threadIdx.x;
    // fixed: guard the grid tail — without this, threads past input_size
    // read out of bounds when the grid overshoots the input.
    if(input_index >= input_size)
        return;
    if(input_array[input_index] < threshold)
        return;
    // atomicAdd on the float counter returns the old value = our output slot.
    int output_index = atomicAdd(output_array, 1);
    if(output_index >= output_capacity)  // counter beyond capacity: drop item
        return;
    float* output_item = output_array + 1 + output_index * 2;
    output_item[0] = input_array[input_index];
    output_item[1] = input_index;
}
// Host wrapper: pick a 1-D launch configuration that covers input_size items
// and invoke the compaction kernel on the default stream.
void launch_keep_item(float* input_array, int input_size, float threshold, float* output_array, int output_capacity){
    const int max_threads = 512;
    const int threads = (input_size < max_threads) ? input_size : max_threads;
    const int blocks = (input_size + threads - 1) / threads;  // ceil-div
    // The dynamic shared-memory allocation is preserved from the original,
    // although the kernel does not declare any shared memory.
    launch_keep_item_kernel<<<blocks, threads, threads * sizeof(float), nullptr>>>(input_array, input_size, threshold, output_array, output_capacity);
}
20,147 | #include "includes.h"
// Element-wise weighted ratio of two length-N rows of x, written in place:
//   row(idx-2)[i] = (W0 * row(idx-1)[i]) / (W1 * row(idx-2)[i])
// with row(idx-2)[i] set to 1.0f wherever its old value is (near) zero, to
// avoid dividing by zero. Grid-stride loop, so any launch configuration
// covers all N elements. Requires idx >= 2.
__global__ void Divide( float * x, size_t idx, size_t N, float W0, float W1)
{
    // fixed: the loop index is size_t (the original `int i` could overflow
    // and mixed signedness with the size_t bound N).
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += (size_t)blockDim.x * gridDim.x)
    {
        // fixed: fabsf instead of abs — depending on included headers,
        // abs(float) can resolve to the integer overload and truncate.
        // Float literals avoid silent double promotion in the comparison.
        if (fabsf(x[(idx-2)*N+i]) > 1e-8f)
            x[(idx-2)*N+i] = ((W0*x[(idx-1)*N+i]) / (W1*x[(idx-2)*N+i]));
        else
            x[(idx-2)*N+i] = 1.0f;
    }
    return;
}
20,148 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <cuda_runtime.h>
#define EPS2 1.0E-9
// Accumulate body bj's gravitational contribution onto acceleration ai.
// bi/bj carry (x,y,z) position with w = mass; EPS2 softens close encounters.
// The floating-point operation order matches the classic GPU Gems 3 kernel.
__device__ float3
bodyBodyInteraction(float4 bi, float4 bj, float3 ai)
{
    // Separation vector r = bj - bi.  [3 FLOPS]
    float3 r;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    // Softened squared distance.  [6 FLOPS]
    const float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
    // 1 / distSqr^(3/2), computed as 1/sqrt(distSqr^3).  [4 FLOPS]
    const float distSixth = distSqr * distSqr * distSqr;
    const float invDistCube = 1.0f/sqrtf(distSixth);
    // Scale by the source mass.  [1 FLOP]
    const float s = bj.w * invDistCube;
    // ai += s * r.  [6 FLOPS]
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai;
}
// All-pairs N-body force accumulation with shared-memory tiling.
// Launch with dynamic shared memory = blockDim.x * sizeof(float4) and
// numTiles = ceil(N / blockDim.x).
__global__ void
calculate_forces(float4 *devX, float4 *devA, int N, int numTiles)
{
    extern __shared__ float4 shPosition[];
    int gtid = blockIdx.x * blockDim.x + threadIdx.x;
    // fixed: do NOT return early for gtid >= N — every thread of the block
    // must reach the __syncthreads() calls below. Out-of-range threads
    // carry a dummy body instead and simply skip the final store.
    float4 myPosition = (gtid < N) ? devX[gtid]
                                   : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    float3 acc = {0.0f, 0.0f, 0.0f};
    for (int i = 0; i < numTiles; i++) {
        int idx = i * blockDim.x + threadIdx.x;
        // fixed: clamp the tile load — the last tile used to read past the
        // end of devX. A zero-mass body (w == 0) contributes nothing.
        shPosition[threadIdx.x] = (idx < N) ? devX[idx]
                                            : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        __syncthreads();
        // Interact with every body staged in this tile.
        for (int j = 0; j < blockDim.x; j++) {
            acc = bodyBodyInteraction(myPosition, shPosition[j], acc);
        }
        __syncthreads();
    }
    // Save the result in global memory for the integration step.
    if (gtid < N) {
        float4 acc4 = {acc.x, acc.y, acc.z, 0.0f};
        devA[gtid] = acc4;
    }
}
|
20,149 | ////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU.
// Straight accumulation in double precision.
////////////////////////////////////////////////////////////////////////////
#include <iostream>
//extern "C"
// Merge per-MPI-process candidate lists into a global top-K3_Report list for
// each of the K_3_R problems. For each problem k, repeatedly pick the global
// maximum score across all processes, skip entries already selected
// (matching score, length AND first A-location), and copy the winner's
// score, length and location arrays into the per-problem output slots.
// NOTE(review): duplicate suppression decrements n and zeroes the duplicate,
// so the loop re-picks; if every remaining candidate is zero the do/while
// still runs until n reaches K3_Report, filling slots with zeros — confirm
// that is the intended padding behavior.
void Kernel_3_Max_CPU_MPI(int *Max_All, int *Length_Seq_K4_All, int *A_Location_All, int *B_Location_All,
int *Max_K3 , int *Length_Seq_K4 , int *A_Location , int *B_Location,
int K3_Report, int K_3_R, int K3_Length, int K3_Safety, int NumProcs)
{
for (int k=0; k<K_3_R; k++)
{
// if (k==2 || k==12 || k==86) for (int j=0; j<K3_Report; j++) for (int i=j+1; i<K3_Report; i++) if (A_Location_All[j*K3_Length*K3_Safety+k*K3_Report*K3_Length*K3_Safety]==A_Location_All[i*K3_Length*K3_Safety+k*K3_Report*K3_Length*K3_Safety]) printf(" %i %i %i %i %i %i \n", k, j, i, Max_All[j+k*K3_Report],Length_Seq_K4_All[j+k*K3_Report],
// A_Location_All[j*K3_Length*K3_Safety+k*K3_Report*K3_Length*K3_Safety] );
int n=0;
do
{
// Scan every process's K3_Report candidates for problem k and find
// the current global maximum score and its flat index.
int Max_Val_MPI = 0;
int Max_Loc_MPI = 0;
for (int j=0; j<NumProcs; j++)
{
int Start = j*K3_Report*K_3_R + k * K3_Report;
int End = Start + K3_Report;
for (int i=Start; i<End; i++)
{
if (Max_Val_MPI<Max_All[i])
{
Max_Val_MPI = Max_All[i];
Max_Loc_MPI = i;
}
}
}
// Duplicate check against the n winners already selected: same
// score, same length and same first A-location means the same hit
// reported by two processes — zero it out and retry.
for (int m=0; m<n; m++)
{
if ((Max_K3[m+k*K3_Report]==Max_Val_MPI) && (Length_Seq_K4_All[Max_Loc_MPI]==Length_Seq_K4[m+k*K3_Report]) &&
(A_Location_All[Max_Loc_MPI*K3_Length*K3_Safety]==A_Location[m*K3_Length*K3_Safety + k*K3_Report*K3_Length*K3_Safety]))
{
m=n;
Max_All[Max_Loc_MPI]=0;
Max_Val_MPI=0;
n--;
// printf("-------------------------\n");
}
}
// Accept a genuine (non-duplicate, nonzero) winner: record its score,
// zero it in the source so it is not picked again, and copy its
// location arrays into output slot n for problem k.
if (Max_Val_MPI!=0)
{
Max_K3[n + k*K3_Report]=Max_Val_MPI;
Max_All[Max_Loc_MPI]=0;
Length_Seq_K4[n + k*K3_Report]=Length_Seq_K4_All[Max_Loc_MPI];
for (int i=0; i<(K3_Length*K3_Safety); i++)
{
A_Location[i + n*K3_Length*K3_Safety + k*K3_Report*K3_Length*K3_Safety]=A_Location_All[i + Max_Loc_MPI*K3_Length*K3_Safety];
B_Location[i + n*K3_Length*K3_Safety + k*K3_Report*K3_Length*K3_Safety]=B_Location_All[i + Max_Loc_MPI*K3_Length*K3_Safety];
}
}
n++;
// printf(" %i %i %i \n", n, k, Max_Val_MPI);
} while (n<K3_Report);
// printf("-----------------------------------------------------------\n");
}
}
|
20,150 | //t̂ƌő̂̉^vZʂԂCŏIIȌʂvZ
//HACK::͒Pɐ`
//HACK::̂łȂ̂ɒ
#ifndef _GPU_ICE_INTERPOLATION_H_
#define _GPU_ICE_INTERPOLATION_H_
#include <iostream>
#include <cuda_runtime.h>
using namespace std;
void LaunchInterPolationGPU(int prtNum, float* sldPrtPos, float* sldPrtVel, float* sphPrtPos, float* sphPrtVel);
__global__ void LinerInterPolation(float* sldPrtPos, float* sldPrtVel, float* sphPrtPos, float* sphPrtVel, int side);
// Host wrapper: launches LinerInterPolation over a cubic particle lattice.
// prtNum is assumed to be a perfect cube; `side` is its cube root (the
// number of vertices along one edge), recovered via pow + rounding.
void LaunchInterPolationGPU(int prtNum, float* sldPrtPos, float* sldPrtVel, float* sphPrtPos, float* sphPrtVel)
{ //cout << __FUNCTION__ << endl;
    int side = pow(prtNum, 1.0/3.0) + 0.5; // cube root, rounded to nearest
    dim3 grid(side, side);
    dim3 block(side, 1, 1);
    // Copy solid-particle state into the SPH particle arrays.
    LinerInterPolation<<<grid ,block>>>(sldPrtPos, sldPrtVel, sphPrtPos, sphPrtVel, side);
    // FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
}
__global__
void LinerInterPolation(float* sldPrtPos, float* sldPrtVel, float* sphPrtPos, float* sphPrtVel, int side)
{
    // Flatten (blockIdx.x, blockIdx.y, threadIdx.x) into one particle index
    // on a side*side*side lattice.
    const int pIndx = (blockIdx.x * side + blockIdx.y) * side + threadIdx.x;
    // SPH arrays are 4 floats per particle, solid arrays 3 floats.
    const int sphIndx = pIndx * 4;
    const int sldIndx = pIndx * 3;
    // "Interpolation" is currently a straight copy of position and velocity.
    for (int c = 0; c < 3; ++c) {
        sphPrtPos[sphIndx + c] = sldPrtPos[sldIndx + c];
        sphPrtVel[sphIndx + c] = sldPrtVel[sldIndx + c];
    }
}
#endif |
20,151 | //
// Example from:
// https://devblogs.nvidia.com/even-easier-introduction-cuda/
//
// Compiles as follows:
// nvcc add_v03.cu -o add_v3_cuda
//
// nvcc is set as follows:
// export PATH=/usr/local/cuda-10.0/bin:$PATH
#include <iostream>
#include <math.h>
// CUDA *kernel* function to add the elements of two arrays
//
// The __global__ specifier tells the CUDA C++ compiler that
// this is a function that runs on the GPU and can be called
// from CPU code
//
// These __global__ functions are known as *kernels*, and code
// that runs on the GPU is often called *device code*, while code
// that runs on the CPU is *host code*
//
// Here we'll *spread* calculations over *multiple threads*
// AND *multiple block* which will reduce the execution time
// even further
//
// Elementwise y[i] += x[i] over n floats.
// Grid-stride loop: correctness is independent of the launch configuration,
// so any grid/block combination (including <<<1,1>>> for debugging) works.
__global__ void add(int n, float* x, float* y)
{
    const int first  = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x; // total threads in the grid
    for (int i = first; i < n; i += stride)
        y[i] = x[i] + y[i];
}
int main(void)
{
    // (more than) 1M elements
    int N = 1 << 20;

    // Unified (managed) memory: accessible from both CPU and GPU.
    float *x, *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Initialise inputs on the host: x = 1, y = 2 everywhere.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch with enough 256-thread blocks to cover N elements,
    // rounding the block count up since N need not divide evenly.
    int block_size = 256;
    int num_blocks = (N + block_size - 1) / block_size;
    add<<<num_blocks, block_size>>>(N, x, y);

    // Kernel launches are asynchronous: block the CPU until the GPU
    // finishes before reading the results.
    cudaDeviceSynchronize();

    // Verify: every element should now be 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
    {
        maxError = fmax(maxError, fabs(y[i]-3.0));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Release the managed allocations.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
20,152 | #include <cuda.h>
#include <iostream>
unsigned char* gpu_bgr_image;
unsigned char* gpu_hsv_image;
unsigned char* gpu_bin_image;
unsigned char* gpu_vhgw_s_array;
unsigned char* gpu_vhgw_r_array;
unsigned char* gpu_fil_image;
// One thread per pixel: convert interleaved 8-bit BGR to HSV using the
// OpenCV 8-bit convention (H in [0,180), S and V in [0,255]).
__global__ void bgr2hsv(unsigned char* bgr_image, unsigned char* hsv_image)
{
    int idx = blockIdx.x*blockDim.x*blockDim.y + (threadIdx.y*blockDim.x + threadIdx.x);
    idx *= 3; // 3 bytes per pixel
    int b = bgr_image[idx];
    int g = bgr_image[idx + 1];
    int r = bgr_image[idx + 2];
    int M = max(max(b,g),r); // value (V)
    int m = min(min(b,g),r);
    float C = M - m;         // chroma
    if(C == 0)
    {
        // Achromatic pixel: hue/saturation undefined, report all zeros.
        // NOTE(review): this also zeroes V even for non-black greys,
        // unlike the s == 0 branch below which keeps V = M — confirm
        // whether that asymmetry is intended by the segmentation caller.
        hsv_image[idx    ] = 0;
        hsv_image[idx + 1] = 0;
        hsv_image[idx + 2] = 0;
        return;
    }
    int s = (int)(255*C/M);
    if(s == 0)
    {
        hsv_image[idx    ] = 0;
        hsv_image[idx + 1] = 0;
        hsv_image[idx + 2] = M;
        return;
    }
    int h = 0;
    if(M == r)
    {
        h = (int)(30*(g-b)/C);
        if(h < 0) h += 180;
    }
    else if(M == g)
        h = 60 + (int)(30*(b-r)/C);
    else if(M == b)
        h = 120 + (int)(30*(r-g)/C); // FIX: was 30*(b-r)/C; the blue-max
                                     // sector of the hue hexagon uses (r-g)
    hsv_image[idx    ] = h;
    hsv_image[idx + 1] = s;
    hsv_image[idx + 2] = M;
}
// Per-pixel threshold: writes 255 where every channel of the 3-channel
// source lies strictly inside its (min, max) interval, 0 otherwise.
__global__ void in_range(unsigned char* image_src, unsigned char* image_dst, int min_0, int min_1, int min_2, int max_0, int max_1, int max_2)
{
    const int pixel = blockIdx.x*blockDim.x*blockDim.y + (threadIdx.y*blockDim.x + threadIdx.x);
    const unsigned char c0 = image_src[3*pixel];
    const unsigned char c1 = image_src[3*pixel + 1];
    const unsigned char c2 = image_src[3*pixel + 2];
    const bool inside = (c0 > min_0) && (c0 < max_0) &&
                        (c1 > min_1) && (c1 < max_1) &&
                        (c2 > min_2) && (c2 < max_2);
    image_dst[pixel] = inside ? 255 : 0;
}
// Host pipeline: upload BGR frame, convert to HSV, threshold to a binary
// mask, download the mask. Buffers must have been created beforehand by
// gpu_allocate_memory().
void gpu_segment_by_color(unsigned char* bgr_image, unsigned char* segmented_image, int img_rows, int img_cols, int img_channels)
{
    dim3 block_size(32,32,1);
    dim3 grid_size(300, 1,1);
    if(cudaMemcpy(gpu_bgr_image, bgr_image, img_rows*img_cols*img_channels, cudaMemcpyHostToDevice) != cudaSuccess)
        std::cout << "Cannot copy from host to device." << std::endl;
    bgr2hsv<<<grid_size, block_size>>>(gpu_bgr_image, gpu_hsv_image);
    // FIX: cudaThreadSynchronize() is deprecated — use cudaDeviceSynchronize().
    // Also surface launch-configuration errors, which are only reported via
    // cudaGetLastError(), not by the synchronize call's own status.
    if(cudaGetLastError() != cudaSuccess || cudaDeviceSynchronize() != cudaSuccess)
        std::cout << "There were errors while launching threads." << std::endl;
    in_range<<<grid_size, block_size>>>(gpu_hsv_image, gpu_bin_image, 150,150,150,180,255,255);
    if(cudaGetLastError() != cudaSuccess || cudaDeviceSynchronize() != cudaSuccess)
        std::cout << "There were errors while launching threads." << std::endl;
    cudaMemcpy(segmented_image, gpu_bin_image, img_rows*img_cols, cudaMemcpyDeviceToHost);
}
// Allocates all six device image buffers (rows * cols * channels bytes each)
// on GPU 0. Logs a message per failed allocation but does not abort.
void gpu_allocate_memory(int img_rows, int img_cols, int img_channels)
{
    if(cudaSetDevice(0) != cudaSuccess) std::cout << "Cannot set GPU 0 " << std::endl;
    std::cout << "Allocating memory in GPU..." << std::endl;
    const size_t bytes = (size_t)img_rows * img_cols * img_channels;
    unsigned char** buffers[] = {
        &gpu_bgr_image, &gpu_hsv_image, &gpu_bin_image,
        &gpu_vhgw_s_array, &gpu_vhgw_r_array, &gpu_fil_image
    };
    for (unsigned i = 0; i < sizeof(buffers)/sizeof(buffers[0]); ++i)
        if (cudaMalloc(buffers[i], bytes) != cudaSuccess)
            std::cout << "Cannot allocate" << std::endl;
}
// Releases the six device image buffers created by gpu_allocate_memory().
// Logs a message per failed free but does not abort.
void gpu_free_memory()
{
    std::cout << "Releasing memory in GPU..." << std::endl;
    unsigned char* buffers[] = {
        gpu_bgr_image, gpu_hsv_image, gpu_bin_image,
        gpu_vhgw_s_array, gpu_vhgw_r_array, gpu_fil_image
    };
    for (unsigned i = 0; i < sizeof(buffers)/sizeof(buffers[0]); ++i)
        if (cudaFree(buffers[i]) != cudaSuccess)
            std::cout << "Cannot release memory" << std::endl;
}
|
20,153 | #include <stdlib.h>
#include <stdio.h>
// Wraps a kernel launch and checks it afterwards. The launch expression
// contains one comma, so it arrives as two macro arguments x and y, which
// are pasted back together; the error check runs right after the launch.
#define kernel_exec(x,y) x,y; cuda_kernel_error(__FILE__, __LINE__)
// Aborts with file/line and the CUDA error string if the last recorded
// CUDA error (e.g. a bad launch configuration) is not cudaSuccess.
inline void cuda_kernel_error(const char* file, int linenum){
cudaError_t errcode=cudaGetLastError();
if(errcode!=cudaSuccess){
printf("Kernel error in file %s line %d: %s\n", file, linenum, cudaGetErrorString(errcode));
exit(-1);
}
}
// Variant of kernel_exec for launch expressions containing three commas
// (they arrive as four macro arguments and are pasted back together).
// NOTE(review): this macro calls cuda_kernel_error, so the
// cuda_kernel_stream_error function below is never invoked by it.
#define kernel_stream_exec(x,y,z,k) x,y,z,k; cuda_kernel_error(__FILE__, __LINE__)
// Identical body to cuda_kernel_error; kept for naming symmetry with the
// stream macro above.
inline void cuda_kernel_stream_error(const char* file, int linenum){
cudaError_t errcode=cudaGetLastError();
if(errcode!=cudaSuccess){
printf("Kernel error in file %s line %d: %s\n", file, linenum, cudaGetErrorString(errcode));
exit(-1);
}
}
#define cuda_call(x) cuda_call_check(__FILE__, __LINE__, x)
// Validates the status returned by a CUDA runtime call; on failure prints
// the call site and error string, then terminates the process.
inline void cuda_call_check(const char* file, int linenum, cudaError_t errcode){
    if (errcode == cudaSuccess)
        return;
    printf("CUDA error in file %s line %d: %s\n", file, linenum, cudaGetErrorString(errcode));
    exit(-1);
}
|
20,154 | #include "includes.h"
#define ROUND_OFF 50000
#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define GET_BLOCKS(n, t) (n+t-1) / t
// == Dimension rearrangement Kernel
// Rearranges an NCHW blob into padded [N][H][W+2*padding][C] layout: each
// thread moves one (x,y) element of one channel of one image, shifting it
// right by `padding` columns in the output.
__global__ void blob_rearrange_kernel2_1d(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight)
{
    int xy = blockIdx.x*blockDim.x + threadIdx.x;
    if(xy>=widthheight)
        return;
    int ch = blockIdx.y;
    int n  = blockIdx.z;
    float value=in[(n*channels+ch)*widthheight+xy];
    // FIX: removed the __syncthreads() that followed this load. The kernel
    // uses no shared memory, so the barrier did nothing useful, and calling
    // it after the divergent early return above is undefined behavior.
    int xpad = (xy % width + padding);
    int ypad = (xy / width + 0); // no vertical padding
    int xypad = ypad * (width+2*padding) + xpad;
    out[(n*pwidthheight+xypad)*channels + ch] = value;
}
20,155 | #include "includes.h"
// Elementwise matrix subtraction c = a - b for a cr x cc row-major matrix.
// 2-D launch: x indexes columns, y indexes rows; out-of-range threads exit.
__global__ void matrixSub(double *a, double *b, double *c, int cr, int cc){
    const long col = blockIdx.x * blockDim.x + threadIdx.x;
    const long row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= cc || row >= cr)
        return;
    const long at = row * cc + col;
    c[at] = a[at] - b[at];
}
20,156 | #include "includes.h"
// Leaky-ReLU forward over float4-packed elements. Each component that is
// negative is scaled by negative_slope; additionally, the per-lane sign
// predicate (>= 0) of each of the four components is gathered across the
// warp with a ballot, and lane 0 of each warp stores the four 32-bit masks
// as one uint4 into bits_buffer[warp index].
// Uses __ballot (pre-CUDA-9) or __ballot_sync (CUDA 9+) via the
// __CUDACC_VER_MAJOR__ preprocessor switch.
// NOTE(review): `val` is uninitialized for lanes with elem_id >= elem_count,
// so those lanes contribute arbitrary ballot bits in the tail warp —
// confirm downstream consumers ignore bits past elem_count.
__global__ void rectified_linear_upd_kernel( const float4 * __restrict input, float4 * __restrict output, uint4 * __restrict bits_buffer, float negative_slope, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
float4 val;
uint4 bits;
if (elem_id < elem_count)
val = input[elem_id];
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
// Legacy warp votes (implicit whole-warp participation, pre-Volta).
bits.x = __ballot(val.x < 0.0F ? 0 : 1);
bits.y = __ballot(val.y < 0.0F ? 0 : 1);
bits.z = __ballot(val.z < 0.0F ? 0 : 1);
bits.w = __ballot(val.w < 0.0F ? 0 : 1);
#else
// CUDA 9+ requires an explicit participant mask (full warp here).
bits.x = __ballot_sync(0xFFFFFFFF, val.x < 0.0F ? 0 : 1);
bits.y = __ballot_sync(0xFFFFFFFF, val.y < 0.0F ? 0 : 1);
bits.z = __ballot_sync(0xFFFFFFFF, val.z < 0.0F ? 0 : 1);
bits.w = __ballot_sync(0xFFFFFFFF, val.w < 0.0F ? 0 : 1);
#endif
#endif
if (elem_id < elem_count)
{
// Lane 0 of the warp publishes the collected sign masks.
int lane_id = elem_id & 31;
if (lane_id == 0)
bits_buffer[elem_id >> 5] = bits;
if (val.x < 0.0F)
val.x *= negative_slope;
if (val.y < 0.0F)
val.y *= negative_slope;
if (val.z < 0.0F)
val.z *= negative_slope;
if (val.w < 0.0F)
val.w *= negative_slope;
output[elem_id] = val;
}
} |
20,157 | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Managing Arrays between Host and Device
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
#define N 10
// --------------------- CUDA KERNELS
// kernel function for changing host copied arrays
// Single-thread demo kernel (launched <<<1,1>>>): overwrites each of the
// N device-array slots with its own index so the host can observe that
// device-side writes survive the copy back.
__global__ void showArray(int *dev_arr)
{
    int i = 0;
    while (i < N)
    {
        dev_arr[i] = i;
        ++i;
    }
}
// the main is a host code
// Host driver: zero-initialise a host array, round-trip it through the
// device, let showArray() mutate it there, and print the returned values.
int main(int argc, const char * argv[])
{
    cout << "------------ initialising device and host arrays" << endl;
    int arr[N];    // host copy
    int *dev_arr;  // device copy

    for (int i = 0; i < N; i++)
    {
        arr[i] = 0;
        printf("host arr[%d] = %d\n", i, arr[i]);
    }

    cout << "------------ allocate device memory dev_arr" << endl;
    cudaMalloc( (void**)&dev_arr, N * sizeof(int) );

    cout << "------------ copy arr to dev_arr" << endl;
    cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice);

    cout << "------------ calling kernel showArray" << endl;
    showArray<<<1,1>>>(dev_arr);

    cout << "------------ copy dev_arr to arr" << endl;
    // Blocking copy: also waits for the kernel above to finish.
    cudaMemcpy(arr, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost);

    cout << "------------ printing changed host array" << endl;
    for (int i = 0; i < N; i++)
    {
        printf("** changed host arr[%d] = %d\n", i, arr[i]);
    }

    cudaFree( dev_arr );
    return 0;
}
|
20,158 | #include "includes.h"
// Backward-difference divergence of an interleaved 2-D vector field
// v[2*idx+0] = x-component, v[2*idx+1] = y-component, written into d.
// Boundary terms are dropped (one-sided differences at the edges).
__global__ void divergenceL(float *v, float *d, int nx, int ny)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= nx || py >= ny)
        return;
    const int idx = px + py * nx;
    float div = 0.0f;
    if (px < nx - 1) div += v[2 * idx];              // +vx(x, y)
    if (px > 0)      div -= v[2 * (idx - 1)];        // -vx(x-1, y)
    if (py < ny - 1) div += v[2 * idx + 1];          // +vy(x, y)
    if (py > 0)      div -= v[2 * (idx - nx) + 1];   // -vy(x, y-1)
    d[idx] = div;
}
20,159 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define MAXN 1000000
//todo: read clusters from file
//todo: the choise for init clusters
//todo: the ending criteria
const int ThreadsPerBlock = 1024; // max value since CC2.0
int BlocksPerGrid = 0;
int N; // number of points
int K; // number of clusters
int T; // number of iterations
char INPUT_FILE[256]; // input file name
float *POINTS; // POINTS[i*2+0]:x POINTS[i*2+1]:y
int *CLASSES; // class for each point
int *NUM_CLASSES; // number of points in each class
float *CLUSTERS; // position for each cluster
// size for each array
size_t S_POINTS;
size_t S_CLASSES;
size_t S_NUM_CLASSES;
size_t S_CLUSTERS;
// values on CUDA device
float *D_POINTS; // POINTS[i*2+0]:x POINTS[i*2+1]:y
int *D_CLASSES; // class for each point
int *D_NUM_CLASSES; // number of points in each class
float *D_CLUSTERS; // position for each cluster
// Writes the final k-means output: one class index per point to
// Classes.txt, and one "x,y" centre per cluster to Clusters.txt.
void write_results(int n, int k){
    FILE *f;
    int i;
    f = fopen("Classes.txt", "w");
    for (i = 0; i < n; i++)
        fprintf(f, "%d\n", CLASSES[i]);
    fclose(f);
    f = fopen("Clusters.txt", "w");
    for (i = 0; i < k; i++)
        fprintf(f, "%f,%f\n", CLUSTERS[i*2], CLUSTERS[i*2+1]);
    fclose(f);
}
// Serial (CPU) assignment step: label each of the n points with the index
// of its nearest cluster centre, by squared Euclidean distance.
void update_classes(int n, int k){ //based on CLUSTERS
    for (int p = 0; p < n; p++) {
        float dx = POINTS[p*2]   - CLUSTERS[0];
        float dy = POINTS[p*2+1] - CLUSTERS[1];
        int   best    = 0;
        float bestDis = dx*dx + dy*dy;
        for (int c = 1; c < k; c++) {
            dx = POINTS[p*2]   - CLUSTERS[c*2];
            dy = POINTS[p*2+1] - CLUSTERS[c*2+1];
            const float d2 = dx*dx + dy*dy;
            if (d2 < bestDis) {
                best    = c;
                bestDis = d2;
            }
        }
        CLASSES[p] = best;
    }
}
// GPU assignment step: one thread per point, writing the index of the
// nearest cluster centre (squared Euclidean distance) into d_classes.
__global__ void cuda_update_classes_kernel(const float *d_points, const float *d_clusters, int *d_classes, int n, int k){
    int i,j,minK;
    float minDis, dis, disX, disY;
    i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < n){ // FIX: was i <= n, which read d_points[n*2] and wrote
               // d_classes[n] — one element past the end of both arrays.
        disX = d_points[i*2]-d_clusters[0];
        disY = d_points[i*2+1]-d_clusters[1];
        minK = 0;
        minDis = disX*disX + disY*disY;
        for(j=1;j<k;j++){
            disX = d_points[i*2]-d_clusters[j*2];
            disY = d_points[i*2+1]-d_clusters[j*2+1];
            dis = disX*disX + disY*disY;
            if(dis<minDis){
                minK = j;
                minDis = dis;
            }
        }
        d_classes[i] = minK;
    }
}
// Host wrapper around the GPU assignment step: uploads the current points
// and cluster centres, launches the kernel, and downloads the labels.
void cuda_update_classes(int n, int k){ //based on CLUSTERS
    // Upload inputs (both copies always attempted, then checked together).
    int ok1 = cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
    int ok2 = cudaMemcpy(D_CLUSTERS, CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
    if (!(ok1 && ok2))
    {
        fprintf(stderr, "Failed to copy data from host to device\n");
        exit(EXIT_FAILURE);
    }
    cuda_update_classes_kernel<<<BlocksPerGrid, ThreadsPerBlock>>>(D_POINTS, D_CLUSTERS, D_CLASSES, n, k);
    // Blocking download of the labels (also waits for the kernel).
    if (cudaMemcpy(CLASSES, D_CLASSES, S_CLASSES, cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy data from device to host\n");
        exit(EXIT_FAILURE);
    }
}
// Update step: recompute each cluster centre as the mean of its assigned
// points, and count the points per cluster in NUM_CLASSES.
void update_clusters(int n, int k){ // based on CLASSES
    int i;
    int _class;
    // Reset accumulators.
    for(i=0;i<k;i++){
        CLUSTERS[i*2]=0;
        CLUSTERS[i*2+1]=0;
        NUM_CLASSES[i]=0;
    }
    // Sum point coordinates into their assigned cluster.
    for(i=0;i<n;i++){
        _class = CLASSES[i];
        NUM_CLASSES[_class]++;
        CLUSTERS[_class*2] += POINTS[i*2];
        CLUSTERS[_class*2+1] += POINTS[i*2+1];
    }
    for(i=0;i<k;i++){
        // FIX: guard empty clusters — dividing by zero produced NaN
        // centres that leaked into Clusters.txt after the final
        // iteration. Empty clusters keep (0,0) and are later discarded
        // by clean_clusters() via NUM_CLASSES[i] == 0.
        if(NUM_CLASSES[i]!=0){
            CLUSTERS[i*2] /= NUM_CLASSES[i];
            CLUSTERS[i*2+1] /= NUM_CLASSES[i];
        }
    }
}
// Removes clusters with zero members by swapping the last cluster into
// their slot and shrinking *K. CLASSES is invalid after this call.
void clean_clusters(int *K){ // remove empty clusters, CLASSES are invalid after this process
    int i = 0;
    while(i<*K){
        if(NUM_CLASSES[i]==0){
            // FIX: decrement first so we copy from the last *valid*
            // cluster (index *K-1). The original read CLUSTERS[*K * 2],
            // one cluster past the end of the array.
            (*K)--;
            CLUSTERS[i*2]   = CLUSTERS[*K * 2];
            CLUSTERS[i*2+1] = CLUSTERS[*K * 2 + 1];
            NUM_CLASSES[i]  = NUM_CLASSES[*K];
            // Do not advance i: the swapped-in cluster may itself be empty.
        }else{
            i++;
        }
    }
}
// Allocates the global host arrays and reads the n input points from
// `input` ("x,y" per line). The first k points seed the cluster centres.
void init(int n, int k, char *input){ // malloc and read points (and clusters)
    FILE *inputFile;
    int i;
    float x,y;
    // read points
    S_POINTS = n * 2 * sizeof(float);
    POINTS = (float*)malloc(S_POINTS);
    inputFile = fopen(input, "r");
    if (inputFile == NULL) { // FIX: avoid NULL dereference when the input
                             // file does not exist / cannot be opened.
        fprintf(stderr, "Cannot open input file %s\n", input);
        exit(EXIT_FAILURE);
    }
    for(i=0;i<n;i++){
        if(fscanf(inputFile, "%f,%f\n", &x, &y)==2){
            POINTS[i*2] = x;
            POINTS[i*2+1] = y;
        }
    }
    fclose(inputFile);
    // classes init
    S_CLASSES = n * sizeof(int);
    CLASSES = (int*)malloc(S_CLASSES);
    // clusters init: seed the k centres with the first k points
    S_NUM_CLASSES = k * sizeof(int);
    S_CLUSTERS = k * 2 * sizeof(float);
    NUM_CLASSES = (int*)malloc(S_NUM_CLASSES);
    CLUSTERS = (float*)malloc(S_CLUSTERS);
    for(i=0;i<k;i++){
        CLUSTERS[i*2]=POINTS[i*2];
        CLUSTERS[i*2+1]=POINTS[i*2+1];
    }
}
// Allocates the four device arrays, uploads the initial points and cluster
// centres, and derives BlocksPerGrid from n and ThreadsPerBlock.
// Errors are accumulated with &= across each group of calls and reported
// once per group; any failure aborts the process.
void cuda_init(int n, int k){ // malloc and copy data to device
cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
int noerr = 1;
// malloc
noerr &= (cuerr = cudaMalloc((void **)&D_POINTS, S_POINTS)) == cudaSuccess;
//printf("err code %s\n", cudaGetErrorString(cuerr));
noerr &= (cuerr = cudaMalloc((void **)&D_CLASSES, S_CLASSES)) == cudaSuccess;
noerr &= (cuerr = cudaMalloc((void **)&D_NUM_CLASSES, S_NUM_CLASSES)) == cudaSuccess;
noerr &= (cuerr = cudaMalloc((void **)&D_CLUSTERS, S_CLUSTERS)) == cudaSuccess;
if (!noerr)
{
fprintf(stderr, "Failed to allocate device vector\n");
exit(EXIT_FAILURE);
}
// copy data
noerr = 1;
noerr &= cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
noerr &= cudaMemcpy(D_CLUSTERS, CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
if (!noerr)
{
fprintf(stderr, "Failed to copy data from host to device\n");
exit(EXIT_FAILURE);
}
// blocksPerGrid: ceiling division so every point gets a thread
BlocksPerGrid = (n + ThreadsPerBlock - 1) / ThreadsPerBlock;
printf("Using %d blocks of %d threads\n", BlocksPerGrid, ThreadsPerBlock);
}
// Counts the number of "x,y" float pairs (one per line) in fileName.
// Returns 0 when the file cannot be opened (the original dereferenced a
// NULL FILE* in that case).
int data_count(char *fileName){
    FILE *inputFile;
    float x, y;
    int count = 0;
    inputFile = fopen(fileName, "r");
    if (inputFile == NULL) {
        return 0;
    }
    while (fscanf(inputFile, "%f,%f\n", &x, &y) == 2) {
        count++;
    }
    fclose(inputFile);
    return count;
}
// Parses -n/-k/-t/-i command-line options into *n, *k, *t and `input`
// (a 256-byte buffer). Returns nonzero — after printing the usage text —
// when the options are invalid; -i is mandatory.
int cmd_parser(int argc, char **argv, int *n, int *k, int *t, char *input){
    int invalid;
    int valid;
    int ch; // FIX: getopt() returns int; with plain 'char' the != -1 test
            // fails on platforms where char is unsigned.
    char usage[] = "Usage: %s -n N -k K -t T -i Input.txt\n"
    " N: Number_of_Points, default: the number of lines in Input_File\n"
    " K: default: 2\n"
    " T: max iterations for the kmeans algorithm\n"
    " Input: should be n lines, two floats in each line and split by ','\n"
    " Results will be in Classes.txt and Clusters.txt\n";
    invalid = 0;
    valid = 0;
    if(argc==1){
        invalid = 1;
    }
    //default values
    *n = -1;
    *k = 2;
    *t = 1;
    while((ch = getopt(argc, argv, "n:k:t:i:h")) != -1) {
        switch(ch) {
        case 'n':
            sscanf(optarg, "%d", n);
            break;
        case 'k':
            sscanf(optarg, "%d", k);
            break;
        case 't':
            sscanf(optarg, "%d", t);
            break;
        case 'i':
            strncpy(input, optarg, 256);
            input[255] = '\0'; // FIX: strncpy does not NUL-terminate when
                               // optarg is >= 256 characters long.
            valid = 1;
            break;
        case 'h': //print help
            invalid = 1;
            break;
        case '?':
            invalid = 1;
            break; // FIX: was falling through to the (empty) default.
        default:
            ;
        }
    }
    // -n omitted: count the lines of the input file instead.
    if(valid && *n==-1){
        *n = data_count(input);
    }
    invalid = invalid || !valid;
    if(invalid){
        printf(usage, argv[0]);
    }
    if(*n>MAXN){
        invalid = 1;
        printf("N is too large\n");
    }
    return invalid;
}
// Driver: parse options, load data, initialise the device, then run T
// iterations of (assign on GPU, update centres on CPU), dropping empty
// clusters between iterations, and write the results.
// NOTE(review): the initial classification is computed twice — once on the
// CPU (update_classes) and again on the GPU (cuda_update_classes) — and the
// loop body immediately recomputes it a third time on the GPU; the CPU call
// appears redundant. Confirm before removing.
int main(int argc, char **argv) {
int t;
if(cmd_parser(argc, argv, &N, &K, &T, INPUT_FILE)){ // not enough parameters
return 1;
}
init(N, K, INPUT_FILE);
cuda_init(N, K);
update_classes(N, K);
cuda_update_classes(N, K);
for(t=0;t<T;t++){
// Empty clusters (NUM_CLASSES == 0) only exist after the first
// update_clusters pass, hence the t != 0 guard.
if(t!=0){
clean_clusters(&K);
}
//update_classes(N, K);
cuda_update_classes(N, K);
update_clusters(N, K);
}
write_results(N, K);
return 0;
}
|
20,160 | #include <stdio.h>
#include <stdlib.h>
// Handshake matching: an unmatched node u is matched with v when v is u's
// strong neighbour and u is v's strong neighbour (mutual agreement); both
// entries of `matches` are then set. Grid-stride loop over all nodes.
__global__ void check_handshaking_gpu(int * strongNeighbor, int * matches, int numNodes) {
    /** YOUR CODE GOES BELOW **/
    const int first = blockDim.x * blockIdx.x + threadIdx.x;
    const int step  = blockDim.x * gridDim.x;
    for (int u = first; u < numNodes; u += step) {
        if (matches[u] != -1)
            continue; // already matched
        const int v = strongNeighbor[u];
        if (strongNeighbor[v] == u) {
            matches[u] = v;
            matches[v] = u;
        }
    }
    /** YOUR CODE GOES ABOVE **/
}
|
20,161 | //xfail:ASSERTION_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
// Returns twice the element at index tid.
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
    return 2.0f * v[tid];
}
// Returns half the element at index tid (multiplying by 0.5f is exact).
__device__ float divideByTwo(float *v, unsigned int tid)
{
    return 0.5f * v[tid];
}
typedef float(*funcType)(float*, unsigned int);
// Verification test kernel (see the xfail:ASSERTION_ERROR header): selects
// a function pointer by `i`, deliberately leaving f == NULL for any i other
// than 1 or 2. Calling through the NULL pointer is the *intended* defect —
// do not "fix" it; the checker is expected to report it.
__global__ void foor(float *v, unsigned int size, unsigned int i)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f;
if (i == 1)
f = multiplyByTwo;
else if (i == 2)
f = divideByTwo;
else
f = NULL;
if (tid < size)
{
// Indirect call: undefined when f is NULL (the case being tested).
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
}
}
|
20,162 | #include <iostream>
using namespace std;
// Computes the population variance of a[0..n-1] around `mean` into b[0]
// (the host takes the square root for the standard deviation).
// FIX: the original had every block of the <<<n,1>>> launch start its sum
// at its own blockIdx and race on b[0] (including a concurrent b[0] = 0),
// giving a nondeterministic result. Only block 0 computes now; the
// interface and launch configuration are unchanged.
__global__ void stdeviation(int *a, float *b, float mean, int n){
    if (blockIdx.x != 0)
        return;
    float sum = 0.0f;
    for (int i = 0; i < n; i++) {
        const float d = a[i] - mean;
        sum += d * d;
    }
    b[0] = sum / n;
}
// Reads n, builds a = {1..n}, and prints the standard deviation computed
// on the GPU.
int main(){
    int n;
    cin>>n;
    int a[n];
    for(int i=0; i<n; i++){
        a[i] = i+1;
    }
    // FIX: (n + 1)/2 was integer division, so for even n the mean of
    // 1..n (e.g. 2.5 for n = 4) was silently truncated.
    float mean = (n + 1) / 2.0f;
    int *ad;
    float *b;
    cudaMalloc(&ad, n*sizeof(int));
    cudaMalloc(&b, sizeof(float));
    cudaMemcpy(ad, a, n*sizeof(int), cudaMemcpyHostToDevice);
    stdeviation<<<n, 1>>> (ad, b, mean, n);
    float ans[1];
    // Blocking copy: also waits for the kernel to finish.
    cudaMemcpy(ans, b, sizeof(float), cudaMemcpyDeviceToHost);
    cout<<"Answer is: "<<sqrt(ans[0])<<endl;
    // Release device allocations (previously leaked).
    cudaFree(ad);
    cudaFree(b);
}
|
20,163 | /*
Mary Barker
Homework 6
Vector dot product on GPU to compile: nvcc BarkerHW6.cu
*/
#include <sys/time.h>
#include <stdio.h>
#define N 10000 //if N is greater than dimBlock.x program will break
#define MIN(x,y) (x<y)?x:y
float *A_CPU, *B_CPU, *C_CPU; //CPU pointers
float *A_GPU, *B_GPU, *C_GPU; //GPU pointers
dim3 grid, block;
// Allocates the three host and three device vectors of N floats each and
// derives the launch configuration (threads capped at 1024 per block).
void AllocateMemory()
{
    // Host (CPU) buffers.
    A_CPU = (float*)malloc(N*sizeof(float));
    B_CPU = (float*)malloc(N*sizeof(float));
    C_CPU = (float*)malloc(N*sizeof(float));
    // Device (GPU) buffers.
    cudaMalloc(&A_GPU, N*sizeof(float));
    cudaMalloc(&B_GPU, N*sizeof(float));
    cudaMalloc(&C_GPU, N*sizeof(float));
    // Launch configuration: one thread per element, ceiling-divided blocks.
    block = MIN(1024, N);
    grid = (N > 1024) ? ((N - 1) / block.x + 1) : 1;
}
//Loads values into vectors that we will add.
// Fills both input vectors with ones, so the dot product should equal N.
void Innitialize()
{
    for (int i = 0; i < N; ++i)
    {
        A_CPU[i] = 1.0f;
        B_CPU[i] = 1.0f;
    }
}
//Cleaning up memory after we are finished.
// Releases all six buffers: device allocations first, then host memory.
void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free
{
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);
    free(A_CPU);
    free(B_CPU);
    free(C_CPU);
}
//This is the kernel. It is the function that will run on the GPU.
//It adds vectors A and B then stores result in vector C
// Block-wise dot product: each thread writes one elementwise product into
// C, then each block repeatedly "folds" its segment of C in half, with the
// odd leftover element absorbed by the segment's first thread, until one
// partial sum remains at C[blockDim.x * blockIdx.x]. The host adds the
// per-block partials afterwards (see main).
// NOTE(review): every thread executes the same number of while-iterations
// (new_n depends only on blockDim.x), so the __syncthreads() at the top of
// the loop is reached by the whole block each pass.
__global__ void DotProduct(float *A, float *B, float *C, int n)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
// offset: first index of this block's segment; new_n: current fold width.
int odd, offset = blockDim.x * blockIdx.x, new_n = blockDim.x;
bool not_done_yet = true;
if(id < n) C[id] = A[id] * B[id];
// 'Fold' the vector in half repeatedly
while(not_done_yet)
{
__syncthreads();
odd = new_n % 2;
new_n = new_n / 2;
if(new_n > 0)
{
// First new_n threads of the segment add their mirror element.
if(id < (offset + new_n))
{
if(id + new_n < n)
{
C[id] += C[id+new_n];
// Odd width: the segment's first thread also absorbs the last element.
if( (odd > 0) && (id < offset + 1) )
C[id] += C[id+2*new_n];
}
}
}
else
{
not_done_yet = false;
}
}
}
// Host driver: allocate, initialise, time the H2D copies + kernel + D2H
// copy, combine the per-block partial sums on the CPU, and report the
// dot-product value (expected to equal N for the all-ones inputs).
int main()
{
int i;
timeval start, end;
//Partitioning off the memory that you will be using.
AllocateMemory();
//Loading up values to be added.
Innitialize();
//Starting the timer
gettimeofday(&start, NULL);
//Copy Memory from CPU to GPU
cudaMemcpy(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
//Calling the Kernel (GPU) function.
DotProduct<<<grid, block>>>(A_GPU, B_GPU, C_GPU, N);
//Copy Memory from GPU to CPU. This cudaMemcpy blocks until the kernel
//has finished, so the timing below includes kernel completion.
cudaMemcpy(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
//Each block left its partial sum at C[block * block.x]; add them here.
if(grid.x > 1)
{
for(i = 1; i < grid.x; i++)
{
C_CPU[0] += C_CPU[i*block.x];
}
}
//Stopping the timer
gettimeofday(&end, NULL);
//Calculating the total time used in the addition and converting it to milliseconds.
float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
//Displaying the time
printf("Time in milliseconds= %.15f\n", (time/1000.0));
// Displaying vector info you will want to comment out the vector print line when your
//vector becomes big. This is just to make sure everything is running correctly.
for(i = 0; i < N; i++)
{
//printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]);
}
//Displaying the value of the dot product
printf("Value is %f\n", C_CPU[0]);
//You're done so cleanup your mess.
CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU);
return(0);
}
|
20,164 | #include "includes.h"
// Elementwise running maximum: where top_temp exceeds top_data, copy the
// larger value into top_data and record mask_index in mask. One thread per
// element; threads past n do nothing.
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask, const int mask_index){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && top_temp[i] > top_data[i])
    {
        top_data[i] = top_temp[i];
        mask[i] = mask_index;
    }
}
20,165 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
// Demonstrates a host-to-host cudaMemcpy of 100 chars into a char4 array
// and prints both views of the same bytes.
int main(int argc, char* argv[])
{
    char* p = NULL;
    char4* q = NULL;
    int i = 0;
    cudaError_t iRet;
    p = (char*) malloc(100 * sizeof(char));
    assert(p != NULL);
    q = (char4*) malloc(25 * sizeof(char4));
    assert(q != NULL);
    for (i = 0; i < 100; ++i)
    {
        p[i] = i;
    }
    iRet = cudaMemcpy(q, p, 100 * sizeof(char), cudaMemcpyHostToHost);
    // FIX: the return status was stored but never inspected.
    assert(iRet == cudaSuccess);
    for (i = 0; i < 100; ++i)
    {
        printf("%d ", p[i]);
    }
    printf("\n**********\n");
    // Same 100 bytes reinterpreted as 25 char4 quadruples.
    for (i = 0; i < 25; ++i)
    {
        printf("%d %d %d %d ", q[i].x, q[i].y, q[i].z, q[i].w);
    }
    printf("\n");
    free(q);
    free(p);
    return 0;
}
|
20,166 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
// 1-D wave equation, fixed ends, one thread per grid point; intended for a
// single-block launch (<<<1,n>>>) since it relies on __syncthreads().
// d_mm1/d_m/d_mp1 hold the previous/current/next time levels; s = cfl^2.
__global__ void WaveEq(float *d_mm1, float *d_m, float *d_mp1, float s,
                       float T, float dt, float cfl)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    int N=blockDim.x-1; // last grid index, used by the optional BCs below
    d_m[i]=d_mp1[i];
    float t=0.0;
    while (t < T) {
        // Uncomment two lines below for absorbing boundary conditions
        // d_mp1[0]=d_m[1]+((cfl-1)/(cfl+1))*(d_mp1[1]-d_m[0]);
        // d_mp1[N]=d_m[N-1]+((cfl-1)/(cfl+1))*(d_mp1[N-1]-d_m[N]);
        t=dt+t;
        d_mm1[i]=d_m[i];
        d_m[i]=d_mp1[i];
        __syncthreads(); // all time-level shifts visible before the stencil
        if (i>0 && i<(blockDim.x-1)) {
            d_mp1[i]=2*d_m[i]-d_mm1[i]+s*(d_m[i-1]-2*d_m[i]+d_m[i+1]);
        }
        // FIX: barrier at the end of the step. Without it a fast thread
        // could start the next iteration and overwrite d_m[i] while a
        // neighbouring thread is still reading d_m[i] in its stencil.
        __syncthreads();
        // Uncomment line below to include source on string
        // d_m[int(N/2)]=0.1;
    }
}
// Host driver: sets up a 100-point string with a small initial bump around
// index 49, runs the WaveEq kernel in a single block of n threads, and
// prints the final displacement profile.
int main(int argc, char** argv)
{
const int n=100;
const int BYTES=n*sizeof(float);
float h_mm1[n];
float h_m[n];
float h_mp1[n];
// Physical parameters: wave speed c, total time T, grid spacing dx.
// dt = dx/c gives cfl = 1 and s = cfl^2 = 1.
float c=1.0;
float T=1.0;
float dx=0.1;
float dt=dx/c;
float cfl=c*dt/dx;
float s=cfl*cfl;
//initialize arrays
for (int i=0;i<n;i++)
{
h_mm1[i]=0.0;
h_m[i]=0.0;
h_mp1[i]=0.0;
}
// Initial condition: triangular bump centred at index 49.
h_mp1[48]=0.1f;
h_mp1[50]=0.1f;
h_mp1[49]=0.2f;
//declare GPU memory pointers
float* d_mm1;
float* d_m;
float* d_mp1;
//allocate memory on the device
cudaMalloc((void**)&d_mm1,BYTES);
cudaMalloc((void**)&d_m,BYTES);
cudaMalloc((void**)&d_mp1,BYTES);
//transfer the array to the GPU
//destination, source, size, method
cudaMemcpy(d_mm1,h_mm1,BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_m,h_m,BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_mp1,h_mp1,BYTES,cudaMemcpyHostToDevice);
//launch the kernel: a single block so __syncthreads() spans all points
WaveEq<<<1,n>>>(d_mm1,d_m,d_mp1,s,T,dt,cfl);
cudaDeviceSynchronize();
//copy the results back onto the host
//destination, source, size, method
cudaMemcpy(h_mm1,d_mm1,BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(h_m,d_m,BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(h_mp1,d_mp1,BYTES,cudaMemcpyDeviceToHost);
for (int i=0;i<n;i++)
{
printf("%d \t %f",i,h_mp1[i]);
printf("\n");
}
printf("\n");
//free memory previously allocated on the device
cudaFree(d_mm1);
cudaFree(d_m);
cudaFree(d_mp1);
} |
20,167 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define N 128
#define BLOCK_SIZE 16
#define BASE_TYPE float
// Dot product: each thread stages one elementwise product in shared memory;
// thread 0 of each block sums its block's products and atomically adds the
// partial into *C. C must be zeroed by the host before the launch.
__global__ void scalMult(const BASE_TYPE *A, const BASE_TYPE *B, BASE_TYPE *C ,int n)
{
    __shared__ BASE_TYPE ash[BLOCK_SIZE];
    ash[threadIdx.x] = A[blockIdx.x * blockDim.x + threadIdx.x]*B[blockIdx.x * blockDim.x + threadIdx.x];
    // FIX: barrier between the shared-memory writes and thread 0's reads.
    // Without it thread 0 could sum ash[] before the other threads of the
    // block had stored their products.
    __syncthreads();
    if(threadIdx.x==0)
    {
        BASE_TYPE sum = 0.0;
        for (int j = 0; j < blockDim.x; j++)
            sum = sum+ash[j];
        atomicAdd(C, sum);
    }
}
// Computes the dot product of two N-element vectors on the GPU via
// scalMult, times the kernel with CUDA events, and compares against a
// CPU reference.
int main()
{
    BASE_TYPE *dev_a, *dev_b, *dev_c;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Host buffers: two N-element vectors and one scalar result.
    size_t size = N* sizeof(BASE_TYPE);
    BASE_TYPE *host_a = (BASE_TYPE *)malloc(size);
    BASE_TYPE *host_b = (BASE_TYPE *)malloc(size);
    BASE_TYPE *host_c = (BASE_TYPE *)malloc(size/N);
    for (int i = 0; i<N; i++)
    {
        host_a[i] = 1;
        host_b[i] = 5;
    }

    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size);
    cudaMalloc( (void**)&dev_c, size/N);

    cudaMemcpy( dev_a, host_a, size, cudaMemcpyHostToDevice ) ;
    cudaMemcpy( dev_b, host_b, size, cudaMemcpyHostToDevice ) ;
    // BUG FIX: the kernel accumulates into *dev_c with atomicAdd, so it
    // must start from zero; the original left it uninitialized, producing
    // a garbage GPU result.
    cudaMemset(dev_c, 0, size/N);

    // Time only the kernel.
    cudaEventRecord(start, 0);
    scalMult <<<BLOCK_SIZE, N / BLOCK_SIZE>>>( dev_a, dev_b, dev_c,N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float KernelTime;
    cudaEventElapsedTime( &KernelTime, start, stop);
    printf("KernelTime: %.2f milliseconds\n", KernelTime);

    cudaMemcpy(host_c, dev_c, size/N, cudaMemcpyDeviceToHost);

    // CPU reference for comparison.
    float S = 0.0;
    for (int i = 0; i < N; i++)
    {
        S = S + host_a[i]*host_b[i];
    }
    printf( "<a,b>GPU = %f\n", *host_c);
    printf("<a,b> CPU= %f\n", S);

    // Clean up device, event and host resources
    // (host buffers and events were leaked in the original).
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(host_a);
    free(host_b);
    free(host_c);
    getchar();
    return 0;
}
20,168 | #include<iostream>
#include<ctime>
#include<cmath>
#include<stdexcept>
using namespace std;
#define MaxElement 1000
// Elementwise vector addition: Result[i] = Array1[i] + Array2[i].
// One thread per element; threads past ElementCount do nothing.
__global__ void Sum(int* Array1, int* Array2, int* Result, int ElementCount){
    const int globalId = blockDim.x * blockIdx.x + threadIdx.x;
    if(globalId >= ElementCount)
        return;
    Result[globalId] = Array1[globalId] + Array2[globalId];
}
// Adds two random integer vectors of length ArraySize on the GPU and
// prints every elementwise result. The grid is sized by ceiling division
// when the array exceeds one block.
void HostVectorSum(int ArraySize=1000, int ThreadsPerBlock=100){
    const int bytes = ArraySize * sizeof(int);

    // Host-side inputs and output.
    int* hostA = (int*) malloc(bytes);
    int* hostB = (int*) malloc(bytes);
    int* hostOut = (int*) malloc(bytes);

    srand(time(0));
    for(int idx = 0; idx < ArraySize; ++idx){
        hostA[idx] = rand() % MaxElement;
        hostB[idx] = rand() % MaxElement;
    }

    // Device mirrors.
    int *devA, *devB, *devOut;
    cudaMalloc(&devA, bytes);
    cudaMalloc(&devB, bytes);
    cudaMalloc(&devOut, bytes);
    cudaMemcpy(devA, hostA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, hostB, bytes, cudaMemcpyHostToDevice);

    // Ceiling-divide so every element is covered.
    int blocks = (ArraySize > ThreadsPerBlock)
        ? (int) ceil(double(ArraySize) / double(ThreadsPerBlock))
        : 1;
    Sum<<<blocks, ThreadsPerBlock>>>(devA, devB, devOut, ArraySize);

    // The blocking copy also synchronizes with the kernel.
    cudaMemcpy(hostOut, devOut, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devOut);

    for(int idx = 0; idx < ArraySize; ++idx)
        printf("Index %d --> %d + %d = %d\n", idx+1, hostA[idx], hostB[idx], hostOut[idx]);

    free(hostA);
    free(hostB);
    free(hostOut);
}
// Computes Result = Vector * Matrix where Matrix is stored column-major
// (each Row-element column is contiguous). One thread per output column.
__global__ void VectorMatrixMultiplication(int* Vector, int* Matrix, int* Result, int Row, int Column){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(col >= Column)
        return;
    const int base = col * Row;  // first element of this column
    int acc = 0;
    for(int r = 0; r < Row; ++r)
        acc += Vector[r] * Matrix[base + r];
    Result[col] = acc;
}
// Multiplies a random Row-element vector by a random Row x Column matrix
// (column-major: each column contiguous) on the GPU and prints the
// resulting Column-element vector.
void HostVectorMatrixMultiplication(int Row, int Column){
    const int vecBytes = Row * sizeof(int);
    const int matBytes = Row * Column * sizeof(int);
    const int outBytes = Column * sizeof(int);

    int* hostVec = (int*) malloc(vecBytes);
    int* hostMat = (int*) malloc(matBytes);
    int* hostOut = (int*) malloc(outBytes);

    srand(time(0));
    for(int r = 0; r < Row; ++r)
        hostVec[r] = rand() % MaxElement;
    for(int c = 0; c < Column; ++c)
        for(int r = 0; r < Row; ++r)
            hostMat[c*Row + r] = rand() % MaxElement;

    int *devVec, *devMat, *devOut;
    cudaMalloc(&devVec, vecBytes);
    cudaMalloc(&devMat, matBytes);
    cudaMalloc(&devOut, outBytes);
    cudaMemcpy(devVec, hostVec, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devMat, hostMat, matBytes, cudaMemcpyHostToDevice);

    // One single-thread block per output column.
    VectorMatrixMultiplication<<<Column, 1>>>(devVec, devMat, devOut, Row, Column);
    cudaMemcpy(hostOut, devOut, outBytes, cudaMemcpyDeviceToHost);

    cudaFree(devVec);
    cudaFree(devMat);
    cudaFree(devOut);

    for(int c = 0; c < Column; ++c)
        printf("Index %d --> %d\n", c+1, hostOut[c]);

    free(hostVec);
    free(hostMat);
    free(hostOut);
}
// Naive dense matrix multiply: Result = MatrixA x MatrixB, all square
// Dimension x Dimension, row-major. One thread per output element;
// out-of-range threads do nothing.
__global__ void MatrixMultiplication(int* MatrixA, int* MatrixB, int* Result, int Dimension){
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Column = blockIdx.x * blockDim.x + threadIdx.x;
    if(Row < Dimension && Column < Dimension){
        int Sum = 0;
        for(int i=0;i<Dimension;i++)
            Sum += MatrixA[Row * Dimension + i] * MatrixB[i * Dimension + Column];
        // BUG FIX: the original called __syncthreads() here, inside a
        // divergent branch — a barrier must be reached by every thread of
        // the block, and no shared memory is used, so it is removed.
        Result[Row * Dimension + Column] = Sum;
    }
}
// Multiplies two random Dimension x Dimension integer matrices on the GPU
// and prints the result.
// BUG FIX / generalization: the original launched ONE block of
// Dimension x Dimension threads, which exceeds the 1024-threads-per-block
// limit and fails for Dimension > 32. Use 16x16 blocks with a
// ceiling-divided grid; the kernel already bounds-checks.
void HostMatrixMultiplication(int Dimension){
    int MatrixMemory = Dimension * Dimension * sizeof(int);
    int* HostMatrixA = (int*) malloc(MatrixMemory);
    int* HostMatrixB = (int*) malloc(MatrixMemory);
    int* HostResult = (int*) malloc(MatrixMemory);
    srand(time(0));
    for(int i=0;i<Dimension;i++){
        for(int j=0;j<Dimension;j++){
            HostMatrixA[i * Dimension + j] = rand() % 30;
            HostMatrixB[i * Dimension + j] = rand() % 30;
        }
    }
    int* DeviceMatrixA;
    int* DeviceMatrixB;
    int* DeviceResult;
    cudaMalloc(&DeviceMatrixA, MatrixMemory);
    cudaMalloc(&DeviceMatrixB, MatrixMemory);
    cudaMalloc(&DeviceResult, MatrixMemory);
    cudaMemcpy(DeviceMatrixA, HostMatrixA, MatrixMemory, cudaMemcpyHostToDevice);
    cudaMemcpy(DeviceMatrixB, HostMatrixB, MatrixMemory, cudaMemcpyHostToDevice);
    // Tiled launch: supports any Dimension, not just <= 32.
    dim3 ThreadsPerBlock(16, 16);
    dim3 BlocksPerGrid((Dimension + 15) / 16, (Dimension + 15) / 16);
    MatrixMultiplication<<<BlocksPerGrid, ThreadsPerBlock>>>(DeviceMatrixA, DeviceMatrixB, DeviceResult, Dimension);
    cudaError_t Exception = cudaGetLastError();
    if(Exception != cudaSuccess){
        printf("Cuda Error: %s", cudaGetErrorString(Exception));
        // Release everything on the error path (the original leaked here).
        cudaFree(DeviceMatrixA);
        cudaFree(DeviceMatrixB);
        cudaFree(DeviceResult);
        free(HostMatrixA);
        free(HostMatrixB);
        free(HostResult);
        return;
    }
    cudaDeviceSynchronize();
    cudaMemcpy(HostResult, DeviceResult, MatrixMemory, cudaMemcpyDeviceToHost);
    cudaFree(DeviceMatrixA);
    cudaFree(DeviceMatrixB);
    cudaFree(DeviceResult);
    for(int i=0;i<Dimension;i++){
        for(int j=0;j<Dimension;j++){
            printf("%d ", HostResult[i * Dimension + j]);
        }
        printf("\n");
    }
    // Host buffers were leaked in the original.
    free(HostMatrixA);
    free(HostMatrixB);
    free(HostResult);
}
// Text menu dispatching to the three demo operations.
int main(){
    int choice;
    printf("1.Vector Addition\n2.Vector Matrix Multiplication\n3.Matrix Multiplication\n4.Exit\n");
    printf("Enter The Operation To Be Performed: : ");
    scanf("%d", &choice);
    switch(choice){
        case 1: {
            int size;
            printf("Enter The Array Size: : ");
            scanf("%d", &size);
            HostVectorSum(size);
            break;
        }
        case 2: {
            int rows, cols;
            printf("Enter The Rows And Columns Of The Matrix: : ");
            scanf("%d %d", &rows, &cols);
            HostVectorMatrixMultiplication(rows, cols);
            break;
        }
        case 3: {
            int dim;
            printf("Enter The Dimensions Of The Matrix: : ");
            scanf("%d", &dim);
            HostMatrixMultiplication(dim);
            break;
        }
        default:
            // Any other choice (including 4) exits.
            break;
    }
    return 0;
}
|
20,169 | /*
* FILE: isingV2.cu
* THMMY, 7th semester, Parallel and Distributed Systems: 3rd assignment
* Parallel Implementation of the Ising Model
* Authors:
* Moustaklis Apostolos, 9127, amoustakl@ece.auth.gr
* Papadakis Charis , 9128, papadakic@ece.auth.gr
* Compile command with :
* make all
* Run command example:
* ./src/isingV2
* It will calculate the evolution of the ising Model
* for a given number n of points and k steps
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Defines for the block and grid calculation
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
//The dimensions of the lattice
#define N_X 517
#define N_Y 517
// The size of the weights
#define WSIZE 5
//Helper Defines to access easier the arrays
#define old(i,j,n) *(old+i*n+j)
#define current(i,j,n) *(current+i*n+j)
#define w(i,j) *(w+i*5+j)
#define d_w(i,j) *(d_w+i*5+j)
#define G(i,j,n) *(G+i*n+j)
#define d_current(i,j,n) *(d_current+i*n+j)
#define d_old(i,j,n) *(d_old+i*n+j)
//Functions Declaration
void swapElement(int ** one, int ** two);
__global__
void kernel2D(int *d_current, int *d_old, double *d_w, int n , int * d_flag);
void ising( int *G, double *w, int k, int n);
//! Ising model evolution
/*!
\param G Spins on the square lattice [n-by-n]
\param w Weight matrix [5-by-5]
\param k Number of iterations [scalar]
\param n Number of lattice points per dim [scalar]
NOTE: Both matrices G and w are stored in row-major format.
*/
// Runs k steps of the Ising evolution on an n x n lattice.
// G (row-major spins) is updated in place after every step; w is the 5x5
// weight matrix. Terminates the process early if a step assigns no spin
// signs (see kernel2D's d_flag protocol).
void ising( int *G, double *w, int k, int n){
  // 2D launch covering the lattice; kernel2D also grid-strides internally.
  dim3 block(BLOCK_SIZE_X,BLOCK_SIZE_Y);
  int grid_size_x = (N_X + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X;
  int grid_size_y = (N_Y + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y;
  dim3 grid(grid_size_x,grid_size_y);
  // Host staging buffer for the lattice after each step (the unused second
  // host buffer of the original was dropped).
  int * current = (int*) malloc(n*n*(size_t)sizeof(int));
  if(current == NULL){
    printf("Problem at memory allocation at host \n");
    exit(0);
  }
  int * d_old;
  int * d_current;
  double * d_w;
  int *d_flag ;
  int flag ;
  // Device allocations with failure check.
  if( cudaMalloc((void **)&d_old ,n*n*(size_t)sizeof(int)) != cudaSuccess || cudaMalloc((void **)&d_current,n*n*(size_t)sizeof(int)) != cudaSuccess || cudaMalloc((void **)&d_w, WSIZE*WSIZE*(size_t)sizeof(double)) != cudaSuccess || cudaMalloc(&d_flag,(size_t)sizeof(int)) !=cudaSuccess){
    printf("Problem at memory allocation");
    exit(0);
  }
  // Copy weights and the initial lattice to the device.
  cudaMemcpy(d_w, w, WSIZE*WSIZE*sizeof(double), cudaMemcpyHostToDevice );
  cudaMemcpy(d_old, G, n*n*sizeof(int), cudaMemcpyHostToDevice );
  // Run for k steps.
  for(int l=0; l<k; l++){
    // BUG FIX: seed the "no changes" flag with 1 ON THE DEVICE before each
    // step. kernel2D clears it to 0 whenever it assigns a spin sign; the
    // original never initialized d_flag, so the early-exit test below read
    // uninitialized device memory.
    flag = 1;
    cudaMemcpy(d_flag, &flag, (size_t)sizeof(int), cudaMemcpyHostToDevice);
    kernel2D<<<grid,block>>>(d_current, d_old, d_w, n , d_flag );
    cudaDeviceSynchronize();
    cudaMemcpy(current, d_current, n*n*sizeof(int), cudaMemcpyDeviceToHost );
    // Save this step's result in G.
    memcpy(G , current , n*n*sizeof(int));
    // Swap the device pointers for the next iteration.
    swapElement(&d_old,&d_current);
    cudaMemcpy(&flag , d_flag , (size_t)sizeof(int), cudaMemcpyDeviceToHost);
    // Terminate if no spin sign was assigned this step.
    if(flag){
      printf("terminated: spin values stay same (step %d)\n" , l );
      exit(0);
    }
  }
  // Memory deallocation (d_flag was leaked in the original).
  free(current);
  cudaFree(d_old);
  cudaFree(d_current);
  cudaFree(d_w);
  cudaFree(d_flag);
}
//Helper function to swap the pointers of the arrays
// Exchanges the two int pointers held by *one and *two.
void swapElement(int ** one, int ** two) {
  int * hold = * two;
  * two = * one;
  * one = hold;
}
//The kernel function that updates the values of the ising model
// One Ising update step: for every lattice site, computes the weighted
// influence of its 5x5 neighbourhood (periodic boundaries via the modulo
// arithmetic below) and writes the sign of that influence to d_current.
// d_old is read-only input; d_current is the output lattice.
__global__
void kernel2D(int *d_current, int *d_old, double *d_w, int n , int * d_flag)
{
double influence = 0;
// Compute global column and row indices.
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
// Check if within bounds.
if ((c >= n) || (r >= n))
return;
// Grid-stride over both dimensions so a grid smaller than n x n still
// covers the whole lattice.
for(int i = r; i<n; i+=blockDim.x*gridDim.x){
for(int j = c; j<n; j+=blockDim.y*gridDim.y){
// 5x5 weighted sum over the periodic neighbourhood of (i, j).
for(int ii=0; ii<5; ii++){
for(int jj=0; jj<5; jj++){
influence += d_w(ii,jj) * d_old((i-2+n+ii)%n, (j-2+n+jj)%n, n);
}
}
// magnetic moment gets the value of the SIGN of the weighted influence of its neighbors
if(fabs(influence) < 10e-7){
d_current(i,j,n) = d_old(i,j,n); // remains the same in the case that the weighted influence is zero
}
else if(influence > 10e-7){
d_current(i,j,n) = 1;
// NOTE(review): d_flag is cleared whenever a sign branch is taken, even
// if the spin value did not actually change — so the caller's "no
// changes" early exit only fires when every site had ~zero influence.
// Confirm this is the intended termination condition.
*d_flag = 0;
}
else if(influence < 0){
d_current(i,j,n) = -1;
*d_flag=0;
}
influence = 0;
}
}
}
|
20,170 | #include "includes.h"
// Builds a CDF from a 256-bin histogram using a work-efficient (Brent-Kung
// style) scan in shared memory, then adds the scanned values into cdf.
// Each bin is first normalized by the image pixel count so the scan yields
// cumulative probabilities.
// NOTE(review): correctness appears to assume a single block of >= 256
// threads with SIZE_CDF/SIZE_HISTO (from includes.h) sized to match —
// confirm against the launch site.
__global__ void calcCDF(float *cdf, unsigned int *histo, int imageWidth, int imageHeight, int length) {
__shared__ float partialScan[SIZE_CDF];
int i = threadIdx.x + blockDim.x * blockIdx.x;
// Load and normalize one histogram bin per thread.
if (i < SIZE_CDF && i < 256) {
partialScan[i] = (float) histo[i] / (float) (imageWidth * imageHeight);
}
__syncthreads();
// Up-sweep (reduction) phase of the scan.
for (unsigned int stride = 1; stride <= SIZE_HISTO; stride *= 2) {
unsigned int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index < SIZE_CDF && index < length)
partialScan[index] += partialScan[index - stride];
__syncthreads();
}
// Down-sweep (distribution) phase.
for (unsigned int stride = SIZE_HISTO / 2; stride > 0; stride /= 2) {
__syncthreads();
unsigned int index = (threadIdx.x + 1) * stride * 2 - 1;
if (index + stride < SIZE_CDF && index + stride < length) {
partialScan[index + stride] += partialScan[index];
}
}
__syncthreads();
// Accumulate into the output CDF (+= allows combining repeated launches).
if (i < SIZE_CDF && i < 256) {
cdf[i] += partialScan[threadIdx.x];
}
}
20,171 | #include "includes.h"
// Adds 10 to every element of d. Supports arbitrary 3D grid/block shapes
// by flattening (block, thread) coordinates into one linear index.
__global__ void addTen(float* d, int count) {
    const int blockThreads = blockDim.x * blockDim.y * blockDim.z;
    const int localId = threadIdx.x
                      + threadIdx.y * blockDim.x
                      + threadIdx.z * blockDim.x * blockDim.y;
    const int blockId = blockIdx.x
                      + blockIdx.y * gridDim.x
                      + blockIdx.z * gridDim.x * gridDim.y;
    const int globalId = blockId * blockThreads + localId;
    if (globalId >= count)
        return;
    d[globalId] += 10;
}
20,172 |
// cuda-kernel: add 2 vectors
// CUDA kernel: in-place elementwise addition, v1 += v2.
// Assumes a single block; thread i handles element i.
__global__ void addvecs (double *v1, double *v2){
    const int i = threadIdx.x;
    v1[i] = v1[i] + v2[i];
}
|
20,173 | /*Generates Gaussian 2D filter given a sigma and size for the discretized matrix.
*Corresponds to fspecial('gaussian') in Matlab.
* Should be improved by using reduction when retrieving the sum of the matrix
* instead of all threads looping over the full matrix.*/
/* Builds a discretized 2D Gaussian filter (cf. Matlab fspecial('gaussian')).
 * Must be called from a single sz-by-sz thread block: thread (x, y) fills
 * output[y*sz + x]. When normalize is true every thread also loops over
 * the full matrix to compute the sum and divides its own entry by it
 * (a shared-memory reduction would be more efficient). */
__device__ void generate2DGaussian(double * output, double sigma, int sz, bool normalize) {
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    const int idx = row * sz + col;

    /* Distance of this entry from the filter centre. */
    const int dx = abs(col - sz/2);
    const int dy = abs(row - sz/2);

    output[idx] = exp(-(pow((double)(dx), 2.0) + pow((double)(dy), 2.0)) / (2*(pow(sigma, 2.0))));

    if (normalize) {
        /* Every entry must be written before anyone starts summing. */
        __syncthreads();
        double total = 0.0;
        for (int r = 0; r < sz; r++) {
            for (int c = 0; c < sz; c++) {
                total += output[r*sz + c];
            }
        }
        /* Everyone must finish summing before entries are overwritten. */
        __syncthreads();
        output[idx] /= total;
    }
}
|
20,174 | #include "includes.h"
// Atomically accumulates all elements of `array` (length N, a
// compile-time constant from includes.h) into the scalar *sum.
__global__ void ArraySum(float *array, float *sum){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i >= N)
        return;
    atomicAdd(sum, array[i]);
}
20,175 | #include "stdio.h"
#include <cuda.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
// Device-side printf demo: every thread prints its threadIdx.x and the
// float argument f.
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __CUDACC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
// Launch one block of five threads; each prints a greeting.
helloCuda<<< 1, 5 >>>( 3.14159 );
// Block until the kernel finishes so the device printf output is flushed
// before the process exits.
cudaDeviceSynchronize();
return 0;
}
|
20,176 | /*
* Alex Laubscher
* Gillespie Algorithm
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 1024
// Gillespie stochastic simulation of a birth-death process: constant
// birth propensity (1000) and per-individual death propensity. Runs for
// 100,000 simulated seconds and reports the reactions/second throughput.
int main(void) {
    // Loop state: counter counts fired reactions, tau is the sampled
    // waiting time, sample selects which reaction fires.
    // BUG FIX: counter was read (counter++) without ever being
    // initialized — undefined behavior.
    double counter = 0;
    int total;
    double tau;
    double sample;
    // Initial population
    int pop = 0;
    // Simulation clock and horizon (simulated seconds).
    double time = 0;
    double maxTime = 100000;
    // Constant birth propensity.
    int birth = 1000;
    // Wall-clock timer for the throughput figure.
    clock_t time_elapsed = clock();
    // Main SSA loop.
    while (time < maxTime) {
        // Total propensity: births plus one death channel per individual.
        total = birth + pop;
        // Exponential waiting time; log of a U(0,1) sample is <= 0, so
        // tau <= 0 and `time -= tau` below advances the clock.
        tau = (1.0 / total) * log((double) rand() / (RAND_MAX));
        // Second random draw: which reaction fires.
        sample = total * ((double) rand() / (RAND_MAX));
        // Update the population.
        if (sample < birth) {
            pop++;
        } else {
            pop--;
        }
        // Advance the simulation clock.
        time = time - tau;
        // Count this reaction.
        counter++;
    }
    // Convert elapsed ticks to seconds.
    time_elapsed = (clock() - time_elapsed);
    double timer = ((double) time_elapsed) / CLOCKS_PER_SEC;
    // Reactions per wall-clock second.
    double rate = counter / timer;
    printf("Population: %d\n", pop);
    printf("Counter: %f\n", counter);
    printf("Timer: %f\n", timer);
    printf("Rate: %f\n", rate);
}
|
20,177 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// Times a device-side exclusive prefix sum (thrust::exclusive_scan) over
// argv[1] ones, then prints the last scan element and the elapsed ms.
int main(int argc, char** argv) {
    using namespace std;
    using namespace thrust;

    long n = atol(argv[1]);

    // Host input: n ones, plus a buffer for the scanned result.
    thrust::host_vector<float> h_vec(n);
    thrust::fill(h_vec.begin(), h_vec.end(), 1);
    thrust::host_vector<float> h_res(n);

    // Device copy of the input.
    thrust::device_vector<float> d_vec(n);
    thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

    // CUDA events bracket only the scan itself.
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    thrust::device_vector<float> d_intermediate(n);
    thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_intermediate.begin());

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms;
    cudaEventElapsedTime(&ms, start, stop);

    // Copy the scan back and report.
    thrust::copy(d_intermediate.begin(), d_intermediate.end(), h_res.begin());
    cout << h_res[n - 1] << endl;
    cout << ms << endl;
}
|
20,178 | #include "includes.h"
// 2x2 box downsample: each block sums a 2x2 neighbourhood of `in` into a
// shared accumulator and writes the total to one output pixel.
// Launch expectation (from the indexing): one block per output pixel with
// a 2x2 thread block.
// BUG FIX: the original accumulated into the shared variable with a plain
// += from all threads — a data race; atomicAdd makes the sum well-defined.
// (The original's Turkish comment noted the image darkened when dividing
// the sum — consistent with values lost to that race.)
__global__ void gaussian_filter(unsigned *in, unsigned *out, int width, int height){
    __shared__ int acc;
    if (threadIdx.x == 0 && threadIdx.y == 0)
        acc = 0;
    __syncthreads();
    // Each thread contributes one pixel of the 2x2 tile.
    atomicAdd(&acc, (int) in[blockIdx.y*width*2 + blockIdx.x*2 + threadIdx.y*width + threadIdx.x]);
    __syncthreads();
    // One writer per output pixel (all threads computed the same value).
    if (threadIdx.x == 0 && threadIdx.y == 0)
        out[blockIdx.y*width/2 + blockIdx.x] = acc;
}
20,179 | // REQUIRES: clang-driver
// Check that we raise an error if we're trying to compile CUDA code but can't
// find a CUDA install, unless -nocudainc was passed.
// RUN: %clang -### --sysroot=%s/no-cuda-there %s 2>&1 | FileCheck %s --check-prefix ERR
// RUN: %clang -### --cuda-path=%s/no-cuda-there %s 2>&1 | FileCheck %s --check-prefix ERR
// ERR: cannot find CUDA installation
// RUN: %clang -### -nocudainc --sysroot=%s/no-cuda-there %s 2>&1 | FileCheck %s --check-prefix OK
// RUN: %clang -### -nocudainc --cuda-path=%s/no-cuda-there %s 2>&1 | FileCheck %s --check-prefix OK
// OK-NOT: cannot find CUDA installation
|
20,180 | #include <stdio.h>
#include <unistd.h>
// Prints the integer argument from the device (one line per launch).
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
    // Launch each print on its own short-lived stream. Destroying a stream
    // does not cancel work already enqueued on it, so all five kernels run.
    for (int i = 0; i < 5; ++i)
    {
        cudaStream_t s;
        cudaStreamCreate(&s);
        printNumber<<<1, 1, 0, s>>>(i);
        cudaStreamDestroy(s);
    }
    // Wait for all launches so the device printf output is flushed.
    cudaDeviceSynchronize();
}
|
20,181 | #include "includes.h"
// Initializes a swirl force field centred at (320, 240): inside
// SWIRL_RADIUS the force is the unit tangent (counter-clockwise) vector,
// outside it is zero. Layout implied by the indexing: blockIdx.x = row y,
// threadIdx.x = column x.
__global__ void mInitForce(float *f_dimX, float *f_dimY) {
    int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    float x = (float)threadIdx.x;
    float y = (float)blockIdx.x;
    // Distance from the swirl centre. Use sqrtf and float literals so the
    // whole computation stays in single precision (the original mixed in
    // double constants like 240.0, forcing double math).
    float length = sqrtf((x - 320.0f)*(x - 320.0f) + (y - 240.0f)*(y - 240.0f));
    if (length < SWIRL_RADIUS) {
        // NOTE(review): at the exact centre length == 0 and these divide
        // by zero (as in the original) — confirm (320,240) is never hit
        // or guard with a small epsilon.
        f_dimX[Idx] = (240.0f - y) / length;
        f_dimY[Idx] = (x - 320.0f) / length;
    } else {
        f_dimX[Idx] = f_dimY[Idx] = 0.f;
    }
}
20,182 | /*
Short job 1 MI-PRC, 2019/2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
struct atom{
float x,y,z,charge;
//x,y,z, a naboj pro kazdy atom
//x,y,z, and charge info for each atom
};
struct grid_s{
int size_x,size_y,size_z;
// rozmery gridu (mrizky))
// sizes of grid (in x-,y-,and z- dimension)
float spacing_x,spacing_y,spacing_z;
// mezibodova vzdalenost v gridu
// distances in grid
float offset_x,offset_y,offset_z;
// posun gridu
// offsets of grid
float * pot;
float * d_pot;
// vypocitany potencial v CPU a GPU pameti
// computed potential in grid points
} grid;
struct atom * atoms;
struct atom * d_atoms;
int no_atoms;
// pocet atomu a pole s jejich parametry v CPU a GPU pameti
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Fills the global grid spacings/offsets with fixed test values and gives
// every atom a deterministic position and charge derived from its index.
void init()
{
  grid.spacing_x = 0.15;
  grid.offset_x  = 0.5;
  grid.spacing_y = 0.08;
  grid.offset_y  = -0.4;
  grid.spacing_z = 0.22;
  grid.offset_z  = 0.3;
  // Deterministic pseudo-layout: coordinates cycle with small prime moduli.
  for (int idx = 0; idx < no_atoms; idx++) {
    atoms[idx].x = (idx % 47) + 0.229;
    atoms[idx].y = (idx % 31) - 10.29;
    atoms[idx].z = (idx % 19) + 50.311;
    atoms[idx].charge = (idx % 8) + 0.5;
  }
}
// Maps the total runtime t (seconds) to an SJ1 score: 0 when fewer than
// 5 cases ran or t exceeds 6 s; otherwise 12*1.6/t, capped at 18 points.
float body(float t,int n)
{
  if (n < 5 || t > 6.0)
    return 0.0;
  float score = 12.0*1.6/t;
  return (score > 18.0) ? 18.0f : score;
}
// zacatek casti k modifikaci
// beginning of part for modification
// muzete pridat vlastni funkce nebo datove struktury, you can also add new functions or data structures
// Computes the electrostatic potential at every grid point by summing
// charge / distance over all atoms, staging atoms through shared memory
// in tiles. Pairs of threads along z (threadIdx.z and threadIdx.z+1) map
// to the SAME grid point and split each tile's atom list in half; their
// partial sums are combined with atomicAdd at the end — which is why
// blockDim.z is twice the number of z points per block (see c_energy).
__global__
void compute(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
__shared__ struct atom atomcache[1024];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z/2 + threadIdx.z/2;
// Cooperating-thread count / id used for the tile loads below.
int numThreads = blockDim.x * blockDim.y * blockDim.z/2;
int threadId = threadIdx.z/2 * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// location of grid point
float x = gsx * (float) i + gox;
float y = gsy * (float) j + goy;
float z = gsz * (float) k + goz;
float pot = 0.0f;
// NOTE(review): this unguarded zero-store is written by both z-partner
// threads and, for edge blocks, can index past the grid — it appears to
// rely on the benchmark's grid sizes; confirm before reuse.
gpot[k * gsizex * gsizey + j * gsizex + i] = 0.0f;
for (int offset = 0; offset < no_atoms; offset += numThreads) {
__syncthreads();
// Cooperative load of the next tile of atoms into shared memory.
if (offset + threadId < no_atoms) {
atomcache[threadId] = atoms[offset + threadId];
}
__syncthreads();
// Even z-threads process the first half of the tile, odd the second.
if (threadIdx.z % 2 == 0) {
for (int na = 0; na < min(numThreads, no_atoms - offset) / 2; na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
else {
for (int na = min(numThreads, no_atoms - offset) / 2; na < min(numThreads, no_atoms - offset); na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
}
// Combine the two half-sums for this grid point.
if (i < gsizex && j < gsizey && k < gsizez) {
atomicAdd(&gpot[k * gsizex * gsizey + j * gsizex + i], pot);
}
}
// Host wrapper: launches `compute` over the whole potential grid.
// Blocks are 8x8x16 threads; the z-dimension of each block is
// double-booked (compute pairs threads via threadIdx.z/2), so the grid's
// z extent is divided by 8 z-points per block, not 16.
// (Removed: an unused `tot` local and a dead commented-out launch.)
void c_energy(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
    dim3 grid((gsizex + 7) / 8, (gsizey + 7) / 8, (gsizez + 7) / 8);
    dim3 block_size(8, 8, 16);
    compute<<<grid, block_size>>>(gsizex, gsizey, gsizez, gsx, gsy, gsz, gox, goy, goz, atoms, no_atoms, gpot);
}
// end of part for modification
// konec casti k modifikaci
// Validates the computed potential grid against precomputed checksums.
// Grid points are partitioned into 8 parity classes by the low bits of
// (i, j, k); each class's sum must match correct[N*10 + class] to within
// 6% relative error. Returns 0 on success, 1 on mismatch.
int check(int N,float *correct,int gsizex,int gsizey,int gsizez,float *gpot){
// check the correctness
float crc[8];
int si,si2;
for (int i=0; i<8; i++) crc[i]=0.0;
for (int i=0; i<grid.size_x; i++) {
for (int j=0; j<grid.size_y; j++) {
for (int k=0; k<grid.size_z; k++) {
float x=gpot[(k)*gsizex*gsizey+(j)*gsizex + (i)];//DATA(i,j,k);
si=(i&1)+(j&1)*2+(k&1)*4; // parity class 0..7
/*si2=(i&2)^(j&2)^(k&2);
if (si2) crc[si]+=x;
else crc[si]-=x;
*/
crc[si]+=x;
}}}
/*
for (int i=0; i<8; i++) printf("%g,",crc[i]);
printf("\n");
for (int i=0; i<8; i++) printf("%g,%g ",crc[i],correct[N*10+i]);
printf("\n");
*/
// Relative tolerance of 6% per class checksum.
for(int i=0;i<8;i++)
if (fabs(1.0-crc[i]/correct[N*10+i])>0.06)
{
printf("ERROR in CRC!!!!\n");
return 1;
}
return 0;
}
// Benchmark driver: runs c_energy over five problem sizes (grid dims and
// atom counts from tgx/tgy/tgz/ta), validates each run via check(), and
// prints per-case timings, derived performance figures, and the final
// SJ1 score from body(). Aborts with 0 points if the 6-second budget is
// exceeded or any checksum fails.
int main( void ) {
clock_t start_time,end_time;
// NOTE(review): most of these locals (soucet, i..m, pomo, v, ri/rk,
// s_delta, mA..s) are unused scaffolding from the assignment template.
int soucet=0,N,i,j,k,n,m,*pomo,v;
int ri,rj,rk;
double delta,s_delta=0.0,timea[16];
float *mA, *mB,*mX,*mX2,s;
//int tn[4]={1000,1500,2000,2500};
// Reference checksums: 10 slots per test case (8 parity classes + padding).
float correct[50]={128619,128714,128630,128725,129043,129139,129054,129150, 0, 0,
1.2849e+06,1.28585e+06,1.28501e+06,1.28596e+06,1.28913e+06,1.29009e+06,1.28924e+06,1.2902e+06, 0, 0,
1.285e+08,1.28594e+08,1.28511e+08,1.28605e+08,1.28917e+08,1.29012e+08,1.28929e+08,1.29024e+08, 0, 0,
2.09323e+08,2.09448e+08,2.09287e+08,2.09413e+08,2.10481e+08,2.10609e+08,2.10445e+08,2.10573e+08, 0, 0,
2.31853e+08,2.32026e+08,2.31867e+08,2.3204e+08,2.327e+08,2.32875e+08,2.32715e+08,2.3289e+08, 0, 0};
// Per-case grid dimensions and atom counts.
int tgx[5]={20,20,20,200,64};
int tgy[5]={20,20,20,200,64};
int tgz[5]={20,20,20,200,64};
int ta[5]={2000,20000,2000000,2000,100000};
// 16*10^8,81*10^8,64*10^8,52*10^8
srand (time(NULL));
pomo=(int *)malloc(32*1024*1024);
v=0;
for(N=0;N<16;N++) timea[N]=0.0;
float s_t=0.0;  // accumulated wall-clock time across all cases
for(N=0;N<5;N++)
{
// Configure this test case.
grid.size_x=tgx[N];
grid.size_y=tgy[N];
grid.size_z=tgz[N];
no_atoms=ta[N];
atoms=(struct atom *)malloc(no_atoms * sizeof(struct atom));
HANDLE_ERROR(cudaMalloc(&d_atoms, no_atoms * sizeof(struct atom)));
if ((atoms==NULL)||(d_atoms==NULL))
{
printf("Alloc error\n");
return 0;
}
grid.pot=(float *)malloc(grid.size_x * grid.size_y * grid.size_z * sizeof(float));
HANDLE_ERROR(cudaMalloc(&grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float)));
if ((grid.pot==NULL)||(grid.d_pot==NULL))
{
printf("Alloc error\n");
return 0;
}
init();
HANDLE_ERROR(cudaMemcpy(d_atoms, atoms, no_atoms * sizeof(struct atom), cudaMemcpyHostToDevice));
//soucet+=vyprazdni(pomo,v);
start_time=clock();
// improve performance of this call
// (same note in Czech in the original source)
c_energy(grid.size_x,grid.size_y,grid.size_z,grid.spacing_x,grid.spacing_y,grid.spacing_z,grid.offset_x,grid.offset_y,grid.offset_z,d_atoms,no_atoms,grid.d_pot);
cudaDeviceSynchronize();
end_time=clock();
HANDLE_ERROR(cudaMemcpy(grid.pot, grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float), cudaMemcpyDeviceToHost));
// Record this case's wall-clock time.
delta=((double)(end_time-start_time))/CLOCKS_PER_SEC;
timea[N]=delta;
s_t+=delta;
// Validate against the reference checksums.
rj=check(N,correct,grid.size_x,grid.size_y,grid.size_z,grid.pot);
if (rj==1)
{
printf("BAD result!\n");
return 0;
}
free(atoms);
free(grid.pot);
cudaFree(d_atoms);
cudaFree(grid.d_pot);
// Hard budget: all five cases must finish within 6 seconds total.
if (s_t>6.0)
{
printf("Time limit (6 seconds) is reached (time=%g s). SJ1 points: 0\n",s_t);
return 0;
}
} // end of N
printf("%i\n",soucet);
// Report per-case timings and (for the large cases) a throughput figure
// proportional to grid volume * atom count / time.
for(N=0;N<5;N++)
{
printf("Time %i=%g",N,timea[N]);
if (N>=2)
{
delta=11;
delta*=tgx[N];
delta*=tgy[N];
delta*=tgz[N];
delta*=ta[N];
delta/=timea[N];
printf(" Perf=%g",delta);
}
printf("\n");
}
printf("Sum of time=%g\n",s_t);
printf("SJ1 points:%.2f\n",body(s_t,5));
return 0;
}
|
20,183 | #include <stdio.h>
#include <stdlib.h>
// Doubles every element in place: d_buffer[ix] += d_buffer[ix].
// No bounds check: the launch must cover the buffer exactly (or the
// buffer must be padded to grid*block elements).
__global__ void calculate(int *d_buffer){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    d_buffer[ix] += d_buffer[ix];
    // Removed the original's trailing __syncthreads(): no shared memory is
    // used and a barrier after the last statement has no effect.
}
// Multiplies every element by 2 in place.
// No bounds check: the launch must cover the buffer exactly (or the
// buffer must be padded to grid*block elements).
__global__ void compute1(int *d_buffer){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    d_buffer[ix] = d_buffer[ix]*2;
    // Removed the original's trailing __syncthreads(): no shared memory is
    // used and a barrier after the last statement has no effect.
}
// Multiplies every element by 10 in place.
// No bounds check: the launch must cover the buffer exactly (or the
// buffer must be padded to grid*block elements).
__global__ void compute2(int *d_buffer){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    d_buffer[ix] = d_buffer[ix]*10;
    // Removed the original's trailing __syncthreads(): no shared memory is
    // used and a barrier after the last statement has no effect.
}
// Driver: fills an N x N matrix, uploads it, then runs `iter` rounds of
// `loop` back-to-back launches each of calculate/compute1/compute2.
// Usage: prog <N> <iterations> <launches-per-kernel>.
int main(int argc, char *argv[]){
    int *matriz_1,*h_matriz;
    int N,i,j,iter;
    int loop;
    // matrix dimension
    N = atoi(argv[1]);
    // number of iterations
    iter = atoi(argv[2]);
    // launches per kernel per iteration
    loop = atoi(argv[3]);

    h_matriz = (int*) malloc(sizeof(int)*N*N);
    for(i=0;i<N;i++){
        for(j=0;j<N;j++){
            h_matriz[i*N+j] = (i*N+j)+1;
        }
    }

    dim3 grid, block;
    block.x = 1024;
    // BUG FIX: the kernels index one thread per matrix ELEMENT, but the
    // original grid only covered N threads, so every row past the first
    // was never processed. Cover all N*N elements; the device buffer is
    // padded to a whole number of blocks because the kernels carry no
    // bounds check.
    grid.x = ((long)N*N + block.x - 1) / block.x;
    cudaMalloc(&matriz_1, sizeof(int) * grid.x * block.x);
    cudaMemcpy(matriz_1, h_matriz, N*N*sizeof(int), cudaMemcpyHostToDevice);

    for(i=0;i<iter;i++){
        for(j=0;j<loop;j++){
            calculate<<<grid,block>>>(matriz_1);
        }
        for(j=0;j<loop;j++){
            compute1<<<grid,block>>>(matriz_1);
        }
        for(j=0;j<loop;j++){
            compute2<<<grid,block>>>(matriz_1);
        }
    }
    // Drain the queued launches and release resources (the original leaked
    // both buffers; the unused second device pointer was removed).
    cudaDeviceSynchronize();
    cudaFree(matriz_1);
    free(h_matriz);
    return 0;
}
|
// CSR sparse matrix-vector product: result[row] = alpha * (A x)[row].
// rowStart is staged through dynamic shared memory (blockDim.x + 1 ints,
// sized by the host) so each thread reads its row extent without two
// global loads.
// NOTE(review): the `beta` parameter is accepted but never used — a full
// BLAS-style y = alpha*A*x + beta*y would also read result[idx]. Confirm
// callers always expect pure alpha*A*x before relying on beta.
extern "C" __global__ void sparseMultiply(double* values, int* colIdx, int* rowStart, double* result, double* x, double alpha, double beta, int size) {
// Dynamically allocated shared memory, should be BlockDim.x + 1 ints (see cuFuncSetSharedSize host code)
extern __shared__ int sharedRowStart[];
// Indices
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
double rowacc = 0.0;
// Each thread loads one element of rowStart
if(idx < size) {
sharedRowStart[tid] = rowStart[idx];
}
// The first thread loads additionally the next element, needed by the last thread
if(tid == 0) {
int lastIdx = min((blockIdx.x + 1) * blockDim.x, size);
sharedRowStart[blockDim.x] = rowStart[lastIdx];
}
__syncthreads();
if(idx < size) {
// Multiply and sum up data of this row
for(int i = sharedRowStart[tid]; i < sharedRowStart[tid + 1]; i++) {
rowacc += values[i] * x[colIdx[i]];
}
result[idx] = rowacc * alpha;
}
}
|
20,185 | #include <stdio.h>
#include <cuda.h>
// Records each thread's threadIdx.x into a[] and blockIdx.x into b[].
// NOTE(review): the index formula only collapses to the usual flat index
// for the 1D launch used in main (<<<2, 4>>>), where all .y terms are
// zero; a true 2D launch would need gridDim/blockDim factors — confirm
// the 2D variant was intentionally abandoned.
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
// Demo driver: launches `run` with a 1D configuration and prints, for
// each element, the thread index and block index that wrote it.
// NOTE(review): only 8 threads run (<<<2, 4>>>), so elements 8..29 print
// whatever the uninitialized device allocation held — as in the original.
int main() {
    int *a, *b, N = 30;
    cudaMalloc((void **)&a, N * sizeof(int));
    cudaMalloc((void **)&b, N * sizeof(int));
    // (Removed the unused dim3 gridSize/blockSize locals of the abandoned
    // 2D variant; the commented launch is kept for reference.)
    // run<<<gridSize, blockSize>>>(a, b, N);
    run<<<2, 4>>>(a, b, N);
    int *hA = new int[N], *hB = new int[N];
    // Blocking copies also synchronize with the kernel.
    cudaMemcpy(hA, a, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(hB, b, sizeof(int) * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d: %d[%d]\n", i, hA[i], hB[i]);
    }
    cudaFree(a);
    cudaFree(b);
    // BUG FIX: the host arrays were leaked.
    delete[] hA;
    delete[] hB;
}
|
20,186 | #include "includes.h"
// One elimination step (outer index k) of an in-place factorization on
// the MATRIX_SIZE x MATRIX_SIZE row-major matrix U. Block b handles row
// i = k + 1 + b; its threads stride across columns j in [i, num_rows-1]
// with step `stride`, applying U[i][j] -= U[k][i] * U[k][j].
// NOTE(review): assumes row k was already normalized by a companion
// kernel — confirm at the call site.
__global__ void chol_kernel_optimized(float * U, int k, int stride) {
//With stride...
//Iterators
unsigned int j;
unsigned int num_rows = MATRIX_SIZE;
//This call acts as a single K iteration
//Each block does a single i iteration
//Need to consider offset,
int i = blockIdx.x + (k + 1);
//Each thread does some part of j
//Stide in units of 'stride'
//Thread 0 does 0, 16, 32
//Thread 1 does 1, 17, 33
//..etc.
int offset = i; //From original loop
int jstart = threadIdx.x + offset;
int jstep = stride;
//Only continue if in bounds?
//Top limit on i for whole (original) loop
int jtop = num_rows - 1;
//Bottom limit on i for whole (original) loop
int jbottom = i;
//Do work for this i iteration
//Want to stride across
for (j = jstart; (j >= jbottom) && (j <= jtop); j += jstep) {
U[i * num_rows + j] -= U[k * num_rows + i] * U[k * num_rows + j];
}
}
20,187 | #include "includes.h"
// One thread per pixel: convolves three colour planes (B, G, R) with the
// integer kernel K (krows x kcols, row-major) and normalizes by the sum
// of the kernel weights that actually overlapped the image (borders).
__global__ void blur(int* B,int* G,int* R, int* RB,int* RG,int* RR, int* K, int rows, int cols, int krows, int kcols) {
    int index = blockIdx.x * 1024 + threadIdx.x;
    // BUG FIX: was `index > rows*cols`, which let the thread with
    // index == rows*cols write one element past the end of each plane.
    if (index >= rows*cols)
        return;
    int pixel_row = index/cols ;
    int pixel_col = index - pixel_row*cols;
    int k_sum = 0;
    // Accumulate locally and write once; the original did += on global
    // memory, silently relying on zero-initialized output planes.
    int sumB = 0, sumG = 0, sumR = 0;
    int k_center_row = (krows-1)/2;
    int k_center_col = (kcols-1)/2;
    for(int i=0;i<krows;i++) {
        for(int j=0;j<kcols;j++) {
            int pr = pixel_row + (i - k_center_row);
            int pc = pixel_col + (j - k_center_col);
            if (pr >=0 && pr < rows && pc>=0 && pc < cols) {
                // BUG FIX: the kernel weight lives at K[i*kcols + j]; the
                // original indexed K with the *centered* offsets, which go
                // negative near the kernel centre (out-of-bounds reads).
                int w = K[i*kcols + j];
                int idx = pr*cols + pc;
                k_sum += w;
                sumB += B[idx]*w;
                sumG += G[idx]*w;
                sumR += R[idx]*w;
            }
        }
    }
    // Guard the normalization in case the overlapping weights sum to zero.
    if (k_sum == 0) k_sum = 1;
    RB[index] = sumB / k_sum;
    RG[index] = sumG / k_sum;
    RR[index] = sumR / k_sum;
}
20,188 | #include "includes.h"
// Fills A (nrows * ncols elements) so every element holds its value
// modulo nrows: A[i] = i % nrows. Grid-stride loop over a possibly
// two-dimensional grid of 1D blocks.
__global__ void __initSeq(int *A, int nrows, int ncols) {
    const int stride = blockDim.x * gridDim.x * gridDim.y;
    const int first = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    for (int i = first; i < nrows*ncols; i += stride) {
        A[i] = i % nrows;
    }
}
20,189 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <random>
//#include <conio.h>
//
//
//#define N 2048
//#define NB_THREADS 1024
//
//__global__ void multVect(int* result, int* a, int* b) {
// int idx = threadIdx.x + blockIdx.x * blockDim.x;
// if(idx <N) result[idx] = a[idx] * b[idx];
//}
//
//
//int main() {
//
// int *d_vect1, *d_vect2, *d_vect3;
//
// const int size = N * sizeof(int);
//
// cudaMallocManaged(&d_vect1, size);
// cudaMallocManaged(&d_vect2, size);
// cudaMallocManaged(&d_vect3, size);
//
// for (int i = 0; i < N; ++i)
// {
// d_vect1[i] = rand() % (int)floor(sqrt(INT_MAX));
// d_vect2[i] = rand() % (int)floor(sqrt(INT_MAX));
// }
//
//
// multVect <<<N/ NB_THREADS, NB_THREADS >> >(d_vect3, d_vect1, d_vect2);
//
// cudaDeviceSynchronize();
//
// for (int i = 0; i < N; ++i)
// {
// printf("%d*%d=%d ", d_vect1[i], d_vect2[i], d_vect3[i]);
// }
//
// cudaFree(d_vect1);
// cudaFree(d_vect2);
// cudaFree(d_vect3);
//
// _getch();
// return 0;
//} |
20,190 | #include <stdio.h>
#define NSTREAM 4
#define BDIM 128
// Prints `size` floats from a, starting a fresh line every 128 values.
void printArray(float *a, int size){
    int i = 0;
    while (i < size) {
        if ((i & 127) == 0)
            printf("\n");
        printf("%.0f ", a[i]);
        ++i;
    }
    printf("\n\n");
}
// Element-wise C[i] = A[i] + B[i] for i in [0, N); one thread per element,
// threads past N exit.
// Fix: the original special-cased idx == N-1 to read A[idx+2] and B[idx+2],
// which indexes two elements past the end of the arrays (out-of-bounds read;
// possibly a deliberate fault injection for a memcheck demo, but a defect in
// production code).
__global__ void sumArrays(float *A, float *B, float *C, const int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        C[idx] = A[idx] + B[idx];
    }
}
// Demonstrates overlap of H2D copy / kernel / D2H copy: first runs the vector
// add sequentially in the default stream, then re-runs it split into NSTREAM
// chunks, each chunk issued asynchronously in its own stream.
// NOTE(review): no CUDA API call here is error-checked; only the sequential
// pass's result is ever read (the streamed pass's gpuRef is not examined).
int main(int argc, char **argv)
{
    printf("> %s Starting...\n", argv[0]);
    // set up data size of vectors
    int nElem = 1 << 9;
    printf("> vector size = %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);
    // malloc pinned host memory for async memcpy
    // (cudaMemcpyAsync requires page-locked host buffers to actually overlap)
    float *h_A, *h_B, *gpuRef;
    cudaHostAlloc((void**)&h_A, nBytes, cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_B, nBytes, cudaHostAllocDefault);
    cudaHostAlloc((void**)&gpuRef, nBytes, cudaHostAllocDefault);
    // initialize data at host side: A[i] = B[i] = i
    for (int i = 0; i < nElem; i++)
    {
        h_A[i] = h_B[i] = i;
    }
    memset(gpuRef, 0, nBytes);
    // malloc device global memory
    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);
    // invoke kernel at host side
    dim3 block (BDIM);
    dim3 grid  ((nElem + block.x - 1) / block.x);   // ceil-div so the tail is covered
    printf("> grid (%d, %d) block (%d, %d)\n", grid.x, grid.y, block.x,
           block.y);
    // sequential operation: blocking copies + default-stream launch
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    sumArrays<<<grid, block>>>(d_A, d_B, d_C, nElem);
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);   // blocking: syncs with the kernel
    printf("\n");
    printArray(gpuRef, nElem);
    // grid parallel operation: split the vectors into NSTREAM chunks
    int iElem = nElem / NSTREAM;          // assumes nElem divisible by NSTREAM
    size_t iBytes = iElem * sizeof(float);
    grid.x = (iElem + block.x - 1) / block.x;   // shrink the grid to one chunk
    cudaStream_t stream[NSTREAM];
    for (int i = 0; i < NSTREAM; ++i)
    {
        cudaStreamCreate(&stream[i]);
    }
    // initiate all work on the device asynchronously in depth-first order
    // (copy-in, kernel, copy-out per stream; streams overlap with each other)
    for (int i = 0; i < NSTREAM; ++i)
    {
        int ioffset = i * iElem;
        cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], iBytes,
                        cudaMemcpyHostToDevice, stream[i]);
        cudaMemcpyAsync(&d_B[ioffset], &h_B[ioffset], iBytes,
                        cudaMemcpyHostToDevice, stream[i]);
        sumArrays<<<grid, block, 0, stream[i]>>>(&d_A[ioffset], &d_B[ioffset],
                &d_C[ioffset], iElem);
        cudaMemcpyAsync(&gpuRef[ioffset], &d_C[ioffset], iBytes,
                        cudaMemcpyDeviceToHost, stream[i]);
    }
    // check kernel error
    // NOTE(review): the return value is discarded; launch errors go unnoticed.
    cudaGetLastError();
    // free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // free host memory
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(gpuRef);
    // destroy streams
    for (int i = 0; i < NSTREAM; ++i)
    {
        cudaStreamDestroy(stream[i]);
    }
    cudaDeviceReset();   // implicitly synchronizes and tears down the context
    return(0);
}
20,191 | #include "includes.h"
// Box-filter smoothing: Out[row][col] = mean of the fsize x fsize window of
// In centred on (row, col); pixels whose window would leave the image are set
// to 0.  Expects a 2D launch with one thread per pixel and an odd fsize.
// Fixes vs. original: threads beyond the image now exit instead of writing
// out of bounds; the row border test uses fsize/2 (was a `fsize-2` typo) and
// `height` (was `width`); `>=` replaces `>` so the window never reads past
// the last valid row/column.
__global__ void CudaImageSmooth(unsigned char *In, unsigned char *Out, int width, int height, int fsize)
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= width || row >= height)   // grid may overhang the image
        return;
    int destIndex = row*width + col;
    int half = fsize/2;
    if (col < half || col >= width - half || row < half || row >= height - half) {
        Out[destIndex] = 0;              // window would fall outside the image
    } else {
        float tmp = 0.0f;
        for (int frow = -half; frow <= half; frow++) {
            for (int fcol = -half; fcol <= half; fcol++) {
                tmp += (float)In[(row+frow)*width + (col+fcol)];
            }
        }
        tmp /= (fsize*fsize); // average
        Out[destIndex] = (unsigned char)tmp;
    }
}
20,192 | //
// Created by Peter Rigole on 2019-05-03.
//
#include "InputProcessor.cuh"
// Default constructor: leaves the neural-net pointer unset.
InputProcessor::InputProcessor() {}
// Binds the processor to the given NeuralNet.
// NOTE(review): the pointer appears to be stored without taking ownership —
// presumably the caller keeps the NeuralNet alive; confirm against callers.
InputProcessor::InputProcessor(NeuralNet* neuralNet_init) : neuralNet(neuralNet_init) {}
// Rebinds the processor to a different NeuralNet.
void InputProcessor::setNeuralNet(NeuralNet* neuralNet_update) {
neuralNet = neuralNet_update;
}
// Placeholder: input processing is not implemented yet.
void InputProcessor::processInput() {
}
|
20,193 | #include <curand_kernel.h>
#include <curand.h>
#include <stdio.h>
#define NUM_PARTICLES 10000
#define NUM_ITERATIONS 10000
#define TPB 256
#define N (NUM_PARTICLES/TPB + 1)
// Plain particle state.  (Array-of-structures layout; kept as-is because it
// is part of the host interface.)
struct Particle {
    float position_x;
    float position_y;
    float position_z;
    float velocity_x;
    float velocity_y;
    float velocity_z;
};
// One thread per particle: add a uniform random kick to the velocity, then
// integrate position by one step.
// Fix: the launch uses N = ceil(NUM_PARTICLES/TPB) blocks, so up to
// N*TPB = 10240 threads run for 10000 particles; the original had no bounds
// guard and the tail threads indexed past the end of the array.
__global__ void simulate(Particle* particles) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= NUM_PARTICLES)   // grid tail: N*TPB may exceed NUM_PARTICLES
        return;
    Particle* p = &particles[id];
    // Per-thread RNG state, seeded by thread id for determinism across runs.
    curandState state;
    curand_init(id, id, 0, &state);
    p->velocity_x += curand_uniform(&state);
    p->velocity_y += curand_uniform(&state);
    p->velocity_z += curand_uniform(&state);
    p->position_x += p->velocity_x;
    p->position_y += p->velocity_y;
    p->position_z += p->velocity_z;
}
// Host driver: ping-pongs particle state between host and device for
// NUM_ITERATIONS rounds of simulate().
// Fixes vs. original: d_particles was first allocated with `new` and the
// pointer immediately overwritten by cudaMallocHost (leaking the new[]
// block); the host array was never initialised (garbage state); nothing was
// freed before exit.
int main()
{
    Particle *particles = new Particle[NUM_PARTICLES]();  // value-init to zero
    Particle *d_particles = nullptr;
    // NOTE(review): cudaMallocHost returns pinned *host* memory; the kernel
    // dereferencing it relies on unified addressing / zero-copy — presumably
    // intentional, but plain cudaMalloc would be the conventional choice.
    cudaMallocHost(&d_particles, sizeof(Particle) * NUM_PARTICLES);
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        cudaMemcpy(d_particles, particles, sizeof(Particle) * NUM_PARTICLES, cudaMemcpyHostToDevice);
        simulate<<<N, TPB>>>(d_particles);
        // blocking copy also synchronizes with the kernel above
        cudaMemcpy(particles, d_particles, sizeof(Particle) * NUM_PARTICLES, cudaMemcpyDeviceToHost);
    }
    cudaFreeHost(d_particles);
    delete[] particles;
    return 0;
}
20,194 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <time.h>
/********************************************************************
CUDA Kernel
*********************************************************************/
/*
 * Computes C[i][j] = sum_k A[i][k] * B[k][j] for square TA x TA matrices.
 * Expected launch: grid (TA/blockDim.x, TA) with 1D blocks — blockIdx.y
 * selects the row, blockIdx.x*blockDim.x+threadIdx.x the column.
 * Fix: the accumulator was declared `int`, truncating every float product
 * before accumulation; it is now a float like the data it accumulates.
 */
__global__ void matrixMul (float* C, float* A, float* B, int TA)
{
    /* coordinates of the C element computed by this thread */
    int i = blockIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    /* dot product of row i of A with column j of B */
    float cc = 0.0f;
    for (int k = 0; k < TA; ++ k)
        cc += A[i * TA + k] * B[k * TA + j];
    /* store */
    C[i * TA + j] = cc;
}
/********************************************************************
Programme main
*********************************************************************/
// Driver: multiplies two TM x TM matrices on the GPU (one thread per element
// of C), times copy+compute with CUDA events, and verifies the result against
// a closed form for the chosen test matrices (ones everywhere, i+1 on the
// diagonal).
// NOTE(review): GRID_SIZE_X = TM / 1024 truncates, so TM must be a multiple
// of 1024 (the default 2048 is) or part of C is never computed — confirm
// intended usage before accepting other TM values.
int main (int argc, char** argv)
{
    int i, j, TM, GRID_SIZE_X, GRID_SIZE_Y, BLOCK_SIZE_X;
    cudaError_t cerror;
    const int THREADS_PER_BLOCK = 1024;
    //
    /* events for timing the GPU processing */
    float tc;
    cudaEvent_t depart, arret;
    cudaEventCreate(&depart);
    cudaEventCreate(&arret);
    /* default values */
    TM = 2048;
    /* TM can be supplied as the first command-line argument */
    if (argc > 1) {
       TM = atoi(argv[1]);
    }
    GRID_SIZE_X = TM / THREADS_PER_BLOCK;
    GRID_SIZE_Y = TM;
    BLOCK_SIZE_X = THREADS_PER_BLOCK;
    /* grid and block definitions: one block row per matrix row */
    dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
    dim3 block(BLOCK_SIZE_X);
    printf("taille grille : %d - %d \n", GRID_SIZE_X, GRID_SIZE_Y);
    printf("taille bloc : %d \n", BLOCK_SIZE_X);
    /* host-side matrix allocation */
    unsigned int msize_A = TM * TM * sizeof(float);
    unsigned int msize_B = TM * TM * sizeof(float);
    unsigned int msize_C = TM * TM * sizeof(float);
    float* h_A = (float*) malloc(msize_A);
    float* h_B = (float*) malloc(msize_B);
    float* h_C = (float*) malloc(msize_C);
    /* initialise the matrices with values that make the result checkable:
       all ones, except the diagonal which holds i+1 */
    for (i = 0; i < TM; i++){
       for (j = 0; j < TM; j++){
          h_A[i * TM + j] = 1.0;
          h_B[i * TM + j] = 1.0;
          h_C[i * TM + j] = 0.0;
          if (i == j) {
             h_A[i * TM + j] = (float) (i + 1);
             h_B[i * TM + j] = (float) (i + 1);
          }
       }
    }
    /* device-side matrix allocation */
    float *d_A; cudaMalloc((void**) &d_A, msize_A);
    float *d_B; cudaMalloc((void**) &d_B, msize_B);
    float *d_C; cudaMalloc((void**) &d_C, msize_C);
    /* timing: start (includes the copies below, by design) */
    cudaEventRecord(depart, 0);
    /* copy matrices A and B from host to device */
    cudaMemcpy(d_A, h_A, msize_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, msize_B, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, msize_C, cudaMemcpyHostToDevice);
    /* kernel launch */
    matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM);
    /* retrieve the launch status (0 == cudaSuccess) */
    cerror = cudaGetLastError();
    printf(" retour %d \n", (int) cerror);
    /* copy matrix C back from the device (blocking: syncs with the kernel) */
    cudaMemcpy(h_C, d_C, msize_C, cudaMemcpyDeviceToHost);
    /* timing: stop */
    cudaEventRecord(arret, 0);
    cudaEventSynchronize(arret);
    cudaEventElapsedTime(&tc, depart, arret);
    printf("Temps calcul : %f seconde\n", tc / 1000.0);
    /* verify the result: diagonal = (i+1)^2 + TM - 1, elsewhere i + j + TM */
    for (i = 0; i < TM; i++) {
       for (j = 0; j < TM; j++) {
          if ((i == j) && (h_C[i * TM + j] != (float)((i + 1) * (i + 1) + TM - 1))) {
             printf("Erreur i: %d j: %d %f\n", i, j, h_C[i * TM + j] ); exit(1);
          }
          else if ((i != j) && (h_C[i * TM + j] != (float)(i + j + TM))) {
             printf("Erreur i: %d j: %d\n", i, j);
             exit(1);
          }
       }
    }
    /* release memory and timing events */
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventDestroy(depart);
    cudaEventDestroy(arret);
}
|
20,195 | #include "includes.h"
// Element-wise 32-bit integer subtraction: output[i] = left_op[i] - right_op[i]
// for i in [0, len).  One thread per element; excess tail threads exit.
__global__ void sub_i32 (int* left_op, int* right_op, int* output, int len) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= len)
        return;
    output[i] = left_op[i] - right_op[i];
}
20,196 | #ifndef CUDA_SYN_BLOCK_CU_
#define CUDA_SYN_BLOCK_CU_
#include <cuda.h>
/*http://aggregate.org/MAGIC/#GPU SyncBlocks*/
/* Software grid-wide barrier (pre-cooperative-groups idiom).
 * barnos: device array of gridDim.x counters, one per block, which must be
 * zero-initialised before the first call; each completed barrier bumps a
 * block's counter by 2.
 * NOTE(review): this only works if every block of the grid is simultaneously
 * resident on the device — if the grid oversubscribes the SMs, unlaunched
 * blocks can never check in and the loop spins forever.  Also relies on
 * `volatile` for visibility rather than formal fences — confirm on the
 * target architecture. */
__device__ void __syncblocks(volatile unsigned int *barnos)
{
  /* First, sync within each Block */
  __syncthreads();
  /* Pick a representative from each (here, 1D) block */
  if (threadIdx.x == 0) {
    /* Get my barrier number (counter advances by 2 per completed barrier) */
    int barno = barnos[blockIdx.x] + 1;
    int hisbarno;
    int who = (blockIdx.x + 1) % gridDim.x;   /* start scanning at my neighbour */
    /* Check in at barrier */
    barnos[blockIdx.x] = barno;
    /* Scan for all here or somebody passed */
    do {
      /* Wait for who (spin until that block has checked in or passed) */
      do {
        hisbarno = barnos[who];
      } while (hisbarno < barno);
      /* Bump to next who (wrap around the ring of blocks) */
      if (++who >= gridDim.x) who = 0;
    } while ((hisbarno == barno) && (who != blockIdx.x));
    /* Tell others we are all here (barno+1 lets laggards stop scanning early) */
    barnos[blockIdx.x] = barno + 1;
  }
  /* Rejoin with rest of my Block */
  __syncthreads();
}
#endif
|
20,197 | /*
============================================================================
Filename : algorithm.c
Author : Floriane Gabriel & Julien von Felten
SCIPER : 248112 & 234865
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
__global__ void GPU_calculation(double*input, double* output, int lenght);
// CPU Baseline
// CPU baseline: `iterations` rounds of a 3x3 mean (box) stencil over the
// interior of a length x length grid, re-injecting a 4-cell heat source of
// 1000 at the centre after each round.  Border cells are never written.
// The input/output pointers are swapped locally between rounds, so for odd
// iteration counts the final state is in the caller's `output` buffer and
// for even counts in the caller's `input` buffer (matches the original).
void array_process(double *input, double *output, int length, int iterations)
{
    for (int n = 0; n < iterations; n++)
    {
        for (int i = 1; i < length - 1; i++)
        {
            for (int j = 1; j < length - 1; j++)
            {
                // Sum the 3x3 neighbourhood in the same row-major order as
                // the original expression so the FP result is bit-identical.
                double sum = 0.0;
                for (int di = -1; di <= 1; di++)
                    for (int dj = -1; dj <= 1; dj++)
                        sum += input[(i + di) * length + (j + dj)];
                output[i * length + j] = sum / 9;
            }
        }
        // Re-assert the heat source in the 2x2 centre cells.
        const int c = length / 2;
        output[(c - 1) * length + (c - 1)] = 1000;
        output[c * length + (c - 1)] = 1000;
        output[(c - 1) * length + c] = 1000;
        output[c * length + c] = 1000;
        // Ping-pong the buffers for the next round.
        double *swap = input;
        input = output;
        output = swap;
    }
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
cudaEventCreate(&cpy_H2D_start);
cudaEventCreate(&cpy_H2D_end);
cudaEventCreate(&cpy_D2H_start);
cudaEventCreate(&cpy_D2H_end);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_end);
/* Preprocessing goes here */
double* d_input; double *d_output;
int length_2 = length*length;
if(cudaMalloc(&d_input, length_2 * sizeof(double)) != cudaSuccess){
cout<<"error in cudaMalloc"<<endl;
}
if(cudaMalloc(&d_output, length_2 * sizeof(double)) != cudaSuccess){
cout<<"error in cudaMalloc"<<endl;
}
cudaEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
if(cudaMemcpy(d_input, input, length_2 * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess){
cout<<"error in cudaMemcpy H2D"<<endl;
}
if(cudaMemcpy(d_output, output, length_2 * sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess){
cout<<"error in cudaMemcpy H2D"<<endl;
}
cudaEventRecord(cpy_H2D_end);
cudaEventSynchronize(cpy_H2D_end);
//Copy array from host to device
cudaEventRecord(comp_start);
/* GPU calculation goes here */
for (int i = 0 ; i < iterations; i ++){
GPU_calculation<<<length, length>>>(d_input, d_output, length);
double* temp = d_input;
d_input = d_output;
d_output = temp;
}
cudaEventRecord(comp_end);
cudaEventSynchronize(comp_end);
cudaEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
if(cudaMemcpy(input, d_input, length_2 * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess){
cout<<"error in cudaMemcpy D2H"<<endl;
}
if (cudaMemcpy(output, d_output, length_2 * sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess){
cout<<"error in cudaMemcpy D2H"<<endl;
}
cudaEventRecord(cpy_D2H_end);
cudaEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
cudaFree(d_input);
cudaFree(d_output);
float time;
cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
// One round of the 3x3 mean stencil over a length x length grid.  Launched
// as <<<length, length>>> (a 1D grid), so the flat global thread id
// enumerates the length*length cells linearly.
// Fix: the original derived y_global from blockIdx.y/threadIdx.y, which are
// always 0 under this 1D launch — so x_global ran up to length*length-1, the
// `y_global == 0` border test was true for EVERY thread (zeroing the whole
// grid after a racy write), and the stencil read out of bounds for the first
// length+1 cells.  The flat id is now decomposed into (row, col), the stencil
// is applied only to interior cells, and each centre cell's own thread writes
// the 1000 heat source (removing the all-threads write race).
__global__ void GPU_calculation(double* input, double* output, int length)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length * length)
        return;
    int x_global = idx % length;   // column
    int y_global = idx / length;   // row
    if (x_global == 0 || y_global == 0 || x_global == length - 1 || y_global == length - 1) {
        /* border of the grid is held at 0 */
        output[(y_global)*(length)+(x_global)] = 0;
    } else {
        output[(y_global)*(length)+(x_global)] = (input[(y_global-1)*(length)+(x_global-1)] +
                    input[(y_global-1)*(length)+(x_global)] +
                    input[(y_global-1)*(length)+(x_global+1)] +
                    input[(y_global)*(length)+(x_global-1)] +
                    input[(y_global)*(length)+(x_global)] +
                    input[(y_global)*(length)+(x_global+1)] +
                    input[(y_global+1)*(length)+(x_global-1)] +
                    input[(y_global+1)*(length)+(x_global)] +
                    input[(y_global+1)*(length)+(x_global+1)] ) / 9;
    }
    /* 2x2 heat source at the centre of the grid, overwriting the stencil */
    if ((x_global == length/2 - 1 || x_global == length/2) &&
        (y_global == length/2 - 1 || y_global == length/2)) {
        output[(y_global)*(length)+(x_global)] = 1000;
    }
}
|
20,198 | #include "includes.h"
/*
* This example explains how to divide the host and
* device code into separate files using vector addition
*/
#define N 64
// In-place vector addition over the first N elements: a[i] += b[i].
// One thread per element; threads past N do nothing.
__global__ void addKernel(float *a,float *b) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        a[i] += b[i];
}
20,199 | #include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
/*
* Malloc
* wrapper for the malloc function
* If malloc returns NULL then the memory allocation failed.
*
*/
/*
 * Malloc
 * Wrapper for malloc that terminates the program on allocation failure.
 * Fix: the failure diagnostic now goes to stderr instead of stdout, so it is
 * not lost to redirection/buffering when the process exits.
 */
void * Malloc(size_t size)
{
    void * allocData = malloc(size);
    if (allocData == NULL)
    {
        fprintf(stderr, "malloc failed: %s\n", strerror(errno));
        exit(EXIT_FAILURE);
    }
    return allocData;
}
|
20,200 | #include <stdio.h>
//void __global__ kernel_add_one(int* a, int length) {
// int gid = threadIdx.x + blockDim.x*blockIdx.x;
//
// while(gid < length) {
// a[gid] += 1;
// gid += blockDim.x*gridDim.x;
// }
//}
// Debug kernel: each thread prints its global linear index to confirm that
// device-side printf works.
void __global__ testKernel()
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    printf("can we print from a kernel? -- %i\n", gid);
}
// result[i] = 2 * array_device[i] for the first numElements entries; tail
// threads beyond numElements exit.
void __global__ testKernel_doubleValues(float* array_device, int numElements, float* result) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElements)
        return;
    result[gid] = 2 * array_device[gid];
}
// Prints the [minRow,maxRow) x [minCol,maxCol) window of a row-major
// numRows x numColumns matrix, one element per line.  Debug helper; intended
// to be run by a single thread (every launched thread prints the whole window).
void __global__ kernal_printSubsection(float* array_device, int minRow, int maxRow, int minCol, int maxCol, int numRows, int numColumns){
    printf("Accessing from kernel:\n");
    for (int r = minRow; r < maxRow; ++r)
    {
        for (int c = minCol; c < maxCol; ++c)
        {
            const int idx = r*numColumns + c;
            printf("(%i,%i)->%i %f\n", r, c, idx, array_device[idx]);
        }
        printf("\n");
    }
}
// Element-wise binary op dispatcher over a sub-view of a matrix resident on
// the device.  "this" is addressed indirectly through this_rowArray /
// this_colArray (row/col indices into the full this_totalRows x
// this_totalColumns matrix).  "other" is either a section_numRows x
// section_numCols operand matching the view, or — when otherLength == 1 — a
// single scalar broadcast against every element.  Results go back into
// array_device (inPlace) or into the section-shaped results_device.
// operationID: 0 add, 1 subtract, 2 divide, 3 multiply, 4 greater-than
// (writes 1/0; only valid when not in place).  2D launch, one thread per
// view element.  printf calls are debug tracing.
void __global__ kernel_gpuBasicOps(int operationID, float* array_device, int this_totalColumns, int this_totalRows, int* this_rowArray, int this_rowArrayLength, int* this_colArray, int this_colArrayLength, float* other_device, int section_numCols, int section_numRows, int otherLength, bool inPlace, float* results_device) {
    const int n = blockIdx.x * blockDim.x + threadIdx.x;
    const int m = blockIdx.y * blockDim.y + threadIdx.y;
    if (n >= this_rowArrayLength || m >= this_colArrayLength)
        return;
    printf("Kernel Operation on (%i,%i) -- otherLength=%i\n", n,m, otherLength);
    // Resolve the three flat indices: the view element in the full matrix,
    // the matching operand (index 0 when broadcasting a scalar), and the
    // section-shaped result slot.
    const int thisIdx    = this_rowArray[n]*this_totalColumns + this_colArray[m];
    const int otherIdx   = (otherLength == 1) ? 0 : n*section_numCols + m;
    const int resultsIdx = n*section_numCols + m;
    const float lhs = array_device[thisIdx];
    const float rhs = other_device[otherIdx];
    float* const dest = inPlace ? &array_device[thisIdx] : &results_device[resultsIdx];
    switch (operationID) {
    case 0: // **ADDITION**
        printf("Adding %f to %f at location %i\n", rhs, lhs, thisIdx);
        *dest = lhs + rhs;
        break;
    case 1: // **SUBTRACTION**
        *dest = lhs - rhs;
        break;
    case 2: // **DIVISION**
        *dest = lhs / rhs;
        break;
    case 3: // **MULTIPLICATION**
        *dest = lhs * rhs;
        break;
    case 4: // GREATER THAN
        if (inPlace)
            printf("GREATER THAN operation is only valid for not-in-place");
        else
            *dest = (lhs > rhs) ? 1 : 0;
        break;
    default: // unknown operationID: no-op, matching the original if/else chain
        break;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.